repo_id stringlengths 6 101 | size int64 367 5.14M | file_path stringlengths 2 269 | content stringlengths 367 5.14M |
|---|---|---|---|
27182812/ChatGLM-LLaMA-chinese-insturct | 2,179 | src/transformers/models/clipseg/__init__.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Mapping of submodule name -> public symbol names. _LazyModule uses this to
# resolve attributes on first access, keeping `import transformers` fast.
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}
# The modeling submodule requires PyTorch; only register it when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]
# Static type checkers see the real (eager) imports; at runtime the module is
# replaced with a _LazyModule that imports submodules on demand.
if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 7,838 | src/transformers/models/clipseg/processing_clipseg.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image/Text processor class for CLIPSeg
"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    r"""
    Constructs a CLIPSeg processor which wraps a CLIPSeg image processor and a CLIP tokenizer into a single processor.
    [`CLIPSegProcessor`] offers all the functionalities of [`ViTImageProcessor`] and [`CLIPTokenizerFast`]. See the
    [`~CLIPSegProcessor.__call__`] and [`~CLIPSegProcessor.decode`] for more information.
    Args:
        image_processor ([`ViTImageProcessor`]):
            The image processor is a required input.
        tokenizer ([`CLIPTokenizerFast`]):
            The tokenizer is a required input.
    """
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # `feature_extractor` is the deprecated name of `image_processor`; accept it
        # for backward compatibility but warn about the upcoming removal.
        # Initialize to None so that the fallback below raises the intended
        # ValueError (instead of an UnboundLocalError) when neither
        # `image_processor` nor `feature_extractor` is provided.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        """
        Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        ViTImageProcessor's [`~ViTImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of
        the above two methods for more information.
        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
                number of channels, H and W are image height and width.
            visual_prompt (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The visual prompt image or batch of images to be prepared. Each visual prompt image can be a PIL image,
                NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape
                (C, H, W), where C is a number of channels, H and W are image height and width.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.
        Returns:
            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        """
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        # Text and visual prompt are mutually exclusive conditioning signals.
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        # Assemble the output according to which combination of inputs was provided.
        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            # Only images were provided.
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
|
27182812/ChatGLM-LLaMA-chinese-insturct | 11,121 | src/transformers/models/clipseg/convert_clipseg_original_pytorch_to_hf.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert CLIPSeg checkpoints from the original repository. URL: https://github.com/timojl/clipseg."""
import argparse
import requests
import torch
from PIL import Image
from transformers import (
CLIPSegConfig,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPSegTextConfig,
CLIPSegVisionConfig,
CLIPTokenizer,
ViTFeatureExtractor,
)
def get_clipseg_config(model_name):
    """Build the `CLIPSegConfig` corresponding to an original checkpoint name.

    Args:
        model_name (`str`): one of "clipseg-rd16", "clipseg-rd64", "clipseg-rd64-refined".

    Returns:
        `CLIPSegConfig`: the configuration for the HF model.
    """
    text_config = CLIPSegTextConfig()
    # All released CLIPSeg checkpoints use a patch size of 16 for the vision tower.
    vision_config = CLIPSegVisionConfig(patch_size=16)
    # "refined" checkpoints use the more complex transposed-convolution decoder;
    # "rd16" checkpoints reduce the CLIP embedding to 16 dims instead of 64.
    use_complex_transposed_convolution = "refined" in model_name
    reduce_dim = 16 if "rd16" in model_name else 64
    config = CLIPSegConfig.from_text_vision_configs(
        text_config,
        vision_config,
        use_complex_transposed_convolution=use_complex_transposed_convolution,
        reduce_dim=reduce_dim,
    )
    return config
def rename_key(name):
    """Translate one original CLIPSeg checkpoint parameter name to its HF equivalent.

    Order matters: prefixes first, then encoder-internal names, then the
    text/vision/projection/decoder specific renames.
    """
    # Prefixes.
    name = name.replace("clip_model", "clip")
    if "transformer" in name:
        if "visual" in name:
            name = name.replace("visual.transformer", "vision_model")
        else:
            name = name.replace("transformer", "text_model")
    # Encoder-internal renames (str.replace is a no-op when the substring is absent).
    for old, new in (
        ("resblocks", "encoder.layers"),
        ("ln_1", "layer_norm1"),
        ("ln_2", "layer_norm2"),
        ("c_fc", "fc1"),
        ("c_proj", "fc2"),
    ):
        name = name.replace(old, new)
    # Keep already-converted "self_attn" names untouched.
    if "attn" in name and "self" not in name:
        name = name.replace("attn", "self_attn")
    # Text encoder.
    name = name.replace("token_embedding", "text_model.embeddings.token_embedding")
    if "positional_embedding" in name and "visual" not in name:
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    name = name.replace("ln_final", "text_model.final_layer_norm")
    # Vision encoder.
    for old, new in (
        ("visual.class_embedding", "vision_model.embeddings.class_embedding"),
        ("visual.conv1", "vision_model.embeddings.patch_embedding"),
        ("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight"),
        ("visual.ln_pre", "vision_model.pre_layrnorm"),
        ("visual.ln_post", "vision_model.post_layernorm"),
    ):
        name = name.replace(old, new)
    # Projection layers.
    name = name.replace("visual.proj", "visual_projection.weight")
    name = name.replace("text_projection", "text_projection.weight")
    # Decoder.
    name = name.replace("trans_conv", "transposed_convolution")
    if any(token in name for token in ("film_mul", "film_add", "reduce", "transposed_convolution")):
        name = "decoder." + name
    name = name.replace("blocks", "decoder.layers")
    name = name.replace("linear1", "mlp.fc1")
    name = name.replace("linear2", "mlp.fc2")
    if "norm1" in name and "layer_" not in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "layer_" not in name:
        name = name.replace("norm2", "layer_norm2")
    return name
def convert_state_dict(orig_state_dict, config):
    """Convert an original CLIPSeg state dict (in place) to the HF naming scheme.

    Fused attention in_proj weights/biases are split into separate q/k/v
    projections; every other key is renamed via `rename_key`.

    Args:
        orig_state_dict (`dict`): the original checkpoint state dict; mutated in place.
        config (`CLIPSegConfig`): provides the hidden sizes used to split q/k/v.

    Returns:
        `dict`: the same dict, now holding HF-style keys.
    """
    def split_qkv(prefix, layer_num, dim, val, is_weight):
        # The checkpoint stores q, k and v stacked along dim 0 of the fused tensor.
        suffix = "weight" if is_weight else "bias"
        for proj, chunk in (("q_proj", val[:dim]), ("k_proj", val[dim : dim * 2]), ("v_proj", val[-dim:])):
            orig_state_dict[f"{prefix}.{layer_num}.self_attn.{proj}.{suffix}"] = chunk
    for key in list(orig_state_dict.keys()):
        val = orig_state_dict.pop(key)
        parts = key.split(".")
        if key.startswith("clip_model") and "attn.in_proj" in key:
            # CLIP encoder attention: fused in_proj -> separate q/k/v projections.
            if "visual" in key:
                layer_num = int(parts[4])
                dim = config.vision_config.hidden_size
                prefix = "clip.vision_model.encoder.layers"
            else:
                layer_num = int(parts[3])
                dim = config.text_config.hidden_size
                prefix = "clip.text_model.encoder.layers"
            split_qkv(prefix, layer_num, dim, val, "weight" in key)
        elif "self_attn" in key and "out_proj" not in key:
            # Decoder attention: same fused layout at the reduced dimensionality.
            split_qkv("decoder.layers", int(parts[1]), config.reduce_dim, val, "weight" in key)
        else:
            new_name = rename_key(key)
            # The original projection matrices are stored transposed.
            if "visual_projection" in new_name or "text_projection" in new_name:
                val = val.T
            orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    """Download the standard COCO validation image (two cats) used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    return Image.open(requests.get(url, stream=True).raw)
def convert_clipseg_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    """Convert an original CLIPSeg checkpoint to the HF format and verify its outputs.

    Args:
        model_name: one of "clipseg-rd16", "clipseg-rd64", "clipseg-rd64-refined".
        checkpoint_path: path to the original checkpoint (must include both CLIP and decoder weights).
        pytorch_dump_folder_path: if not None, directory where the converted model/processor are saved.
        push_to_hub: if True, push the converted model and processor to the "CIDAS" hub namespace.

    Raises:
        ValueError: if unexpected keys are missing/left over after loading, or on an unknown model name.
    """
    config = get_clipseg_config(model_name)
    model = CLIPSegForImageSegmentation(config)
    model.eval()
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    # remove some keys: everything under the original "model" prefix is dropped
    for key in state_dict.copy().keys():
        if key.startswith("model"):
            state_dict.pop(key, None)
    # rename some keys
    state_dict = convert_state_dict(state_dict, config)
    # Only the position_ids buffers (created by the HF model itself) may be missing,
    # and only decoder.reduce.* (presumably unused by the HF architecture — see
    # original repo) may be left over; anything else means the conversion is wrong.
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if missing_keys != ["clip.text_model.embeddings.position_ids", "clip.vision_model.embeddings.position_ids"]:
        raise ValueError("Missing keys that are not expected: {}".format(missing_keys))
    if unexpected_keys != ["decoder.reduce.weight", "decoder.reduce.bias"]:
        raise ValueError(f"Unexpected keys: {unexpected_keys}")
    # NOTE(review): size=352 presumably matches the original implementation's input resolution — confirm.
    feature_extractor = ViTFeatureExtractor(size=352)
    tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    processor = CLIPSegProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    image = prepare_img()
    text = ["a glass", "something to fill", "wood", "a jar"]
    # One copy of the image per text prompt (batched forward pass).
    inputs = processor(text=text, images=[image] * len(text), padding="max_length", return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # verify values: slices precomputed with the original implementation
    expected_conditional = torch.tensor([0.1110, -0.1882, 0.1645])
    expected_pooled_output = torch.tensor([0.2692, -0.7197, -0.1328])
    if model_name == "clipseg-rd64-refined":
        expected_masks_slice = torch.tensor(
            [[-10.0407, -9.9431, -10.2646], [-9.9751, -9.7064, -9.9586], [-9.6891, -9.5645, -9.9618]]
        )
    elif model_name == "clipseg-rd64":
        expected_masks_slice = torch.tensor(
            [[-7.2877, -7.2711, -7.2463], [-7.2652, -7.2780, -7.2520], [-7.2239, -7.2204, -7.2001]]
        )
    elif model_name == "clipseg-rd16":
        expected_masks_slice = torch.tensor(
            [[-6.3955, -6.4055, -6.4151], [-6.3911, -6.4033, -6.4100], [-6.3474, -6.3702, -6.3762]]
        )
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits[0, :3, :3], expected_masks_slice, atol=1e-3)
    assert torch.allclose(outputs.conditional_embeddings[0, :3], expected_conditional, atol=1e-3)
    assert torch.allclose(outputs.pooled_output[0, :3], expected_pooled_output, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to the hub")
        model.push_to_hub(f"CIDAS/{model_name}")
        processor.push_to_hub(f"CIDAS/{model_name}")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the checkpoint conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="clipseg-rd64",
        type=str,
        choices=["clipseg-rd16", "clipseg-rd64", "clipseg-rd64-refined"],
        help=(
            "Name of the model. Supported models are: clipseg-rd64, clipseg-rd16 and clipseg-rd64-refined (rd meaning"
            " reduce dimension)"
        ),
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/CLIPSeg/clip_plus_rd64-uni.pth",
        type=str,
        help=(
            "Path to the original checkpoint. Note that the script assumes that the checkpoint includes both CLIP and"
            " the decoder weights."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_clipseg_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 17,451 | src/transformers/models/clipseg/configuration_clipseg.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" CLIPSeg model configuration"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# Maps canonical checkpoint names on the Hub to their hosted config files.
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CIDAS/clipseg-rd64": "https://huggingface.co/CIDAS/clipseg-rd64/resolve/main/config.json",
}
class CLIPSegTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`CLIPSegModel`]. It is used to instantiate an
    CLIPSeg model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the CLIPSeg
    [CIDAS/clipseg-rd64](https://huggingface.co/CIDAS/clipseg-rd64) architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 49408):
            Vocabulary size of the CLIPSeg text model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`CLIPSegModel`].
        hidden_size (`int`, *optional*, defaults to 512):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        max_position_embeddings (`int`, *optional*, defaults to 77):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 0):
            Beginning-of-sequence token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End-of-sequence token id.
    Example:
    ```python
    >>> from transformers import CLIPSegTextConfig, CLIPSegTextModel
    >>> # Initializing a CLIPSegTextConfig with CIDAS/clipseg-rd64 style configuration
    >>> configuration = CLIPSegTextConfig()
    >>> # Initializing a CLIPSegTextModel (with random weights) from the CIDAS/clipseg-rd64 style configuration
    >>> model = CLIPSegTextModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "clipseg_text_model"
    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=77,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from CLIPSegConfig
        if config_dict.get("model_type") == "clipseg":
            config_dict = config_dict["text_config"]
        # Warn (but do not fail) when loading a config saved by a different model type.
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class CLIPSegVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`CLIPSegModel`]. It is used to instantiate an
    CLIPSeg model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the CLIPSeg
    [CIDAS/clipseg-rd64](https://huggingface.co/CIDAS/clipseg-rd64) architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input image channels.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
    Example:
    ```python
    >>> from transformers import CLIPSegVisionConfig, CLIPSegVisionModel
    >>> # Initializing a CLIPSegVisionConfig with CIDAS/clipseg-rd64 style configuration
    >>> configuration = CLIPSegVisionConfig()
    >>> # Initializing a CLIPSegVisionModel (with random weights) from the CIDAS/clipseg-rd64 style configuration
    >>> model = CLIPSegVisionModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "clipseg_vision_model"
    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from CLIPSegConfig
        if config_dict.get("model_type") == "clipseg":
            config_dict = config_dict["vision_config"]
        # Warn (but do not fail) when loading a config saved by a different model type.
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class CLIPSegConfig(PretrainedConfig):
    r"""
    [`CLIPSegConfig`] is the configuration class to store the configuration of a [`CLIPSegModel`]. It is used to
    instantiate a CLIPSeg model according to the specified arguments, defining the text model and vision model configs.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the CLIPSeg
    [CIDAS/clipseg-rd64](https://huggingface.co/CIDAS/clipseg-rd64) architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`CLIPSegTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`CLIPSegVisionConfig`].
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. Default is used as per the original CLIPSeg
            implementation.
        extract_layers (`List[int]`, *optional*, defaults to [3, 6, 9]):
            Layers to extract when forwarding the query image through the frozen visual backbone of CLIP.
        reduce_dim (`int`, *optional*, defaults to 64):
            Dimensionality to reduce the CLIP vision embedding.
        decoder_num_attention_heads (`int`, *optional*, defaults to 4):
            Number of attention heads in the decoder of CLIPSeg.
        decoder_attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        decoder_hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        decoder_intermediate_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layers in the Transformer decoder.
        conditional_layer (`int`, *optional*, defaults to 0):
            The layer to use of the Transformer encoder whose activations will be combined with the condition
            embeddings using FiLM (Feature-wise Linear Modulation). If 0, the last layer is used.
        use_complex_transposed_convolution (`bool`, *optional*, defaults to `False`):
            Whether to use a more complex transposed convolution in the decoder, enabling more fine-grained
            segmentation.
        kwargs (*optional*):
            Dictionary of keyword arguments.
    Example:
    ```python
    >>> from transformers import CLIPSegConfig, CLIPSegModel
    >>> # Initializing a CLIPSegConfig with CIDAS/clipseg-rd64 style configuration
    >>> configuration = CLIPSegConfig()
    >>> # Initializing a CLIPSegModel (with random weights) from the CIDAS/clipseg-rd64 style configuration
    >>> model = CLIPSegModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    >>> # We can also initialize a CLIPSegConfig from a CLIPSegTextConfig and a CLIPSegVisionConfig
    >>> # Initializing a CLIPSegText and CLIPSegVision configuration
    >>> config_text = CLIPSegTextConfig()
    >>> config_vision = CLIPSegVisionConfig()
    >>> config = CLIPSegConfig.from_text_vision_configs(config_text, config_vision)
    ```"""
    model_type = "clipseg"
    is_composition = True
    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        extract_layers=[3, 6, 9],
        reduce_dim=64,
        decoder_num_attention_heads=4,
        decoder_attention_dropout=0.0,
        decoder_hidden_act="quick_gelu",
        decoder_intermediate_size=2048,
        conditional_layer=0,
        use_complex_transposed_convolution=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # `text_config_dict`/`vision_config_dict` are legacy keyword aliases; when
        # provided they silently take precedence over `text_config`/`vision_config`.
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)
        if text_config_dict is not None:
            text_config = text_config_dict
        if vision_config_dict is not None:
            vision_config = vision_config_dict
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the CLIPSegTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the CLIPSegVisionConfig with default values.")
        self.text_config = CLIPSegTextConfig(**text_config)
        self.vision_config = CLIPSegVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.extract_layers = extract_layers
        self.reduce_dim = reduce_dim
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_attention_dropout = decoder_attention_dropout
        self.decoder_hidden_act = decoder_hidden_act
        self.decoder_intermediate_size = decoder_intermediate_size
        self.conditional_layer = conditional_layer
        self.initializer_factor = 1.0
        self.use_complex_transposed_convolution = use_complex_transposed_convolution
    @classmethod
    def from_text_vision_configs(cls, text_config: CLIPSegTextConfig, vision_config: CLIPSegVisionConfig, **kwargs):
        r"""
        Instantiate a [`CLIPSegConfig`] (or a derived class) from clipseg text model configuration and clipseg vision
        model configuration.
        Returns:
            [`CLIPSegConfig`]: An instance of a configuration object
        """
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        # The nested configs are objects in __dict__; serialize them recursively.
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
|
27182812/ChatGLM-LLaMA-chinese-insturct | 64,756 | src/transformers/models/clipseg/modeling_clipseg.py | # coding=utf-8
# Copyright 2022 The OpenAI Team Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch CLIPSeg model."""
import copy
import math
from dataclasses import dataclass
from typing import Any, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...modeling_utils import PreTrainedModel
from ...utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_clipseg import CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "CIDAS/clipseg-rd64-refined"
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST = [
"CIDAS/clipseg-rd64-refined",
# See all CLIPSeg models at https://huggingface.co/models?filter=clipseg
]
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
# contrastive loss function, adapted from
# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    """Cross-entropy loss where the correct class for row `i` is column `i` (the diagonal)."""
    diagonal_targets = torch.arange(len(logits), device=logits.device)
    return nn.functional.cross_entropy(logits, diagonal_targets)
# Copied from transformers.models.clip.modeling_clip.clip_loss with clip->clipseg
def clipseg_loss(similarity: torch.Tensor) -> torch.Tensor:
    """Symmetric CLIP loss: mean of the contrastive losses over rows and over columns."""
    text_to_image = contrastive_loss(similarity)
    image_to_text = contrastive_loss(similarity.t())
    return (text_to_image + image_to_text) / 2.0
@dataclass
# Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->CLIPSeg
class CLIPSegOutput(ModelOutput):
    """
    Output of [`CLIPSegModel`]: contrastive loss, pairwise image/text similarity logits, the projected
    embeddings of both modalities, and the raw outputs of the two towers.
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Contrastive loss for image-text similarity.
        logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
            The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
            similarity scores.
        logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
            The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
            similarity scores.
        text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`):
            The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPSegTextModel`].
        image_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`):
            The image embeddings obtained by applying the projection layer to the pooled output of
            [`CLIPSegVisionModel`].
        text_model_output(`BaseModelOutputWithPooling`):
            The output of the [`CLIPSegTextModel`].
        vision_model_output(`BaseModelOutputWithPooling`):
            The output of the [`CLIPSegVisionModel`].
    """
    loss: Optional[torch.FloatTensor] = None
    logits_per_image: torch.FloatTensor = None
    logits_per_text: torch.FloatTensor = None
    text_embeds: torch.FloatTensor = None
    image_embeds: torch.FloatTensor = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None
    def to_tuple(self) -> Tuple[Any]:
        # Flatten to a plain tuple; the two nested model outputs are recursively converted so the
        # result contains no ModelOutput instances.
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )
@dataclass
class CLIPSegDecoderOutput(ModelOutput):
    """
    Output of the CLIPSeg decoder: per-pixel classification logits plus the decoder's optional hidden
    states and attention maps.
    Args:
        logits (`torch.FloatTensor` of shape `(batch_size, height, width)`):
            Classification scores for each pixel.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """
    logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class CLIPSegImageSegmentationOutput(ModelOutput):
    """
    Output of [`CLIPSegForImageSegmentation`].
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Contrastive loss for image-text similarity.
        logits (`torch.FloatTensor`):
            Segmentation logits from the decoder. NOTE(review): exact shape not visible from this chunk --
            confirm against `CLIPSegForImageSegmentation.forward`.
        conditional_embeddings (`torch.FloatTensor`):
            Embeddings of the conditioning input (presumably the text/image prompt -- confirm at the caller).
        pooled_output (`torch.FloatTensor`):
            Pooled output of the vision model.
        vision_model_output (`BaseModelOutputWithPooling`):
            The output of the [`CLIPSegVisionModel`].
        decoder_output (`CLIPSegDecoderOutput`):
            The full output of the CLIPSeg decoder.
    """
    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    conditional_embeddings: torch.FloatTensor = None
    pooled_output: torch.FloatTensor = None
    vision_model_output: BaseModelOutputWithPooling = None
    decoder_output: CLIPSegDecoderOutput = None
    def to_tuple(self) -> Tuple[Any]:
        # Flatten to a plain tuple; the nested outputs are recursively converted to tuples.
        return tuple(
            self[k] if k not in ["vision_model_output", "decoder_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )
class CLIPSegVisionEmbeddings(nn.Module):
    """
    CLIPSeg vision embeddings: a learned class token concatenated with conv-patch embeddings, plus
    position embeddings that are bicubically resampled in 2D whenever the input resolution differs
    from `config.image_size`.
    """

    # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings.__init__
    def __init__(self, config: "CLIPSegVisionConfig"):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size
        self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
        # Non-overlapping patches: kernel size == stride == patch_size, no bias.
        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            bias=False,
        )
        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1  # +1 for the class token
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)))

    def interpolate_position_embeddings(self, new_size):
        """
        Bicubically resample the (square) patch position-embedding grid to `new_size` (height, width).
        The class-token position embedding (row 0) is passed through unchanged.
        """
        if len(new_size) != 2:
            raise ValueError("new_size should consist of 2 values")
        num_patches_one_direction = int(self.num_patches**0.5)
        # we interpolate the position embeddings in 2D
        a = self.position_embedding.weight[1:].T.view(
            1, self.config.hidden_size, num_patches_one_direction, num_patches_one_direction
        )
        b = (
            nn.functional.interpolate(a, new_size, mode="bicubic", align_corners=False)
            .squeeze(0)
            .view(self.config.hidden_size, new_size[0] * new_size[1])
            .T
        )
        result = torch.cat([self.position_embedding.weight[:1], b])
        return result

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        batch_size = pixel_values.shape[0]
        patch_embeds = self.patch_embedding(pixel_values)  # shape = [*, width, grid, grid]
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        if embeddings.shape[1] != self.num_positions:
            # Input resolution differs from the pretraining resolution: resample the position
            # embeddings to this input's (assumed square) patch grid.
            # (Removed the dead `embeddings = embeddings.to(embeddings.dtype)` self-cast here.)
            new_shape = int(math.sqrt(embeddings.shape[1] - 1))
            embeddings = embeddings + self.interpolate_position_embeddings((new_shape, new_shape))
        else:
            embeddings = embeddings + self.position_embedding(self.position_ids)
        return embeddings
# Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->CLIPSeg
class CLIPSegTextEmbeddings(nn.Module):
    """Token embeddings plus learned absolute position embeddings for the CLIPSeg text tower."""

    def __init__(self, config: CLIPSegTextConfig):
        super().__init__()
        dim = config.hidden_size
        self.token_embedding = nn.Embedding(config.vocab_size, dim)
        self.position_embedding = nn.Embedding(config.max_position_embeddings, dim)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.Tensor:
        # Sequence length comes from input_ids when available, otherwise from the embeddings.
        if input_ids is not None:
            sequence_length = input_ids.shape[-1]
        else:
            sequence_length = inputs_embeds.shape[-2]
        if position_ids is None:
            position_ids = self.position_ids[:, :sequence_length]
        if inputs_embeds is None:
            inputs_embeds = self.token_embedding(input_ids)
        return inputs_embeds + self.position_embedding(position_ids)
# Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->CLIPSeg
class CLIPSegAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        # 1/sqrt(head_dim); folded into the query projection in `forward`.
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout
        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # [bsz, seq_len, embed_dim] -> [bsz, num_heads, seq_len, head_dim]
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""
        bsz, tgt_len, embed_dim = hidden_states.size()
        # get query proj
        query_states = self.q_proj(hidden_states) * self.scale
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
        # Merge batch and head dims so the scores can be computed with a single bmm.
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)
        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )
        # apply the causal_attention_mask first
        if causal_attention_mask is not None:
            if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
                    f" {causal_attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        if output_attentions:
            # this operation is a bit akward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None
        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_probs, value_states)
        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            # NOTE(review): the f-string below prints (bsz, num_heads, ...) although the check uses
            # bsz * num_heads -- upstream quirk, left untouched in this doc-only pass.
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )
        # [bsz * num_heads, tgt_len, head_dim] -> [bsz, tgt_len, embed_dim]
        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
        attn_output = self.out_proj(attn_output)
        return attn_output, attn_weights_reshaped
# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->CLIPSeg
class CLIPSegMLP(nn.Module):
    """Two-layer feed-forward block (hidden -> intermediate -> hidden) with a configurable activation."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # fc1 -> activation -> fc2, expressed as a single pipeline.
        return self.fc2(self.activation_fn(self.fc1(hidden_states)))
# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->CLIPSeg
class CLIPSegEncoderLayer(nn.Module):
    """One pre-norm transformer block: self-attention and MLP, each wrapped in a residual connection."""

    def __init__(self, config: CLIPSegConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = CLIPSegAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = CLIPSegMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input of shape `(batch, seq_len, embed_dim)`.
            attention_mask (`torch.FloatTensor`): additive mask of shape `(batch, 1, tgt_len, src_len)`
                where padding positions hold very large negative values.
            causal_attention_mask (`torch.FloatTensor`): additive causal mask, same shape convention.
            output_attentions (`bool`, *optional*): if `True`, also return the attention weights.
        """
        # Self-attention sub-block (pre-norm + residual).
        attn_out, attn_weights = self.self_attn(
            hidden_states=self.layer_norm1(hidden_states),
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = hidden_states + attn_out
        # Feed-forward sub-block (pre-norm + residual).
        hidden_states = hidden_states + self.mlp(self.layer_norm2(hidden_states))
        return (hidden_states, attn_weights) if output_attentions else (hidden_states,)
class CLIPSegPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config_class = CLIPSegConfig
    base_model_prefix = "clip"
    supports_gradient_checkpointing = True
    # `position_ids` buffers are rebuilt at init time, so missing keys are expected on load.
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    def _init_weights(self, module):
        """Initialize the weights"""
        factor = self.config.initializer_factor
        # Module-type dispatch: exactly one of the branches below runs per module, then the
        # trailing `if` blocks apply LayerNorm/Linear-bias defaults *in addition* to the branch.
        if isinstance(module, CLIPSegTextEmbeddings):
            module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
            module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
        elif isinstance(module, CLIPSegVisionEmbeddings):
            factor = self.config.initializer_factor
            nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
            nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
            nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
        elif isinstance(module, CLIPSegAttention):
            factor = self.config.initializer_factor
            # Scaled init: std shrinks with both embed_dim and network depth.
            in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            out_proj_std = (module.embed_dim**-0.5) * factor
            nn.init.normal_(module.q_proj.weight, std=in_proj_std)
            nn.init.normal_(module.k_proj.weight, std=in_proj_std)
            nn.init.normal_(module.v_proj.weight, std=in_proj_std)
            nn.init.normal_(module.out_proj.weight, std=out_proj_std)
        elif isinstance(module, CLIPSegMLP):
            factor = self.config.initializer_factor
            in_proj_std = (
                (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            )
            fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
            nn.init.normal_(module.fc1.weight, std=fc_std)
            nn.init.normal_(module.fc2.weight, std=in_proj_std)
        elif isinstance(module, CLIPSegModel):
            nn.init.normal_(
                module.text_projection.weight,
                std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
            )
            nn.init.normal_(
                module.visual_projection.weight,
                std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
            )
        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    def _set_gradient_checkpointing(self, module, value=False):
        # Only the encoder supports checkpointing; toggled by PreTrainedModel machinery.
        if isinstance(module, CLIPSegEncoder):
            module.gradient_checkpointing = value
CLIPSEG_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`CLIPSegConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
CLIPSEG_TEXT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
CLIPSEG_VISION_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
CLIPSEG_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
# Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->CLIPSeg
class CLIPSegEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`CLIPSegEncoderLayer`].
    Args:
        config: CLIPSegConfig
    """
    def __init__(self, config: CLIPSegConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([CLIPSegEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False
    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            # Hidden states are recorded *before* each layer; the final layer's output is added after the loop.
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                # Recompute activations in the backward pass to save memory. The closure captures
                # `output_attentions` so the checkpointed call receives only tensor positional args.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(encoder_layer),
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions=output_attentions,
                )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            # Tuple form: drop the entries that were not requested.
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
class CLIPSegTextTransformer(nn.Module):
    """Text tower: token/position embeddings -> causal transformer encoder -> final LayerNorm, with
    EOT-token pooling."""
    # Copied from transformers.models.clip.modeling_clip.CLIPTextTransformer.__init__ with CLIP->CLIPSeg
    def __init__(self, config: CLIPSegTextConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size
        self.embeddings = CLIPSegTextEmbeddings(config)
        self.encoder = CLIPSegEncoder(config)
        self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
    @add_start_docstrings_to_model_forward(CLIPSEG_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegTextConfig)
    # Copied from transformers.models.clip.modeling_clip.CLIPTextTransformer.forward with clip->clipseg, CLIP->CLIPSeg
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # `inputs_embeds` is not supported: token ids are required to locate the EOT token for pooling below.
        if input_ids is None:
            raise ValueError("You have to specify input_ids")
        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])
        hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
        bsz, seq_len = input_shape
        # CLIPSeg's text model uses causal mask, prepare it here.
        # https://github.com/openai/CLIPSeg/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clipseg/model.py#L324
        causal_attention_mask = self._build_causal_attention_mask(bsz, seq_len, hidden_states.dtype).to(
            hidden_states.device
        )
        # expand attention_mask
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _expand_mask(attention_mask, hidden_states.dtype)
        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.final_layer_norm(last_hidden_state)
        # text_embeds.shape = [batch_size, sequence_length, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14
        pooled_output = last_hidden_state[
            torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
            input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),
        ]
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
    def _build_causal_attention_mask(self, bsz, seq_len, dtype):
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        # Strict upper triangle holds dtype-min, so each position attends to itself and earlier
        # positions only. Built on the default device; the caller moves it to the right one.
        mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype)
        mask.fill_(torch.tensor(torch.finfo(dtype).min))
        mask.triu_(1)  # zero out the lower diagonal
        mask = mask.unsqueeze(1)  # expand mask
        return mask
class CLIPSegTextModel(CLIPSegPreTrainedModel):
    """Bare CLIPSeg text encoder: wraps [`CLIPSegTextTransformer`] without any projection head."""

    config_class = CLIPSegTextConfig
    _no_split_modules = ["CLIPSegEncoderLayer"]

    def __init__(self, config: CLIPSegTextConfig):
        super().__init__(config)
        self.text_model = CLIPSegTextTransformer(config)
        # Initialize weights and apply final processing.
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        # The token embedding table serves as the model's input embedding.
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    @add_start_docstrings_to_model_forward(CLIPSEG_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegTextConfig)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:
        Examples:
        ```python
        >>> from transformers import AutoTokenizer, CLIPSegTextModel
        >>> tokenizer = AutoTokenizer.from_pretrained("CIDAS/clipseg-rd64-refined")
        >>> model = CLIPSegTextModel.from_pretrained("CIDAS/clipseg-rd64-refined")
        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled (EOS token) states
        ```"""
        # Pure delegation to the underlying text transformer.
        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        return text_outputs
class CLIPSegVisionTransformer(nn.Module):
    """CLIPSeg vision backbone: patch/position embeddings, pre-LayerNorm, transformer encoder, post-LayerNorm pooling."""

    # Copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer.__init__ with CLIP->CLIPSeg
    def __init__(self, config: CLIPSegVisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size
        self.embeddings = CLIPSegVisionEmbeddings(config)
        # NOTE(review): "pre_layrnorm" (sic) is presumably kept misspelled to match the
        # parameter names of released CLIP/CLIPSeg checkpoints — renaming it would break
        # state-dict loading; confirm before ever "fixing" the typo.
        self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.encoder = CLIPSegEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @add_start_docstrings_to_model_forward(CLIPSEG_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegVisionConfig)
    # Copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer.forward
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        """
        # Fall back to the config defaults for any unspecified output flag.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        hidden_states = self.embeddings(pixel_values)
        hidden_states = self.pre_layrnorm(hidden_states)
        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        last_hidden_state = encoder_outputs[0]
        # Pool by taking the first token (the CLS embedding prepended by CLIPSegVisionEmbeddings),
        # then apply the final LayerNorm.
        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
class CLIPSegVisionModel(CLIPSegPreTrainedModel):
    """The bare CLIPSeg vision encoder, exposed as a standalone model (no projection head on top)."""

    config_class = CLIPSegVisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: CLIPSegVisionConfig):
        super().__init__(config)
        self.vision_model = CLIPSegVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        # The patch-embedding convolution is the model's input embedding layer.
        return self.vision_model.embeddings.patch_embedding

    @add_start_docstrings_to_model_forward(CLIPSEG_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPSegVisionModel

        >>> processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
        >>> model = CLIPSegVisionModel.from_pretrained("CIDAS/clipseg-rd64-refined")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```"""
        # Pure delegation: all the work happens in CLIPSegVisionTransformer.
        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
@add_start_docstrings(CLIPSEG_START_DOCSTRING)
class CLIPSegModel(CLIPSegPreTrainedModel):
    """Dual-encoder CLIPSeg model: text and vision transformers with linear projections into a shared embedding space."""

    config_class = CLIPSegConfig

    def __init__(self, config: CLIPSegConfig):
        super().__init__(config)

        # Guard against a composite config whose sub-configs were built from the wrong classes.
        if not isinstance(config.text_config, CLIPSegTextConfig):
            raise ValueError(
                "config.text_config is expected to be of type CLIPSegTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.vision_config, CLIPSegVisionConfig):
            raise ValueError(
                "config.vision_config is expected to be of type CLIPSegVisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        text_config = config.text_config
        vision_config = config.vision_config

        self.projection_dim = config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size

        self.text_model = CLIPSegTextTransformer(text_config)
        self.vision_model = CLIPSegVisionTransformer(vision_config)

        # Bias-free projections map both modalities into the shared `projection_dim` space.
        self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
        # Learnable (log-space) temperature for the contrastive similarity logits.
        self.logit_scale = nn.Parameter(torch.ones([]) * self.config.logit_scale_init_value)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CLIPSEG_TEXT_INPUTS_DOCSTRING)
    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`CLIPSegTextModel`].

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, CLIPSegModel

        >>> tokenizer = AutoTokenizer.from_pretrained("CIDAS/clipseg-rd64-refined")
        >>> model = CLIPSegModel.from_pretrained("CIDAS/clipseg-rd64-refined")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
        >>> text_features = model.get_text_features(**inputs)
        ```"""
        # Use CLIPSEG model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Index 1 is the pooled output regardless of return_dict (tuple position / pooler_output).
        pooled_output = text_outputs[1]
        text_features = self.text_projection(pooled_output)

        return text_features

    @add_start_docstrings_to_model_forward(CLIPSEG_VISION_INPUTS_DOCSTRING)
    def get_image_features(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
            applying the projection layer to the pooled output of [`CLIPSegVisionModel`].

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPSegModel

        >>> processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
        >>> model = CLIPSegModel.from_pretrained("CIDAS/clipseg-rd64-refined")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> image_features = model.get_image_features(**inputs)
        ```"""
        # Use CLIPSEG model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = vision_outputs[1]  # pooled_output
        image_features = self.visual_projection(pooled_output)

        return image_features

    @add_start_docstrings_to_model_forward(CLIPSEG_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CLIPSegOutput, config_class=CLIPSegConfig)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        return_loss: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CLIPSegOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPSegModel

        >>> processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
        >>> model = CLIPSegModel.from_pretrained("CIDAS/clipseg-rd64-refined")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(
        ...     text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
        ... )

        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```"""
        # Use CLIPSEG model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Project both pooled outputs into the shared embedding space.
        image_embeds = vision_outputs[1]
        image_embeds = self.visual_projection(image_embeds)

        text_embeds = text_outputs[1]
        text_embeds = self.text_projection(text_embeds)

        # normalized features
        image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
        text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
        logits_per_image = logits_per_text.t()

        loss = None
        if return_loss:
            # Symmetric contrastive (CLIP) loss over the text/image logit matrix.
            loss = clipseg_loss(logits_per_text)

        if not return_dict:
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
            return ((loss,) + output) if loss is not None else output

        return CLIPSegOutput(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )
class CLIPSegDecoderLayer(nn.Module):
    """
    CLIPSeg decoder layer, which is identical to `CLIPSegEncoderLayer`, except that normalization is applied after
    self-attention/MLP, rather than before.
    """

    # Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer.__init__ with CLIP->CLIPSeg
    def __init__(self, config: CLIPSegConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = CLIPSegAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = CLIPSegMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Post-norm transformer block: residual connections are added *before* each LayerNorm.

        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            causal_attention_mask (`torch.FloatTensor`): causal mask of the same additive form, or `None`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        attn_output, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )

        # Attention sub-block: add the residual first, then normalize (post-norm).
        normed = self.layer_norm1(hidden_states + attn_output)
        # MLP sub-block, same residual-then-norm ordering.
        hidden_states = self.layer_norm2(normed + self.mlp(normed))

        if output_attentions:
            return (hidden_states, attn_weights)
        return (hidden_states,)
class CLIPSegDecoder(CLIPSegPreTrainedModel):
    """
    Lightweight transformer decoder that fuses a conditional (prompt) embedding with intermediate
    CLIP vision activations via FiLM conditioning, then upsamples the result with (transposed)
    convolutions into a per-pixel segmentation logit map.
    """

    def __init__(self, config: CLIPSegConfig):
        super().__init__(config)

        self.conditional_layer = config.conditional_layer

        # FiLM conditioning: per-channel scale and shift derived from the prompt embedding.
        self.film_mul = nn.Linear(config.projection_dim, config.reduce_dim)
        self.film_add = nn.Linear(config.projection_dim, config.reduce_dim)

        if config.use_complex_transposed_convolution:
            # Two-stage upsampling (each stage upsamples by patch_size // 4) with a conv stem.
            transposed_kernels = (config.vision_config.patch_size // 4, config.vision_config.patch_size // 4)

            self.transposed_convolution = nn.Sequential(
                nn.Conv2d(config.reduce_dim, config.reduce_dim, kernel_size=3, padding=1),
                nn.ReLU(),
                nn.ConvTranspose2d(
                    config.reduce_dim,
                    config.reduce_dim // 2,
                    kernel_size=transposed_kernels[0],
                    stride=transposed_kernels[0],
                ),
                nn.ReLU(),
                nn.ConvTranspose2d(
                    config.reduce_dim // 2, 1, kernel_size=transposed_kernels[1], stride=transposed_kernels[1]
                ),
            )
        else:
            # Single transposed convolution: one patch_size x patch_size pixel block per token.
            self.transposed_convolution = nn.ConvTranspose2d(
                config.reduce_dim, 1, config.vision_config.patch_size, stride=config.vision_config.patch_size
            )

        depth = len(config.extract_layers)
        # One linear "reduce" projection per extracted vision activation (hidden_size -> reduce_dim).
        self.reduces = nn.ModuleList(
            [nn.Linear(config.vision_config.hidden_size, config.reduce_dim) for _ in range(depth)]
        )

        # Decoder layers reuse the vision config but with the reduced hidden size and relu activations.
        decoder_config = copy.deepcopy(config.vision_config)
        decoder_config.hidden_size = config.reduce_dim
        decoder_config.num_attention_heads = config.decoder_num_attention_heads
        decoder_config.intermediate_size = config.decoder_intermediate_size
        decoder_config.hidden_act = "relu"
        self.layers = nn.ModuleList([CLIPSegDecoderLayer(decoder_config) for _ in range(len(config.extract_layers))])

    def forward(
        self,
        hidden_states: Tuple[torch.Tensor],
        conditional_embeddings: torch.Tensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = True,
    ):
        """
        Args:
            hidden_states: extracted CLIP vision activations, ordered shallow -> deep (consumed in reverse).
            conditional_embeddings: prompt embeddings of shape `(batch_size, projection_dim)`.
            output_attentions / output_hidden_states: whether to collect per-layer attentions / states.
            return_dict: return a `CLIPSegDecoderOutput` instead of a plain tuple.
        """
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        # Process the extracted activations deepest-first.
        activations = hidden_states[::-1]

        output = None
        for i, (activation, layer, reduce) in enumerate(zip(activations, self.layers, self.reduces)):
            if output is not None:
                # Skip-connection: add the reduced activation to the running decoder state.
                output = reduce(activation) + output
            else:
                output = reduce(activation)

            if i == self.conditional_layer:
                # FiLM: modulate the activations with the conditional embedding
                # (the permutes broadcast the per-batch scale/shift over the sequence dim).
                output = self.film_mul(conditional_embeddings) * output.permute(1, 0, 2) + self.film_add(
                    conditional_embeddings
                )
                output = output.permute(1, 0, 2)

            layer_outputs = layer(
                output, attention_mask=None, causal_attention_mask=None, output_attentions=output_attentions
            )

            output = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states += (output,)

            if output_attentions:
                all_attentions += (layer_outputs[1],)

        output = output[:, 1:, :].permute(0, 2, 1)  # remove cls token and reshape to [batch_size, reduce_dim, seq_len]

        # assumes a square grid of patch tokens — TODO confirm for non-square inputs
        size = int(math.sqrt(output.shape[2]))

        batch_size = conditional_embeddings.shape[0]
        output = output.view(batch_size, output.shape[1], size, size)

        # Squeeze only the singleton channel dimension. A bare `.squeeze()` would also drop the
        # batch dimension when batch_size == 1, silently changing the output rank.
        logits = self.transposed_convolution(output).squeeze(1)

        if not return_dict:
            return tuple(v for v in [logits, all_hidden_states, all_attentions] if v is not None)

        return CLIPSegDecoderOutput(
            logits=logits,
            hidden_states=all_hidden_states,
            attentions=all_attentions,
        )
@add_start_docstrings(
    """
    CLIPSeg model with a Transformer-based decoder on top for zero-shot and one-shot image segmentation.
    """,
    CLIPSEG_START_DOCSTRING,
)
class CLIPSegForImageSegmentation(CLIPSegPreTrainedModel):
    config_class = CLIPSegConfig

    def __init__(self, config: CLIPSegConfig):
        super().__init__(config)

        self.config = config

        # Frozen CLIP backbone (gradients are disabled via torch.no_grad in forward).
        self.clip = CLIPSegModel(config)
        # Indices of the vision-encoder layers whose activations feed the decoder.
        self.extract_layers = config.extract_layers

        self.decoder = CLIPSegDecoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_conditional_embeddings(
        self,
        batch_size: Optional[int] = None,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        conditional_pixel_values: Optional[torch.Tensor] = None,
    ):
        """
        Compute one conditional (prompt) embedding per query image, either from prompt texts
        (`input_ids`) or from prompt images (`conditional_pixel_values`). The CLIP backbone is
        run under `torch.no_grad` so the prompts never receive gradients.
        """
        if input_ids is not None:
            # compute conditional embeddings from texts
            if len(input_ids) != batch_size:
                raise ValueError("Make sure to pass as many prompt texts as there are query images")
            with torch.no_grad():
                conditional_embeddings = self.clip.get_text_features(
                    input_ids, attention_mask=attention_mask, position_ids=position_ids
                )
        elif conditional_pixel_values is not None:
            # compute conditional embeddings from images
            if len(conditional_pixel_values) != batch_size:
                raise ValueError("Make sure to pass as many prompt images as there are query images")
            with torch.no_grad():
                conditional_embeddings = self.clip.get_image_features(conditional_pixel_values)
        else:
            raise ValueError(
                "Invalid conditional, should be either provided as `input_ids` or `conditional_pixel_values`"
            )

        return conditional_embeddings

    @add_start_docstrings_to_model_forward(CLIPSEG_INPUTS_DOCSTRING)
    # Fixed: config_class was CLIPSegTextConfig, but this model is configured by CLIPSegConfig.
    @replace_return_docstrings(output_type=CLIPSegImageSegmentationOutput, config_class=CLIPSegConfig)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        conditional_pixel_values: Optional[torch.FloatTensor] = None,
        conditional_embeddings: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CLIPSegImageSegmentationOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoProcessor, CLIPSegForImageSegmentation
        >>> from PIL import Image
        >>> import requests

        >>> processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
        >>> model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> texts = ["a cat", "a remote", "a blanket"]
        >>> inputs = processor(text=texts, images=[image] * len(texts), padding=True, return_tensors="pt")

        >>> outputs = model(**inputs)

        >>> logits = outputs.logits
        >>> print(logits.shape)
        torch.Size([3, 352, 352])
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # step 1: forward the query images through the frozen CLIP vision encoder
        with torch.no_grad():
            vision_outputs = self.clip.vision_model(
                pixel_values=pixel_values,
                output_attentions=output_attentions,
                output_hidden_states=True,  # we need the intermediate hidden states
                return_dict=return_dict,
            )
            pooled_output = self.clip.visual_projection(vision_outputs[1])

            hidden_states = vision_outputs.hidden_states if return_dict else vision_outputs[2]
            # we add +1 here as the hidden states also include the initial embeddings
            activations = [hidden_states[i + 1] for i in self.extract_layers]

            # Rebuild vision_outputs so it only exposes hidden states when the caller asked for them.
            if return_dict:
                vision_outputs = BaseModelOutputWithPooling(
                    last_hidden_state=vision_outputs.last_hidden_state,
                    pooler_output=vision_outputs.pooler_output,
                    hidden_states=vision_outputs.hidden_states if output_hidden_states else None,
                    attentions=vision_outputs.attentions,
                )
            else:
                vision_outputs = (
                    vision_outputs[:2] + vision_outputs[3:] if not output_hidden_states else vision_outputs
                )

        # step 2: compute conditional embeddings, either from text, images or an own provided embedding
        if conditional_embeddings is None:
            conditional_embeddings = self.get_conditional_embeddings(
                batch_size=pixel_values.shape[0],
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                conditional_pixel_values=conditional_pixel_values,
            )
        else:
            if conditional_embeddings.shape[0] != pixel_values.shape[0]:
                raise ValueError(
                    "Make sure to pass as many conditional embeddings as there are query images in the batch"
                )
            if conditional_embeddings.shape[1] != self.config.projection_dim:
                raise ValueError(
                    "Make sure that the feature dimension of the conditional embeddings matches"
                    " `config.projection_dim`."
                )

        # step 3: forward both the pooled output and the activations through the lightweight decoder to predict masks
        decoder_outputs = self.decoder(
            activations,
            conditional_embeddings,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = decoder_outputs.logits if return_dict else decoder_outputs[0]

        loss = None
        if labels is not None:
            # Per-pixel binary segmentation loss; labels are expected to be float masks
            # matching the logits' shape (BCEWithLogitsLoss requirement).
            loss_fn = nn.BCEWithLogitsLoss()
            loss = loss_fn(logits, labels)

        if not return_dict:
            output = (logits, conditional_embeddings, pooled_output, vision_outputs, decoder_outputs)
            return ((loss,) + output) if loss is not None else output

        return CLIPSegImageSegmentationOutput(
            loss=loss,
            logits=logits,
            conditional_embeddings=conditional_embeddings,
            pooled_output=pooled_output,
            vision_model_output=vision_outputs,
            decoder_output=decoder_outputs,
        )
|
27182812/ChatGLM-LLaMA-chinese-insturct | 1,868 | src/transformers/models/nllb/__init__.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import registry: maps submodule name -> public names it defines.
_import_structure = {}

# The slow (sentencepiece-based) tokenizer is only exported when sentencepiece is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

# The fast (Rust-backed) tokenizer is only exported when the `tokenizers` library is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]


# At type-check time, perform the real imports so static analyzers see the symbols;
# at runtime, replace this module with a _LazyModule that defers the imports.
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 19,301 | src/transformers/models/nllb/tokenization_nllb.py | # coding=utf-8
# Copyright 2022 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 
'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
# fmt: on
class NllbTokenizer(PreTrainedTokenizer):
"""
Construct an NLLB tokenizer.
Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
[SentencePiece](https://github.com/google/sentencepiece).
The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
<tokens> <eos>` for target language documents.
Examples:
```python
>>> from transformers import NllbTokenizer
>>> tokenizer = NllbTokenizer.from_pretrained(
... "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
... )
>>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
>>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie."
>>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt")
```
Args:
vocab_file (`str`):
Path to the vocabulary file.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
tokenizer_file (`str`, *optional*):
The path to a tokenizer file to use instead of the vocab file.
src_lang (`str`, *optional*):
The language to use as source language for translation.
tgt_lang (`str`, *optional*):
The language to use as target language for translation.
sp_model_kwargs (`Dict[str, str]`):
Additional keyword arguments to pass to the model initialization.
"""
vocab_files_names = VOCAB_FILES_NAMES
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ["input_ids", "attention_mask"]
prefix_tokens: List[int] = []
suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        """Load the SentencePiece model and build the fairseq-aligned vocabulary
        (special tokens + language codes) on top of it."""
        # Mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        # Forwarded to `SentencePieceProcessor` below.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token has position 4 in the original fairseq vocab and position 3 in the
        # spm vocab (see table above), hence the offset of 1 applied to every spm id.
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        # Language codes are appended after the spm vocabulary, in FAIRSEQ_LANGUAGE_CODES order.
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        # <mask> is placed last, after all language codes.
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        # Install the suffix tokens ([eos, src_lang_code]) for the initial source language.
        self.set_src_lang_special_tokens(self._src_lang)
def __getstate__(self):
state = self.__dict__.copy()
state["sp_model"] = None
state["sp_model_proto"] = self.sp_model.serialized_model_proto()
return state
    def __setstate__(self, d):
        """Restore pickled state and rebuild the SentencePiece processor from the
        serialized proto captured by `__getstate__`."""
        self.__dict__ = d

        # for backward compatibility with pickles created before `sp_model_kwargs` existed
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def vocab_size(self):
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
    @property
    def src_lang(self) -> str:
        """The currently active source language code (e.g. `"eng_Latn"`)."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        # Changing the source language immediately refreshes the special tokens
        # appended to encoded inputs.
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
prefix_ones = [1] * len(self.prefix_tokens)
suffix_ones = [1] * len(self.suffix_tokens)
if token_ids_1 is None:
return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An NLLB sequence has the following format, where `X` represents the sequence:
- `input_ids` (for encoder) `X [eos, src_lang_code]`
- `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`
BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
separator.
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0 + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def _build_translation_inputs(
self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
):
"""Used by translation pipeline, to prepare inputs for the generate function"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
self.src_lang = src_lang
inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
inputs["forced_bos_token_id"] = tgt_lang_id
return inputs
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
    def _tokenize(self, text: str) -> List[str]:
        """Split `text` into SentencePiece sub-word pieces (string form)."""
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        # Fairseq-aligned specials and language codes take precedence over spm ids.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # SentencePiece returns 0 for unknown pieces; map that to `unk_token_id`
        # instead of shifting it by the fairseq offset.
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        # Specials and language codes live in the fairseq-aligned table ...
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        # ... everything else is an spm piece, shifted by the fairseq offset.
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
return out_string
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, "wb") as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        """Set the active source/target languages, then delegate batch encoding to the superclass."""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        # Re-apply the source-language special tokens (suffix = [eos, src_lang_code]).
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        # Re-apply the target-language special tokens (suffix = [eos, tgt_lang_code]).
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
def set_src_lang_special_tokens(self, src_lang) -> None:
"""Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
self.cur_lang_code = self.lang_code_to_id[src_lang]
self.prefix_tokens = []
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
def set_tgt_lang_special_tokens(self, lang: str) -> None:
"""Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
self.cur_lang_code = self.lang_code_to_id[lang]
self.prefix_tokens = []
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
|
27182812/ChatGLM-LLaMA-chinese-insturct | 16,314 | src/transformers/models/nllb/tokenization_nllb_fast.py | # coding=utf-8
# Copyright 2022 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
NllbTokenizer = None
logger = logging.get_logger(__name__)
# On-disk filenames for the two vocabulary assets a fast NLLB tokenizer may carry.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

# Hosted locations of the reference checkpoint's vocabulary assets.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input length (in tokens) supported by each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 
'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
# fmt: on
class NllbTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" NLLB tokenizer (backed by HuggingFace's *tokenizers* library). Based on
    [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
    <tokens> <eos>` for target language documents.

    Examples:

    ```python
    >>> from transformers import NllbTokenizerFast

    >>> tokenizer = NllbTokenizerFast.from_pretrained(
    ...     "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    ... )
    >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
    >>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie."
    >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt")
    ```

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        tokenizer_file (`str`, *optional*):
            The path to a tokenizer file to use instead of the vocab file.
        src_lang (`str`, *optional*):
            The language to use as source language for translation.
        tgt_lang (`str`, *optional*):
            The language to use as target language for translation.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    # Ids placed before / after every encoded sequence; refreshed whenever the
    # active language changes (see `set_src_lang_special_tokens` /
    # `set_tgt_lang_special_tokens`). With the NLLB scheme the prefix stays empty
    # and the suffix is `[eos, lang_code]`.
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Converting back to a slow tokenizer requires the original sentencepiece model file.
        self.can_save_slow_tokenizer = bool(self.vocab_file)

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        # Install the suffix tokens ([eos, src_lang_code]) for the initial source language.
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """The currently active source language code (e.g. `"eng_Latn"`)."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        # Changing the source language immediately refreshes the backend post-processor.
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. The special tokens depend on calling set_lang.

        An NLLB sequence has the following format, where `X` represents the sequence:

        - `input_ids` (for encoder) `X [eos, src_lang_code]`
        - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`

        BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
        separator.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not
        make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        """Set the active source/target languages, then delegate batch encoding to the superclass."""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        # Re-apply the source-language special tokens (suffix = [eos, src_lang_code]).
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        # Re-apply the target-language special tokens (suffix = [eos, tgt_lang_code]).
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def _update_post_processor(self) -> None:
        """Rebuild the backend tokenizer's post-processor from the current
        `prefix_tokens` / `suffix_tokens` so encoded sequences get the
        language-dependent special tokens attached."""
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        self._update_post_processor()

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        self._update_post_processor()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocabulary file into `save_directory`.

        Only possible when the tokenizer was constructed from a sentencepiece
        file (`can_save_slow_tokenizer`); a tokenizer built purely from
        `tokenizer.json` cannot reconstruct it."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 9,312 | src/transformers/models/speech_to_text/configuration_speech_to_text.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Speech2Text model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# Mapping from checkpoint identifier to the hosted location of its config file.
SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Speech2TextModel`]. It is used to instantiate an
    Speech2Text model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Speech2Text
    [facebook/s2t-small-librispeech-asr](https://huggingface.co/facebook/s2t-small-librispeech-asr) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 10000):
            Vocabulary size of the Speech2Text model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`Speech2TextModel`]
        d_model (`int`, *optional*, defaults to 256):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 4):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 4):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        scale_embedding (`bool`, *optional*, defaults to `True`):
            Whether the token embeddings are scaled by `sqrt(d_model)`.
        max_source_positions (`int`, *optional*, defaults to 6000):
            The maximum sequence length of log-mel filter-bank features that this model might ever be used with.
        max_target_positions (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        num_conv_layers (`int`, *optional*, defaults to 2):
            Number of 1D convolutional layers in the conv module.
        conv_kernel_sizes (`Tuple[int]`, *optional*, defaults to `(5, 5)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the conv module. The length
            of `conv_kernel_sizes` has to match `num_conv_layers`.
        conv_channels (`int`, *optional*, defaults to 1024):
            An integer defining the number of output channels of each convolution layers except the final one in the
            conv module.
        input_feat_per_channel (`int`, *optional*, defaults to 80):
            An integer specifying the size of feature vector. This is also the dimensions of log-mel filter-bank
            features.
        input_channels (`int`, *optional*, defaults to 1):
            An integer specifying number of input channels of the input feature vector.

    Example:

    ```python
    >>> from transformers import Speech2TextConfig, Speech2TextModel

    >>> # Initializing a Speech2Text s2t_transformer_s style configuration
    >>> configuration = Speech2TextConfig()

    >>> # Initializing a model (with random weights) from the s2t_transformer_s style configuration
    >>> model = Speech2TextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Generic attribute names resolved onto model-specific config fields.
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # Mirror of `encoder_layers` under the generic attribute name.
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        # One kernel size must be given per convolutional layer.
        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
|
27182812/ChatGLM-LLaMA-chinese-insturct | 4,817 | src/transformers/models/speech_to_text/processing_speech_to_text.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Speech processor class for Speech2Text
"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    r"""
    Constructs a Speech2Text processor which wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a
    single processor.
    [`Speech2TextProcessor`] offers all the functionalities of [`Speech2TextFeatureExtractor`] and
    [`Speech2TextTokenizer`]. See the [`~Speech2TextProcessor.__call__`] and [`~Speech2TextProcessor.decode`] for more
    information.
    Args:
        feature_extractor (`Speech2TextFeatureExtractor`):
            An instance of [`Speech2TextFeatureExtractor`]. The feature extractor is a required input.
        tokenizer (`Speech2TextTokenizer`):
            An instance of [`Speech2TextTokenizer`]. The tokenizer is a required input.
    """
    # Class names used by `ProcessorMixin` to resolve/validate the two wrapped components.
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Default target of `__call__`; temporarily swapped to the tokenizer inside
        # the deprecated `as_target_processor` context manager.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        """
        When used in normal mode, this method forwards all its arguments to Speech2TextFeatureExtractor's
        [`~Speech2TextFeatureExtractor.__call__`] and returns its output. If used in the context
        [`~Speech2TextProcessor.as_target_processor`] this method forwards all its arguments to Speech2TextTokenizer's
        [`~Speech2TextTokenizer.__call__`]. Please refer to the docstring of the above two methods for more
        information.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        # `raw_speech` is the legacy name of the `audio` keyword argument.
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # The first positional argument is treated as the audio input.
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            # Both modalities given: attach the tokenized text as labels of the audio features.
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to Speech2TextTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to Speech2TextTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer
        to the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        """
        Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning
        Speech2Text.
        """
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        # Route `__call__` to the tokenizer for the duration of the `with` block.
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
|
27182812/ChatGLM-LLaMA-chinese-insturct | 3,845 | src/transformers/models/speech_to_text/__init__.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)
# Mapping of submodule name -> public names, consumed by `_LazyModule` below so that
# heavy optional dependencies are only imported when a name is first accessed.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}
# The tokenizer is only available when sentencepiece is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
# The feature extractor is only available when the speech extras are installed.
try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]
# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
# Static type checkers see real imports; at runtime the lazy module in the `else`
# branch at the bottom is installed instead.
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
    import sys
    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 67,047 | src/transformers/models/speech_to_text/modeling_speech_to_text.py | # coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Speech2Text model."""
import math
import random
from typing import Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_speech_to_text import Speech2TextConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "Speech2TextConfig"
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/s2t-small-librispeech-asr",
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
]
# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """
    Shift `input_ids` one position to the right, inserting `decoder_start_token_id`
    at the front; used to build decoder inputs from labels.

    Any `-100` (the loss ignore index) surviving in the shifted tensor is replaced
    by `pad_token_id` so the decoder never embeds an invalid id.
    """
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id

    if pad_token_id is None:
        raise ValueError("self.model.config.pad_token_id has to be defined.")
    # replace possible -100 values in labels by `pad_token_id`
    shifted.masked_fill_(shifted == -100, pad_token_id)

    return shifted
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
class Conv1dSubsampler(nn.Module):
    """
    Convolutional subsampler: a stack of 1D convolutions (along the temporal
    dimension), each followed by a gated linear unit (GLU) non-linearity
    (https://arxiv.org/abs/1911.08460). Every layer uses stride 2, so the time
    axis is roughly halved per layer.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.num_layers = config.num_conv_layers
        self.in_channels = config.input_feat_per_channel * config.input_channels
        self.mid_channels = config.conv_channels
        self.out_channels = config.d_model
        self.kernel_sizes = config.conv_kernel_sizes

        convolutions = []
        for layer_index, kernel_size in enumerate(self.kernel_sizes):
            # GLU halves the channel dimension, hence the doubled outputs of the
            # final layer and the halved inputs of every layer after the first.
            input_channels = self.in_channels if layer_index == 0 else self.mid_channels // 2
            output_channels = self.mid_channels if layer_index < self.num_layers - 1 else self.out_channels * 2
            convolutions.append(
                nn.Conv1d(
                    input_channels,
                    output_channels,
                    kernel_size=kernel_size,
                    stride=2,
                    padding=kernel_size // 2,
                )
            )
        self.conv_layers = nn.ModuleList(convolutions)

    def forward(self, input_features):
        # (batch, time, features) -> (batch, features, time) as expected by Conv1d
        hidden_states = input_features.transpose(1, 2).contiguous()
        for conv in self.conv_layers:
            hidden_states = nn.functional.glu(conv(hidden_states), dim=1)
        # back to (batch, time', channels)
        return hidden_states.transpose(1, 2).contiguous()
class Speech2TextSinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length."""
    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
        super().__init__()
        # The table is made 2 entries longer than strictly needed (see fairseq's
        # sinusoidal embedding, from which this is adapted).
        self.offset = 2
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
    def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        # (Re)build the sinusoidal table; also called from `forward` when a longer
        # sequence than currently supported is encountered.
        emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
        if hasattr(self, "weights"):
            # in forward put the weights on the correct dtype and device of the param
            emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
        # Stored as a frozen Parameter so the table follows `.to()` / state-dict handling
        # while never receiving gradients.
        self.weights = nn.Parameter(emb_weights)
        self.weights.requires_grad = False
        self.weights.detach_()
    @staticmethod
    def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        """
        Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
        description in Section 3.5 of "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero pad
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            # The padding position gets an all-zero embedding.
            emb[padding_idx, :] = 0
        return emb.to(torch.get_default_dtype())
    @torch.no_grad()
    def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
        bsz, seq_len = input_ids.size()
        # Create the position ids from the input token ids. Any padded tokens remain padded.
        position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
            input_ids.device
        )
        # expand embeddings if needed
        max_pos = self.padding_idx + 1 + seq_len
        if max_pos > self.weights.size(0):
            self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
        # Gather one embedding row per position id; detach keeps the output out of autograd.
        return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach()
    def create_position_ids_from_input_ids(
        self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0
    ):
        """
        Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
        symbols are ignored. This is modified from fairseq's `utils.make_positions`.
        Args:
            input_ids (`torch.Tensor`): Token ids of shape `(batch_size, seq_len)`.
            padding_idx (`int`): Id of the padding token; those positions keep `padding_idx` as position id.
            past_key_values_length (`int`, *optional*): Offset added when decoding with a cache.
        Returns: torch.Tensor
        """
        # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
        mask = input_ids.ne(padding_idx).int()
        incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
        return incremental_indices.long() + padding_idx
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Speech2Text
class Speech2TextAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""
    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        # 1/sqrt(head_dim) scaling, applied to the queries before the dot product.
        self.scaling = self.head_dim**-0.5
        # Decoder instances cache key/value states across generation steps.
        self.is_decoder = is_decoder
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # (bsz, seq_len, embed_dim) -> (bsz, num_heads, seq_len, head_dim)
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""
        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        bsz, tgt_len, _ = hidden_states.size()
        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        # `past_key_value[0].shape[2] == key_value_states.shape[1]`
        # is checking that the `sequence_length` of the `past_key_value` is the same as
        # the provided `key_value_states` to support prefix tuning
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == key_value_states.shape[1]
        ):
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)
        # Fold the head dimension into the batch dimension so `bmm` can be used.
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.reshape(*proj_shape)
        value_states = value_states.reshape(*proj_shape)
        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )
        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            # The mask is additive: masked positions hold large negative values.
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None
        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_probs, value_states)
        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )
        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
        attn_output = self.out_proj(attn_output)
        return attn_output, attn_weights_reshaped, past_key_value
# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Speech2Text
class Speech2TextEncoderLayer(nn.Module):
    # Pre-layer-norm transformer encoder block: self-attention followed by a
    # two-layer feed-forward network, each with a residual connection.
    def __init__(self, config: Speech2TextConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = Speech2TextAttention(
            embed_dim=self.embed_dim,
            num_heads=config.encoder_attention_heads,
            dropout=config.attention_dropout,
        )
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        layer_head_mask: torch.Tensor,
        output_attentions: bool = False,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        # Self-attention sub-block (layer norm applied before attention, residual after).
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states, attn_weights, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        # Feed-forward sub-block.
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        # Clamp fp16 activations that overflowed to inf/nan after the residual additions.
        if hidden_states.dtype == torch.float16 and (
            torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
        ):
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs
# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Speech2Text
class Speech2TextDecoderLayer(nn.Module):
    # Pre-layer-norm transformer decoder block: masked self-attention, optional
    # encoder-decoder cross-attention, then a feed-forward network, each with a
    # residual connection.
    def __init__(self, config: Speech2TextConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = Speech2TextAttention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.encoder_attn = Speech2TextAttention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size `(decoder_attention_heads,)`.
            past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)
            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                output_attentions=output_attentions,
            )
            hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
            hidden_states = residual + hidden_states
            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value
        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)
        if use_cache:
            outputs += (present_key_value,)
        return outputs
class Speech2TextPreTrainedModel(PreTrainedModel):
    # Base class wiring Speech2Text models into the generic `PreTrainedModel`
    # machinery (config class, weight init, gradient checkpointing).
    config_class = Speech2TextConfig
    base_model_prefix = "model"
    main_input_name = "input_features"
    supports_gradient_checkpointing = True
    def _init_weights(self, module):
        # Normal(0, init_std) init for linear/conv weights and embeddings; biases and
        # the padding row of embedding tables are zeroed.
        std = self.config.init_std
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
    def _set_gradient_checkpointing(self, module, value=False):
        # Toggled recursively by `PreTrainedModel` on every submodule.
        if isinstance(module, (Speech2TextDecoder, Speech2TextEncoder)):
            module.gradient_checkpointing = value
    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
        """
        Computes the output length of the convolutional layers
        """
        # Each conv layer has stride 2, so the length is (L - 1) // 2 + 1 == ceil(L / 2) per layer.
        for i in range(self.config.num_conv_layers):
            input_lengths = (input_lengths - 1) // 2 + 1
        return input_lengths
    def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask):
        # generate creates 3D attention mask, because of the shape of input_features
        # convert it to 2D if thats the case
        if len(attention_mask.shape) > 2:
            attention_mask = attention_mask[:, :, -1]
        # Map each example's un-padded input length to its length after subsampling.
        subsampled_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1))
        bsz = attention_mask.size()[0]
        attention_mask = torch.zeros(
            (bsz, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
        )
        # these two operations makes sure that all values
        # before the output lengths indices are attended to
        attention_mask[(torch.arange(bsz, device=attention_mask.device), subsampled_lengths - 1)] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).long()
        return attention_mask
SPEECH_TO_TEXT_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`Speech2TextConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SPEECH_TO_TEXT_INPUTS_DOCSTRING = r"""
Args:
input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`):
Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained
by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.*
via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
[`AutoFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a
tensor of type `torch.FloatTensor`. See [`~Speech2TextFeatureExtractor.__call__`]
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`SpeechToTextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
SpeechToText uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read
[`modeling_speech_to_text._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the
paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
input (see `past_key_values`). This is useful if you want more control over how to convert
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class Speech2TextEncoder(Speech2TextPreTrainedModel):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`Speech2TextEncoderLayer`].
    Args:
        config: Speech2TextConfig
        embed_tokens (nn.Embedding): output embedding
    """
    def __init__(self, config: Speech2TextConfig):
        super().__init__(config)
        self.dropout = config.dropout
        # Probability of dropping a whole encoder layer during training (LayerDrop).
        self.layerdrop = config.encoder_layerdrop
        embed_dim = config.d_model
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_source_positions
        # Standard Transformer convention: optionally scale embeddings by sqrt(d_model).
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
        # Convolutional front-end applied to the raw fbank features before the
        # attention stack (it also shortens the sequence; see the attention-mask
        # subsampling in `forward`).
        self.conv = Conv1dSubsampler(config)
        self.embed_positions = Speech2TextSinusoidalPositionalEmbedding(
            self.max_source_positions,
            embed_dim,
            self.padding_idx,
        )
        self.layers = nn.ModuleList([Speech2TextEncoderLayer(config) for _ in range(config.encoder_layers)])
        # Final LayerNorm applied after the last encoder layer.
        self.layer_norm = nn.LayerNorm(config.d_model)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()
    def forward(
        self,
        input_features,
        attention_mask=None,
        head_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Args:
            input_features (`torch.LongTensor` of shape `(batch_size, sequence_length, feature_size)`):
                Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be
                obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a
                `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
                `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the fbank features,
                padding and conversion into a tensor of type `torch.FloatTensor`. See
                [`~Speech2TextFeatureExtractor.__call__`]
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
                `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        # Fall back to the config defaults for any unspecified output flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        inputs_embeds = self.conv(input_features)
        inputs_embeds = self.embed_scale * inputs_embeds
        # subsample attention mask if necessary
        if attention_mask is not None:
            # The conv front-end changed the sequence length, so shrink the mask to match.
            attention_mask = self._get_feature_vector_attention_mask(inputs_embeds.shape[1], attention_mask)
            padding_mask = attention_mask.ne(1).long()
        else:
            # No mask given: treat every position as non-padding.
            padding_mask = torch.zeros(inputs_embeds.shape[:2], dtype=torch.long, device=inputs_embeds.device)
        embed_pos = self.embed_positions(padding_mask)
        hidden_states = inputs_embeds + embed_pos
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        # expand attention_mask
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            assert head_mask.size()[0] == (
                len(self.layers)
            ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                # Record the hidden state *entering* each layer.
                encoder_states = encoder_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):  # skip the layer
                layer_outputs = (None, None)
            else:
                if self.gradient_checkpointing and self.training:
                    # Re-compute activations in the backward pass to save memory.
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)
                        return custom_forward
                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(encoder_layer),
                        hidden_states,
                        attention_mask,
                        (head_mask[idx] if head_mask is not None else None),
                    )
                else:
                    layer_outputs = encoder_layer(
                        hidden_states,
                        attention_mask,
                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                        output_attentions=output_attentions,
                    )
                hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        hidden_states = self.layer_norm(hidden_states)
        if output_hidden_states:
            # Append the final (post-LayerNorm) hidden state.
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            # Tuple output: drop any entries that were not requested (are None).
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
class Speech2TextDecoder(Speech2TextPreTrainedModel):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`Speech2TextDecoderLayer`]
    Args:
        config: Speech2TextConfig
        embed_tokens (nn.Embedding): output embedding
    """
    def __init__(self, config: Speech2TextConfig):
        super().__init__(config)
        self.dropout = config.dropout
        # Probability of dropping a whole decoder layer during training (LayerDrop).
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_target_positions
        # Standard Transformer convention: optionally scale embeddings by sqrt(d_model).
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
        self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
        self.embed_positions = Speech2TextSinusoidalPositionalEmbedding(
            self.max_target_positions,
            config.d_model,
            self.padding_idx,
        )
        self.layers = nn.ModuleList([Speech2TextDecoderLayer(config) for _ in range(config.decoder_layers)])
        # Final LayerNorm applied after the last decoder layer.
        self.layer_norm = nn.LayerNorm(config.d_model)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        return self.embed_tokens
    def set_input_embeddings(self, value):
        self.embed_tokens = value
    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
            ).to(inputs_embeds.device)
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
            # Combine padding mask with the causal mask (additive masks of -inf/0).
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )
        return combined_attention_mask
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        head_mask=None,
        cross_attn_head_mask=None,
        past_key_values=None,
        inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.
                Indices can be obtained using [`Speech2TextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
                on hidden heads. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated
                vectors than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        # Fall back to the config defaults for any unspecified flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, input_shape, inputs_embeds, past_key_values_length
        )
        # expand encoder attention mask
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
        # embed positions
        positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)
        hidden_states = inputs_embeds + positions
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
        next_decoder_cache = () if use_cache else None
        # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
        for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
            if attn_mask is not None:
                # Fix: report the size of the mask being checked (`attn_mask`), not
                # `head_mask` — the old message printed the wrong tensor's size for
                # `cross_attn_head_mask` and crashed with AttributeError when
                # `head_mask` was None.
                assert attn_mask.size()[0] == (len(self.layers)), (
                    f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
                    f" {attn_mask.size()[0]}."
                )
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):
                continue
            past_key_value = past_key_values[idx] if past_key_values is not None else None
            if self.gradient_checkpointing and self.training:
                if use_cache:
                    logger.warning(
                        "`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache ="
                        " False`..."
                    )
                    use_cache = False
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, output_attentions, use_cache)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    head_mask[idx] if head_mask is not None else None,
                    cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                    cross_attn_layer_head_mask=(
                        cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
                    ),
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )
            hidden_states = layer_outputs[0]
            if use_cache:
                # The present key/value tuple sits after the attention tensors when
                # attentions are returned.
                next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
            if output_attentions:
                all_self_attns += (layer_outputs[1],)
                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)
        hidden_states = self.layer_norm(hidden_states)
        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            # Tuple output: drop any entries that were not requested (are None).
            return tuple(
                v
                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )
@add_start_docstrings(
    "The bare Speech2Text Model outputting raw hidden-states without any specific head on top.",
    SPEECH_TO_TEXT_START_DOCSTRING,
)
class Speech2TextModel(Speech2TextPreTrainedModel):
    def __init__(self, config: Speech2TextConfig):
        super().__init__(config)
        self.encoder = Speech2TextEncoder(config)
        self.decoder = Speech2TextDecoder(config)
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        # Only the decoder has token embeddings; the encoder consumes audio features.
        return self.decoder.embed_tokens
    def set_input_embeddings(self, value):
        self.decoder.embed_tokens = value
    def get_encoder(self):
        return self.encoder
    def get_decoder(self):
        return self.decoder
    @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
    # Fix: this bare model returns a `Seq2SeqModelOutput` (see the final return
    # below), not a `Seq2SeqLMOutput` — the decorator and annotation previously
    # advertised the wrong output type.
    @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_features: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
        r"""
        Returns:
        Example:
        ```python
        >>> import torch
        >>> from transformers import Speech2TextModel, AutoFeatureExtractor
        >>> from datasets import load_dataset
        >>> model = Speech2TextModel.from_pretrained("facebook/s2t-small-librispeech-asr")
        >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr")
        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> inputs = feature_extractor(
        ...     ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt"
        ... )
        >>> input_features = inputs.input_features
        >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
        >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state
        >>> list(last_hidden_state.shape)
        [1, 2, 256]
        ```"""
        # Fall back to the config defaults for any unspecified flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_features,
                attention_mask=attention_mask,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )
        # downsample encoder attention mask
        if attention_mask is not None:
            # The encoder's conv front-end shortened the sequence, so the original
            # feature-level mask must be shrunk to the encoder output length.
            encoder_attention_mask = self._get_feature_vector_attention_mask(
                encoder_outputs[0].shape[1], attention_mask
            )
        else:
            encoder_attention_mask = None
        # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=encoder_attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if not return_dict:
            return decoder_outputs + encoder_outputs
        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    "The Speech2Text Model with a language modeling head. Can be used for summarization.",
    SPEECH_TO_TEXT_START_DOCSTRING,
)
class Speech2TextForConditionalGeneration(Speech2TextPreTrainedModel):
    base_model_prefix = "model"
    # Sinusoidal position-embedding weights are excluded from both loading and
    # saving — presumably because they can be recomputed; TODO confirm against
    # Speech2TextSinusoidalPositionalEmbedding.
    _keys_to_ignore_on_load_missing = [
        r"encoder.version",
        r"decoder.version",
        r"model.encoder.embed_positions.weights",
        r"model.decoder.embed_positions.weights",
        r"lm_head.weight",
    ]
    _keys_to_ignore_on_save = [
        r"model.encoder.embed_positions.weights",
        r"model.decoder.embed_positions.weights",
    ]
    def __init__(self, config: Speech2TextConfig):
        super().__init__(config)
        self.model = Speech2TextModel(config)
        # Projection from decoder hidden states to vocabulary logits.
        self.lm_head = nn.Linear(config.d_model, self.config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()
    def get_encoder(self):
        return self.model.get_encoder()
    def get_decoder(self):
        return self.model.get_decoder()
    def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
        # Thin wrapper over the base implementation; kept for API compatibility.
        new_embeddings = super().resize_token_embeddings(new_num_tokens)
        return new_embeddings
    def get_output_embeddings(self):
        return self.lm_head
    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings
    @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_features: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]`
            or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is
            only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        Returns:
        Example:
        ```python
        >>> import torch
        >>> from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration
        >>> from datasets import load_dataset
        >>> model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")
        >>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> inputs = processor(
        ...     ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt"
        ... )
        >>> input_features = inputs.input_features
        >>> generated_ids = model.generate(inputs=input_features)
        >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
        >>> transcription
        'mister quilter is the apostle of the middle classes and we are glad to welcome his gospel'
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            # Teacher forcing: when only labels are supplied, derive decoder inputs
            # by shifting the labels right.
            if decoder_input_ids is None and decoder_inputs_embeds is None:
                decoder_input_ids = shift_tokens_right(
                    labels, self.config.pad_token_id, self.config.decoder_start_token_id
                )
        outputs = self.model(
            input_features,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        lm_logits = self.lm_head(outputs[0])
        loss = None
        if labels is not None:
            # CrossEntropyLoss's default ignore_index (-100) matches the labels
            # convention documented above.
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return Seq2SeqLMOutput(
            loss=loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )
    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past_key_values=None,
        attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs,
    ):
        # cut decoder_input_ids if past is used
        if past_key_values is not None:
            decoder_input_ids = decoder_input_ids[:, -1:]
        return {
            "encoder_outputs": encoder_outputs,
            "past_key_values": past_key_values,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,  # change this to avoid caching (presumably for debugging)
        }
    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        # Reorder each layer's cached key/value tensors along the batch dimension
        # to follow the beams selected by beam search.
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
|
27182812/ChatGLM-LLaMA-chinese-insturct | 11,651 | src/transformers/models/speech_to_text/tokenization_speech_to_text.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for Speech2Text."""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
# Marker sentencepiece uses for a leading-space subword boundary.
SPIECE_UNDERLINE = "▁"
# File names under which the tokenizer's assets are saved/loaded.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}
# Download locations of the vocab/spm files for the published checkpoints.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}
# Maximum input length (in tokens) per published checkpoint.
MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}
# Target languages covered by the MuST-C multilingual checkpoints.
MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
# Maps a `lang_codes` key (see `Speech2TextTokenizer.__init__`) to its language list.
LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
"""
Construct an Speech2Text tokenizer.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer to
the superclass for more information regarding such methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
spm_file (`str`):
Path to the [SentencePiece](https://github.com/google/sentencepiece) model file
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sentence token.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sentence token.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
do_upper_case (`bool`, *optional*, defaults to `False`):
Whether or not to uppercase the output when decoding.
do_lower_case (`bool`, *optional*, defaults to `False`):
Whether or not to lowercase the input when tokenizing.
tgt_lang (`str`, *optional*):
A string representing the target language.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
**kwargs
Additional keyword arguments passed along to [`PreTrainedTokenizer`]
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = MAX_MODEL_INPUT_SIZES
model_input_names = ["input_ids", "attention_mask"]
prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Normalize before forwarding so the base class records a dict, not None.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # All kwargs are forwarded so they are preserved in the saved tokenizer config.
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        # token -> id mapping from vocab.json; decoder is the id -> token inverse.
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        if lang_codes is not None:
            # Multilingual checkpoint: register <lang:xx> tokens as special tokens
            # and default the target language to the first supported one.
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            # Monolingual checkpoint: no language-code prefix tokens.
            self.lang_code_to_id = {}
@property
def vocab_size(self) -> int:
return len(self.encoder)
    @property
    def tgt_lang(self) -> str:
        # Current target language (e.g. "fr"); drives the prefix tokens.
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        # Keep the special-token prefix in sync whenever the target language changes.
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)
def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
    """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos].

    (The eos suffix is appended by `build_inputs_with_special_tokens`; only the
    language-code prefix is stored here.)
    """
    lang_code_id = self.lang_code_to_id[tgt_lang]
    self.prefix_tokens = [lang_code_id]
def _tokenize(self, text: str) -> List[str]:
    # Delegate subword segmentation entirely to the SentencePiece model.
    return self.sp_model.encode(text, out_type=str)
def _convert_token_to_id(self, token):
    """Map a token string to its vocabulary id, falling back to the unk id."""
    unk_id = self.encoder[self.unk_token]
    return self.encoder.get(token, unk_id)
def _convert_id_to_token(self, index: int) -> str:
    """Converts an index (integer) in a token (str) using the decoder."""
    if index in self.decoder:
        return self.decoder[index]
    return self.unk_token
def convert_tokens_to_string(self, tokens: List[str]) -> str:
    """Converts a sequence of tokens (strings for sub-words) in a single string."""
    text = ""
    pending = []  # sub-word pieces not yet decoded by sentencepiece
    for tok in tokens:
        if tok in self.all_special_tokens:
            # Special tokens must bypass the sentencepiece model: decode the
            # accumulated pieces, then splice the special token in verbatim.
            segment = self.sp_model.decode(pending)
            text += (segment.upper() if self.do_upper_case else segment) + tok + " "
            pending = []
        else:
            pending.append(tok)
    # Flush whatever remains after the last special token.
    segment = self.sp_model.decode(pending)
    text += segment.upper() if self.do_upper_case else segment
    return text.strip()
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
    """Build model inputs from a sequence by appending eos_token_id."""
    suffix = [self.eos_token_id]
    if token_ids_1 is None:
        return self.prefix_tokens + token_ids_0 + suffix
    # We don't expect to process pairs, but leave the pair logic for API consistency
    return self.prefix_tokens + token_ids_0 + token_ids_1 + suffix
def get_special_tokens_mask(
    self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
    """
    Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
    special tokens using the tokenizer `prepare_for_model` method.

    Args:
        token_ids_0 (`List[int]`):
            List of IDs.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.
        already_has_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not the token list is already formatted with special tokens for the model.

    Returns:
        `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
    """
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
        )
    # 1s cover the language-code prefix and the trailing eos; 0s cover sequence tokens.
    prefix_mask = [1] * len(self.prefix_tokens)
    body_mask = [0] * len(token_ids_0)
    if token_ids_1 is not None:
        body_mask += [0] * len(token_ids_1)
    return prefix_mask + body_mask + [1]
def get_vocab(self) -> Dict:
    """Return the full vocabulary (base encoder plus tokens added after loading)."""
    vocab = dict(self.encoder)
    vocab.update(self.added_tokens_encoder)
    return vocab
def __getstate__(self) -> Dict:
    """Pickle helper: the SentencePiece processor is not picklable, so drop it.

    `__setstate__` rebuilds it from `spm_file` on load.
    """
    state = dict(self.__dict__)
    state["sp_model"] = None
    return state
def __setstate__(self, d: Dict) -> None:
    """Restore pickled state and rebuild the SentencePiece processor dropped by `__getstate__`."""
    self.__dict__ = d
    # for backward compatibility with instances pickled before `sp_model_kwargs` existed
    if not hasattr(self, "sp_model_kwargs"):
        self.sp_model_kwargs = {}
    self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
    """
    Save the vocabulary JSON and the SentencePiece model file to `save_directory`.

    Args:
        save_directory: Existing directory to write into (asserted, not created).
        filename_prefix: Optional prefix prepended (with a "-") to each file name.

    Returns:
        Tuple of the two written paths: (vocab JSON path, SentencePiece model path).
    """
    save_dir = Path(save_directory)
    assert save_dir.is_dir(), f"{save_directory} should be a directory"
    vocab_save_path = save_dir / (
        (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
    )
    spm_save_path = save_dir / (
        (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
    )
    save_json(self.encoder, vocab_save_path)
    # Copy the original spm file when it exists on disk and is not already the
    # destination; otherwise serialize the in-memory model instead.
    if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
        copyfile(self.spm_file, spm_save_path)
    elif not os.path.isfile(self.spm_file):
        with open(spm_save_path, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
    return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    """Instantiate a SentencePiece processor with `sp_model_kwargs` and load the model at `path`."""
    processor = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    processor.Load(str(path))
    return processor
def load_json(path: str) -> Union[Dict, List]:
    """Read the JSON file at `path` and return the parsed object."""
    with open(path, "r") as handle:
        return json.load(handle)
def save_json(data, path: str) -> None:
    """Serialize `data` as indented JSON to the file at `path`."""
    with open(path, "w") as handle:
        json.dump(data, handle, indent=2)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 69,684 | src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py | # coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TensorFlow Speech2Text model."""
import random
from typing import Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation, glu
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFBaseModelOutputWithPastAndCrossAttentions,
TFSeq2SeqLMOutput,
TFSeq2SeqModelOutput,
)
from ...modeling_tf_utils import (
TFCausalLanguageModelingLoss,
TFModelInputType,
TFPreTrainedModel,
TFSharedEmbeddings,
keras_serializable,
unpack_inputs,
)
from ...tf_utils import shape_list, stable_softmax
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_speech_to_text import Speech2TextConfig
logger = logging.get_logger(__name__)

# Names interpolated into the auto-generated docstrings below.
_CONFIG_FOR_DOC = "Speech2TextConfig"
_CHECKPOINT_FOR_DOC = "facebook/s2t-small-librispeech-asr"

TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/s2t-small-librispeech-asr",
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
]

# Additive attention-mask value: positions masked with this vanish after softmax.
LARGE_NEGATIVE = -1e8
# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right
def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """Shift `input_ids` one position to the right, prepending `decoder_start_token_id`
    and replacing any -100 (loss-ignore) entries with `pad_token_id`."""
    pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
    decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
    start_tokens = tf.fill(
        (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)
    )
    # Drop the last token of each row; every row now starts with the decoder start token.
    shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids = tf.where(
        shifted_input_ids == -100,
        tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)),
        shifted_input_ids,
    )

    # "Verify that `labels` has only positive values and -100"
    assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))

    # Make sure the assertion op is called by wrapping the result in an identity no-op
    with tf.control_dependencies([assert_gte0]):
        shifted_input_ids = tf.identity(shifted_input_ids)

    return shifted_input_ids
# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
    """
    Make causal mask used for bi-directional self-attention.
    """
    bsz = input_ids_shape[0]
    tgt_len = input_ids_shape[1]
    # Start from an all-masked (LARGE_NEGATIVE) square, then zero out the lower triangle
    # (including the diagonal) so each position can attend to itself and the past.
    mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
    mask_cond = tf.range(shape_list(mask)[-1])
    mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)

    if past_key_values_length > 0:
        # Cached past positions are always visible: prepend zero columns.
        mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)

    # Broadcast to (bsz, 1, tgt_len, tgt_len + past_key_values_length).
    return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    src_len = shape_list(mask)[1]
    tgt_len = tgt_len if tgt_len is not None else src_len
    one_cst = tf.constant(1.0)
    mask = tf.cast(mask, dtype=one_cst.dtype)
    expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))

    # Invert: 1 (attend) -> 0, 0 (padded) -> LARGE_NEGATIVE, so it can be added to logits.
    return (one_cst - expanded_mask) * LARGE_NEGATIVE
class TFConv1dSubsampler(tf.keras.layers.Layer):
    """
    Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation
    via gated linear units (https://arxiv.org/abs/1911.08460)
    """

    def __init__(self, config: Speech2TextConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.num_layers = config.num_conv_layers
        self.in_channels = config.input_feat_per_channel * config.input_channels
        self.mid_channels = config.conv_channels
        self.out_channels = config.d_model
        self.kernel_sizes = config.conv_kernel_sizes
        # Each conv halves the time axis (stride 2). Layers output 2x the target
        # channel count so the GLU in `call` can gate them down by half.
        self.conv_layers = [
            tf.keras.layers.Conv1D(
                filters=self.mid_channels if i < self.num_layers - 1 else self.out_channels * 2,
                kernel_size=k,
                strides=2,
                name=f"conv_layers.{i}",
            )
            for i, k in enumerate(self.kernel_sizes)
        ]

    def call(self, input_features: tf.Tensor) -> tf.Tensor:
        """Subsample `input_features` along the time axis; input is Batch x Time x Channels."""
        # TF Conv1D assumes Batch x Time x Channels, same as the input
        hidden_states = tf.cast(input_features, tf.float32)
        for i, conv in enumerate(self.conv_layers):
            # equivalent to `padding=k // 2` on PT's `nn.Conv1d`: zero-pad both ends of time
            pad_len = self.kernel_sizes[i] // 2
            hidden_shapes = shape_list(hidden_states)
            hidden_states = tf.concat(
                (
                    tf.zeros((hidden_shapes[0], pad_len, hidden_shapes[2])),
                    hidden_states,
                    tf.zeros((hidden_shapes[0], pad_len, hidden_shapes[2])),
                ),
                axis=1,
            )
            hidden_states = conv(hidden_states)
            hidden_states = glu(hidden_states, axis=2)  # GLU over the Channel dimension
        return hidden_states
class TFSpeech2TextSinusoidalPositionalEmbedding(tf.keras.layers.Layer):
    """This module produces sinusoidal positional embeddings of any length."""

    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None, **kwargs):
        super().__init__(**kwargs)
        # First `offset` positions are reserved (fairseq convention).
        self.offset = 2
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.embedding_weights = self._get_embedding(num_positions + self.offset, embedding_dim, padding_idx)

    @staticmethod
    def _get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None) -> tf.Tensor:
        """
        Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
        description in Section 3.5 of "Attention Is All You Need".

        Returns a `(num_embeddings, embedding_dim)` float32 tensor.
        """
        half_dim = embedding_dim // 2
        emb = tf.math.log(10000.0) / (half_dim - 1)
        emb = tf.math.exp(tf.range(half_dim, dtype=tf.float32) * -emb)
        emb = tf.expand_dims(tf.range(num_embeddings, dtype=tf.float32), axis=1) * tf.expand_dims(emb, axis=0)
        emb = tf.reshape(tf.concat([tf.math.sin(emb), tf.math.cos(emb)], axis=1), shape=[num_embeddings, -1])
        if embedding_dim % 2 == 1:
            # zero pad the odd dimension. NOTE: the shape must be a single tuple argument --
            # the previous `tf.zeros(num_embeddings, 1)` passed `1` as the *dtype* argument
            # of `tf.zeros(shape, dtype)` and raised a TypeError for odd embedding sizes.
            emb = tf.concat([emb, tf.zeros((num_embeddings, 1))], axis=1)
        if padding_idx is not None:
            # Zero out the padding row so padded tokens carry no positional signal.
            emb = tf.concat([emb[:padding_idx, :], tf.zeros((1, tf.shape(emb)[1])), emb[padding_idx + 1 :, :]], axis=0)
        return emb

    def build(self, input_shape: tf.TensorShape):
        """
        Build shared token embedding layer Shared weights logic adapted from
        https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
        """
        self.embeddings = self.add_weight(
            name="weights",  # name also used in PT
            shape=tf.shape(self.embedding_weights),
            trainable=False,
        )
        self.embeddings.assign(self.embedding_weights)
        super().build(input_shape)

    def call(self, input_ids: tf.Tensor, past_key_values_length: int = 0) -> tf.Tensor:
        """Look up positional embeddings for `input_ids`; returns `(bsz, seq_len, embedding_dim)`."""
        bsz, seq_len = shape_list(input_ids)
        # Create the position ids from the input token ids. Any padded tokens remain padded.
        position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
        # expand embeddings if needed, i.e. when a sequence is longer than any seen so far
        max_pos = self.padding_idx + 1 + seq_len
        if max_pos > shape_list(self.embeddings)[0]:
            self.embedding_weights = self._get_embedding(max_pos + self.offset, self.embedding_dim, self.padding_idx)
            self.embeddings.assign(self.embedding_weights)
        return tf.reshape(tf.gather(self.embeddings, tf.reshape(position_ids, (-1,)), axis=0), (bsz, seq_len, -1))

    @staticmethod
    def create_position_ids_from_input_ids(
        input_ids: tf.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0
    ) -> tf.Tensor:
        """
        Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
        symbols are ignored. This is modified from fairseq's `utils.make_positions`.

        Args:
            input_ids: `(bsz, seq_len)` token-id tensor.
            padding_idx: Id of the padding token; those positions keep `padding_idx` as their position id.
            past_key_values_length: Offset added for cached decoding.
        Returns: tf.Tensor of position ids with the same shape as `input_ids`.
        """
        mask = tf.cast(tf.math.not_equal(input_ids, padding_idx), dtype=tf.int32)
        # cumsum gives 1-based positions over non-pad tokens; multiplying by `mask`
        # resets padded positions back to 0 before the padding_idx shift below.
        incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask
        return tf.cast(incremental_indices, dtype=tf.int64) + padding_idx
# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->Speech2Text
class TFSpeech2TextAttention(tf.keras.layers.Layer):
    """Multi-headed attention from "Attention Is All You Need"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = tf.keras.layers.Dropout(dropout)
        self.head_dim = embed_dim // num_heads
        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        # Scale queries by 1/sqrt(head_dim) (applied in `call` before the matmul).
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder

        self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
        self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
        self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
        self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")

    def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
        # (bsz, seq_len, embed_dim) -> (bsz, num_heads, seq_len, head_dim)
        return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))

    def call(
        self,
        hidden_states: tf.Tensor,
        key_value_states: Optional[tf.Tensor] = None,
        past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None,
        attention_mask: Optional[tf.Tensor] = None,
        layer_head_mask: Optional[tf.Tensor] = None,
        training: Optional[bool] = False,
    ) -> Tuple[tf.Tensor, Optional[tf.Tensor]]:
        """Input shape: Batch x Time x Channel

        Returns `(attn_output, attn_weights, past_key_value)` where `past_key_value`
        is only populated when `is_decoder` is True.
        """

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, embed_dim = shape_list(hidden_states)

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = tf.concat([past_key_value[0], key_states], axis=2)
            value_states = tf.concat([past_key_value[1], value_states], axis=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        # Fold the head dimension into the batch dimension for the batched matmuls.
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
        key_states = tf.reshape(key_states, proj_shape)
        value_states = tf.reshape(value_states, proj_shape)

        src_len = shape_list(key_states)[1]
        attn_weights = tf.matmul(query_states, key_states, transpose_b=True)

        tf.debugging.assert_equal(
            shape_list(attn_weights),
            [bsz * self.num_heads, tgt_len, src_len],
            message=(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {shape_list(attn_weights)}"
            ),
        )

        if attention_mask is not None:
            tf.debugging.assert_equal(
                shape_list(attention_mask),
                [bsz, 1, tgt_len, src_len],
                message=(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
                    f" {shape_list(attention_mask)}"
                ),
            )

            # Mask is additive (LARGE_NEGATIVE at masked positions) and applied before softmax.
            attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
            attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
            attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))

        attn_weights = stable_softmax(attn_weights, axis=-1)

        if layer_head_mask is not None:
            tf.debugging.assert_equal(
                shape_list(layer_head_mask),
                [self.num_heads],
                message=(
                    f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
                    f" {shape_list(layer_head_mask)}"
                ),
            )

            # Per-head multiplicative mask applied after softmax.
            attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
                attn_weights, (bsz, self.num_heads, tgt_len, src_len)
            )
            attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))

        attn_probs = self.dropout(attn_weights, training=training)
        attn_output = tf.matmul(attn_probs, value_states)

        tf.debugging.assert_equal(
            shape_list(attn_output),
            [bsz * self.num_heads, tgt_len, self.head_dim],
            message=(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {shape_list(attn_output)}"
            ),
        )

        # Merge heads back: (bsz*heads, tgt, head_dim) -> (bsz, tgt, embed_dim).
        attn_output = tf.transpose(
            tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
        )
        attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))

        attn_output = self.out_proj(attn_output)
        # Return pre-dropout weights reshaped per head for attention visualization.
        attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))

        return attn_output, attn_weights, past_key_value
class TFSpeech2TextEncoderLayer(tf.keras.layers.Layer):
    """Single pre-layer-norm Transformer encoder block: self-attention + feed-forward."""

    def __init__(self, config: Speech2TextConfig, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = config.d_model
        self.self_attn = TFSpeech2TextAttention(
            self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
        )
        self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        self.activation_fn = get_tf_activation(config.activation_function)
        self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)
        self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
        self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2")
        self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")

    def call(
        self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training: bool = False
    ):
        """
        Args:
            hidden_states (`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
            attention_mask (`tf.Tensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`
        """
        # Pre-LN: normalize before attention, add the raw input back afterwards.
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states, self_attn_weights, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            training=training,
        )

        tf.debugging.assert_equal(
            shape_list(hidden_states),
            shape_list(residual),
            message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
        )

        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states

        # Feed-forward sub-block, also pre-LN.
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = self.activation_dropout(hidden_states, training=training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states

        return hidden_states, self_attn_weights
class TFSpeech2TextDecoderLayer(tf.keras.layers.Layer):
    """Single pre-layer-norm Transformer decoder block: causal self-attention,
    encoder-decoder cross-attention, then feed-forward."""

    def __init__(self, config: Speech2TextConfig, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = config.d_model

        self.self_attn = TFSpeech2TextAttention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            name="self_attn",
            is_decoder=True,
        )
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        self.activation_fn = get_tf_activation(config.activation_function)
        self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)

        self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
        self.encoder_attn = TFSpeech2TextAttention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            name="encoder_attn",
            is_decoder=True,
        )
        self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
        self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
        self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2")
        self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")

    def call(
        self,
        hidden_states,
        attention_mask: Optional[tf.Tensor] = None,
        encoder_hidden_states: Optional[tf.Tensor] = None,
        encoder_attention_mask: Optional[tf.Tensor] = None,
        layer_head_mask: Optional[tf.Tensor] = None,
        cross_attn_layer_head_mask: Optional[tf.Tensor] = None,
        past_key_value: Optional[Tuple[tf.Tensor]] = None,
        training=False,
    ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
        """
        Args:
            hidden_states (`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
            attention_mask (`tf.Tensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`tf.Tensor`):
                cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
                `(decoder_attention_heads,)`
            cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
                `(decoder_attention_heads,)`
            past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            training=training,
        )
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states

        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)

            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                training=training,
            )
            hidden_states = self.dropout(hidden_states, training=training)
            hidden_states = residual + hidden_states

            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value

        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = self.activation_dropout(hidden_states, training=training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states

        return (
            hidden_states,
            self_attn_weights,
            cross_attn_weights,
            present_key_value,
        )
class TFSpeech2TextPreTrainedModel(TFPreTrainedModel):
    """Base class providing config, weight-loading conventions, and serving signatures
    shared by all TF Speech2Text models."""

    config_class = Speech2TextConfig
    base_model_prefix = "model"
    main_input_name = "input_features"

    # Overwritten property due to different expected input shape and type
    @property
    def dummy_inputs(self) -> Dict[str, tf.Tensor]:
        """
        Dummy inputs to build the network.

        Returns:
            `Dict[str, tf.Tensor]`: The dummy inputs.
        """
        # NOTE: the time dimension is drawn with `random.randint`, so dummy inputs
        # are not deterministic across calls.
        return {
            self.main_input_name: tf.random.uniform(
                [
                    1,
                    random.randint(1, self.config.max_source_positions),  # time
                    self.config.input_feat_per_channel * self.config.input_channels,  # input channels
                ]
            ),
            "decoder_input_ids": tf.constant([[2, 3]], dtype=tf.int32),
        }

    def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor):
        """
        Computes the output length of the convolutional layers
        """
        # Each stride-2 conv layer halves the length (ceil division).
        for _ in range(self.config.num_conv_layers):
            input_lengths = (input_lengths - 1) // 2 + 1

        return input_lengths

    @tf.function(
        input_signature=[
            {
                "input_features": tf.TensorSpec((None, None, None), tf.float32, name="input_features"),
                "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
                "decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"),
                "decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"),
            }
        ]
    )
    def serving(self, inputs):
        # SavedModel serving entry point with a fixed input signature.
        output = self.call(inputs)

        return self.serving_output(output)
SPEECH_TO_TEXT_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
TensorFlow models and layers in `transformers` accept two formats as input:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional argument.
The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
positional argument:
- a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Note that when creating models and layers with
[subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
about any of this, as you can just pass inputs like you would to any other Python function!
</Tip>
Parameters:
config ([`Speech2TextConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
SPEECH_TO_TEXT_INPUTS_DOCSTRING = r"""
Args:
input_features (`tf.Tensor` of shape `(batch_size, sequence_length, feature_size)`):
Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained
by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.*
via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
[`AutoFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a
tensor of floats. See [`~Speech2TextFeatureExtractor.__call__`]
attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`Speech2TextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
SpeechToText uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
For translation and summarization training, `decoder_input_ids` should be provided. If no
`decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
for denoising pre-training following the paper.
decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.
head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tf.FloatTensor`, *optional*):
hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
of shape `(batch_size, sequence_length, hidden_size)` is a sequence of
past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`)
contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
decoder_inputs_embeds (`tf.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
input (see `past_key_values`). This is useful if you want more control over how to convert
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
eager mode, in graph mode the value will always be set to True.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@keras_serializable
class TFSpeech2TextEncoder(tf.keras.layers.Layer):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`TFSpeech2TextEncoderLayer`].

    Args:
        config: Speech2TextConfig
    """

    # Fix: the docstring above was previously placed *after* `config_class`, making it a dead
    # string expression (the class `__doc__` was None). A docstring must be the first statement.
    config_class = Speech2TextConfig

    def __init__(self, config: Speech2TextConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        # Per-layer skip probability used by LayerDrop during training (see call()).
        self.layerdrop = config.encoder_layerdrop

        embed_dim = config.d_model
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_source_positions
        # Scale embeddings by sqrt(d_model) when configured, as in the original Transformer.
        self.embed_scale = tf.math.sqrt(float(embed_dim)) if config.scale_embedding else 1.0

        # Convolutional front-end that subsamples the audio feature sequence before the
        # transformer layers (output length computed by _get_feat_extract_output_lengths).
        self.conv = TFConv1dSubsampler(config, name="conv")
        self.embed_positions = TFSpeech2TextSinusoidalPositionalEmbedding(
            num_positions=config.max_source_positions,
            embedding_dim=embed_dim,
            padding_idx=self.padding_idx,
            name="embed_positions",
        )
        self.layers = [TFSpeech2TextEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
        self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")

    def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor):
        """
        Computes the output length of the convolutional layers
        """
        # Each conv layer halves the sequence length (stride 2), rounding up.
        for _ in range(self.config.num_conv_layers):
            input_lengths = (input_lengths - 1) // 2 + 1

        return input_lengths

    def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask):
        """Subsample `attention_mask` so it matches the conv front-end's output length."""
        # generate creates 3D attention mask, because of the shape of input_features
        # convert it to 2D if thats the case
        if len(attention_mask.shape) > 2:
            attention_mask = attention_mask[:, :, -1]

        subsampled_lengths = self._get_feat_extract_output_lengths(tf.math.reduce_sum(attention_mask, -1))
        bsz = shape_list(attention_mask)[0]
        # Scatter a single 1 at the last valid (subsampled) position of each sequence...
        indices = tf.concat(
            (
                tf.expand_dims(tf.range(bsz, dtype=attention_mask.dtype), -1),
                tf.expand_dims(subsampled_lengths - 1, -1),
            ),
            axis=-1,
        )
        attention_mask = tf.scatter_nd(indices=indices, updates=tf.ones(bsz), shape=[bsz, feature_vector_length])
        # ...then a reversed cumulative sum turns that 1 into 1s over all positions up to it.
        attention_mask = tf.cast(tf.reverse(tf.math.cumsum(tf.reverse(attention_mask, [-1]), -1), [-1]), tf.int64)
        return attention_mask

    @unpack_inputs
    def call(
        self,
        input_features=None,
        attention_mask=None,
        head_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
    ):
        """
        Args:
            input_features (`tf.Tensor` of shape `(batch_size, sequence_length, feature_size)`):
                Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be
                obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a
                `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
                `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the fbank features,
                padding and conversion into a tensor of floats. See [`~Speech2TextFeatureExtractor.__call__`]
            attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        if input_features is None:
            raise ValueError("You have to specify input_features")

        # Subsample the fbank features with the conv front-end, then scale.
        inputs_embeds = self.conv(input_features)
        inputs_embeds = self.embed_scale * inputs_embeds

        # subsample attention mask if necessary
        if attention_mask is not None:
            attention_mask = self._get_feature_vector_attention_mask(tf.shape(inputs_embeds)[1], attention_mask)
            padding_mask = tf.cast(tf.math.not_equal(attention_mask, 1), tf.int64)
        else:
            padding_mask = tf.zeros(tf.shape(inputs_embeds)[:-1], dtype=tf.int64)

        embed_pos = self.embed_positions(padding_mask)

        hidden_states = inputs_embeds + embed_pos
        hidden_states = self.dropout(hidden_states, training=training)

        # check attention mask and invert
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _expand_mask(attention_mask)

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            tf.debugging.assert_equal(
                shape_list(head_mask)[0],
                len(self.layers),
                message=(
                    f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
                    f" {shape_list(head_mask)[0]}."
                ),
            )

        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if training and (dropout_probability < self.layerdrop):  # skip the layer
                continue

            hidden_states, attn = encoder_layer(
                hidden_states,
                attention_mask,
                head_mask[idx] if head_mask is not None else None,
                training=training,
            )

            if output_attentions:
                all_attentions += (attn,)

        # Final layer norm is applied once after the full stack (pre-LN architecture).
        hidden_states = self.layer_norm(hidden_states)
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return TFBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
@keras_serializable
class TFSpeech2TextDecoder(tf.keras.layers.Layer):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFSpeech2TextDecoderLayer`]

    Args:
        config: Speech2TextConfig
    """

    # Fix: the docstring above was previously placed *after* `config_class`, making it a dead
    # string expression (the class `__doc__` was None). A docstring must be the first statement.
    config_class = Speech2TextConfig

    def __init__(self, config: Speech2TextConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        # Per-layer skip probability used by LayerDrop during training (see call()).
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_target_positions
        # Scale embeddings by sqrt(d_model) when configured, as in the original Transformer.
        self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0

        self.embed_tokens = TFSharedEmbeddings(config.vocab_size, config.d_model, name="embed_tokens")

        self.embed_positions = TFSpeech2TextSinusoidalPositionalEmbedding(
            num_positions=config.max_target_positions,
            embedding_dim=config.d_model,
            padding_idx=self.padding_idx,
            name="embed_positions",
        )

        self.layers = [TFSpeech2TextDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
        self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
        self.dropout = tf.keras.layers.Dropout(config.dropout)

    def get_embed_tokens(self):
        return self.embed_tokens

    def set_embed_tokens(self, embed_tokens):
        self.embed_tokens = embed_tokens

    @unpack_inputs
    def call(
        self,
        input_ids=None,
        inputs_embeds=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        head_mask=None,
        cross_attn_head_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
    ):
        r"""
        Args:
            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.
                Indices can be obtained using [`Speech2TextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded
                representation. This is useful if you want more control over how to convert `input_ids` indices
                into associated vectors than the model's internal embedding lookup matrix.
            attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
                Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
                decoding.
                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        # past_key_values_length
        past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0

        if inputs_embeds is None:
            # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound
            # indices on GPU, returning zeros instead. This is a dangerous silent behavior.
            tf.debugging.assert_less(
                input_ids,
                tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype),
                message=(
                    "input_ids must be smaller than the embedding layer's input dimension (got"
                    f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})"
                ),
            )
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        # (removed a no-op `else: inputs_embeds = inputs_embeds` branch)

        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
        else:
            combined_attention_mask = _expand_mask(
                tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
            )

        if attention_mask is not None:
            combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])

        # expand encoder attention mask
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])

        # embed positions
        # NOTE(review): positions are derived from `input_ids`; when only `inputs_embeds` is
        # provided this receives None — confirm TFSpeech2TextSinusoidalPositionalEmbedding
        # handles that, otherwise inputs_embeds-only calls will fail here.
        positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)

        hidden_states = inputs_embeds + positions
        hidden_states = self.dropout(hidden_states, training=training)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None
        next_decoder_cache = () if use_cache else None

        # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
        for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]:
            if attn_mask is not None:
                tf.debugging.assert_equal(
                    shape_list(attn_mask)[0],
                    len(self.layers),
                    message=(
                        f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
                        f" {shape_list(attn_mask)[0]}."
                    ),
                )

        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            dropout_probability = random.uniform(0, 1)
            if training and (dropout_probability < self.layerdrop):
                continue

            past_key_value = past_key_values[idx] if past_key_values is not None else None
            cross_attn_layer_head_mask = cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None

            hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
                hidden_states,
                attention_mask=combined_attention_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                layer_head_mask=head_mask[idx] if head_mask is not None else None,
                cross_attn_layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=past_key_value,
            )

            if use_cache:
                next_decoder_cache += (present_key_value,)

            if output_attentions:
                all_self_attns += (layer_self_attn,)

                if encoder_hidden_states is not None:
                    all_cross_attns += (layer_cross_attn,)

        # Final layer norm is applied once after the full stack (pre-LN architecture).
        hidden_states = self.layer_norm(hidden_states)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attns
        else:
            return TFBaseModelOutputWithPastAndCrossAttentions(
                last_hidden_state=hidden_states,
                past_key_values=next_cache,
                hidden_states=all_hidden_states,
                attentions=all_self_attns,
                cross_attentions=all_cross_attns,
            )
@keras_serializable
class TFSpeech2TextMainLayer(tf.keras.layers.Layer):
    """Core Speech2Text encoder-decoder stack shared by the bare model and the LM-head model."""

    config_class = Speech2TextConfig

    def __init__(self, config: Speech2TextConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config

        self.encoder = TFSpeech2TextEncoder(config, name="encoder")
        self.decoder = TFSpeech2TextDecoder(config, name="decoder")

    def get_input_embeddings(self):
        # Token embeddings live on the decoder; the encoder consumes audio features, not tokens.
        return self.decoder.embed_tokens

    def set_input_embeddings(self, new_embeddings):
        self.decoder.embed_tokens = new_embeddings

    @unpack_inputs
    def call(
        self,
        input_features=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        encoder_outputs=None,
        past_key_values=None,
        decoder_inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
        **kwargs,  # NOTE(review): silently swallowed — presumably kept for Keras call-signature compatibility; confirm
    ):
        # Resolve output-control flags, falling back to the model config when not given.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Run the encoder unless precomputed encoder outputs were supplied (e.g. during generation).
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_features=input_features,
                attention_mask=attention_mask,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                training=training,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput):
            encoder_outputs = TFBaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )
        # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
        elif not return_dict and not isinstance(encoder_outputs, tuple):
            encoder_outputs = encoder_outputs.to_tuple()

        # downsample encoder attention mask
        # (the conv front-end shortened the sequence, so the mask must be subsampled to match)
        if attention_mask is not None:
            encoder_attention_mask = self.encoder._get_feature_vector_attention_mask(
                tf.shape(encoder_outputs[0])[1], attention_mask
            )
        else:
            encoder_attention_mask = None

        # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=encoder_attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        if not return_dict:
            # Tuple output: decoder fields first, encoder fields appended after.
            return decoder_outputs + encoder_outputs

        return TFSeq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    "The bare Speech2Text Model outputting raw hidden-states without any specific head on top.",
    SPEECH_TO_TEXT_START_DOCSTRING,
)
class TFSpeech2TextModel(TFSpeech2TextPreTrainedModel):
    # Thin wrapper: all encoder/decoder wiring lives in TFSpeech2TextMainLayer.
    def __init__(self, config: Speech2TextConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.model = TFSpeech2TextMainLayer(config, name="model")

    def get_encoder(self):
        # Exposed for generation utilities, which pre-compute encoder outputs.
        return self.model.encoder

    def get_decoder(self):
        return self.model.decoder

    @unpack_inputs
    @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSeq2SeqModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_features: Optional[TFModelInputType] = None,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        decoder_input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        decoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        decoder_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        cross_attn_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        encoder_outputs: Optional[Union[np.ndarray, tf.Tensor]] = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        decoder_inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
        **kwargs,
    ) -> Union[Tuple, TFSeq2SeqModelOutput]:
        # Pure pass-through to the main layer; docstring is injected by the decorators above.
        outputs = self.model(
            input_features=input_features,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            encoder_outputs=encoder_outputs,
            past_key_values=past_key_values,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return outputs

    def serving_output(self, output):
        # Convert structured outputs to tensors for SavedModel serving signatures; fields the
        # config did not enable are dropped (set to None).
        pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
        dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
        dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
        cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
        enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
        enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None

        return TFSeq2SeqModelOutput(
            last_hidden_state=output.last_hidden_state,
            past_key_values=pkv,
            decoder_hidden_states=dec_hs,
            decoder_attentions=dec_attns,
            cross_attentions=cross_attns,
            encoder_last_hidden_state=output.encoder_last_hidden_state,
            encoder_hidden_states=enc_hs,
            encoder_attentions=enc_attns,
        )
@add_start_docstrings(
    "The Speech2Text Model with a language modeling head. Can be used for summarization.",
    SPEECH_TO_TEXT_START_DOCSTRING,
)
class TFSpeech2TextForConditionalGeneration(TFSpeech2TextPreTrainedModel, TFCausalLanguageModelingLoss):
    def __init__(self, config: Speech2TextConfig, *inputs, **kwargs):
        # Accept *inputs/**kwargs for consistency with TFSpeech2TextModel.__init__; this is
        # backward compatible — existing callers passing only `config` are unaffected.
        super().__init__(config, *inputs, **kwargs)
        self.model = TFSpeech2TextMainLayer(config, name="model")
        # Projects decoder hidden states to vocabulary logits.
        self.lm_head = tf.keras.layers.Dense(self.config.vocab_size, use_bias=False, name="lm_head")
        # TODO (Joao): investigate why Speech2Text has numerical issues in XLA generate
        self.supports_xla_generation = False

    def get_encoder(self):
        return self.model.encoder

    def get_decoder(self):
        return self.model.decoder

    def resize_token_embeddings(self, new_num_tokens: int) -> tf.Variable:
        # Delegates to the base implementation; kept as an explicit override for discoverability.
        new_embeddings = super().resize_token_embeddings(new_num_tokens)
        return new_embeddings

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    @unpack_inputs
    @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_features: Optional[TFModelInputType] = None,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        decoder_input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        decoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        decoder_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        cross_attn_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        encoder_outputs: Optional[Union[np.ndarray, tf.Tensor]] = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        decoder_inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
        labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
        **kwargs,
    ) -> Union[Tuple, TFSeq2SeqLMOutput]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> import tensorflow as tf
        >>> from transformers import Speech2TextProcessor, TFSpeech2TextForConditionalGeneration
        >>> from datasets import load_dataset
        >>> import soundfile as sf

        >>> model = TFSpeech2TextForConditionalGeneration.from_pretrained(
        ...     "facebook/s2t-small-librispeech-asr", from_pt=True
        ... )
        >>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")


        >>> def map_to_array(batch):
        ...     speech, _ = sf.read(batch["file"])
        ...     batch["speech"] = speech
        ...     return batch


        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> ds = ds.map(map_to_array)
        >>> ds.set_format(type="tf")

        >>> input_features = processor(
        ...     ds["speech"][0], sampling_rate=16000, return_tensors="tf"
        ... ).input_features  # Batch size 1
        >>> generated_ids = model.generate(input_features)

        >>> transcription = processor.batch_decode(generated_ids)
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is not None:
            # Teacher forcing: derive decoder inputs from the labels when none were provided.
            if decoder_input_ids is None and decoder_inputs_embeds is None:
                decoder_input_ids = shift_tokens_right(
                    labels, self.config.pad_token_id, self.config.decoder_start_token_id
                )

        outputs = self.model(
            input_features=input_features,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        lm_logits = self.lm_head(outputs[0])
        masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return TFSeq2SeqLMOutput(
            loss=masked_lm_loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )

    def serving_output(self, output):
        # Convert structured outputs to tensors for SavedModel serving signatures; fields the
        # config did not enable are dropped (set to None).
        pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
        dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
        dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
        cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
        enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
        enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None

        return TFSeq2SeqLMOutput(
            logits=output.logits,
            past_key_values=pkv,
            decoder_hidden_states=dec_hs,
            decoder_attentions=dec_attns,
            cross_attentions=cross_attns,
            encoder_last_hidden_state=output.encoder_last_hidden_state,
            encoder_hidden_states=enc_hs,
            encoder_attentions=enc_attns,
        )

    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past_key_values=None,
        attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs,
    ):
        # cut decoder_input_ids if past is used
        # (with a cache, only the newest token needs to be fed to the decoder)
        if past_key_values is not None:
            decoder_input_ids = decoder_input_ids[:, -1:]

        return {
            "input_features": None,  # needs to be passed to make Keras.layer.__call__ happy
            "encoder_outputs": encoder_outputs,
            "past_key_values": past_key_values,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,  # change this to avoid caching (presumably for debugging)
        }
|
27182812/ChatGLM-LLaMA-chinese-insturct | 4,478 | src/transformers/models/speech_to_text/convert_s2t_fairseq_to_tfms.py | # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries from `state_dict` in place.

    Keys that are absent are silently ignored (``dict.pop`` with a default).

    Args:
        state_dict: the fairseq model state dict, mutated in place.
    """
    for obsolete_key in (
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ):
        state_dict.pop(obsolete_key, None)
def rename_keys(s_dict):
    """Rename fairseq parameter names to their transformers equivalents, in place.

    ``transformer_layers`` -> ``layers`` and ``subsample`` -> ``conv``; all other
    keys are left untouched.

    Args:
        s_dict: the state dict to rename, mutated in place.
    """
    for old_key in list(s_dict.keys()):
        if "transformer_layers" in old_key:
            new_key = old_key.replace("transformer_layers", "layers")
        elif "subsample" in old_key:
            new_key = old_key.replace("subsample", "conv")
        else:
            continue
        s_dict[new_key] = s_dict.pop(old_key)
def make_linear_from_emb(emb):
    """Build a bias-free linear LM head sharing its weight with an embedding layer.

    Args:
        emb: an `nn.Embedding` with weight of shape `(vocab_size, emb_size)`.

    Returns:
        An `nn.Linear` mapping hidden states of size `emb_size` to `vocab_size`
        logits, whose weight tensor is `emb.weight.data` (tied, not copied).
    """
    vocab_size, emb_size = emb.weight.shape
    # Fix: the projection maps emb_size -> vocab_size, so in_features=emb_size and
    # out_features=vocab_size. The previous code passed the arguments swapped and relied on
    # the weight assignment below to overwrite the wrongly-shaped tensor, which worked at
    # forward time but left lin_layer.in_features/out_features metadata incorrect.
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq Speech2Text checkpoint into a transformers checkpoint on disk.

    Loads the fairseq ``.pt`` file, cleans and renames its state dict, derives a
    `Speech2TextConfig` from the stored fairseq ``args``, loads the weights into a
    `Speech2TextForConditionalGeneration` and saves it with `save_pretrained`.

    Args:
        checkpoint_path: path to the fairseq model ``.pt`` file.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    # Grab the LM head weights *before* remove_ignore_keys_() pops
    # "decoder.output_projection.weight" from the state dict.
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    # Whether fairseq tied the decoder input embeddings with the output projection.
    tie_embeds = args.share_decoder_input_output_embed

    # fairseq stores kernel sizes as a comma-separated string, e.g. "5,5".
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    # strict=False: sinusoidal position weights are recomputed, so they may legitimately
    # be missing; anything else missing is an error.
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        # Tie the LM head to the decoder token embeddings.
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        # Otherwise restore the separately-trained output projection saved earlier.
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: convert a fairseq Speech2Text checkpoint to the transformers format.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 11,494 | src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Feature extractor class for Speech2Text
"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a Speech2Text feature extractor.
    This feature extractor inherits from [`SequenceFeatureExtractor`] which contains most of the main methods. Users
    should refer to this superclass for more information regarding those methods.
    This class extracts mel-filter bank features from raw speech using TorchAudio and applies utterance-level cepstral
    mean and variance normalization to the extracted features.
    Args:
        feature_size (`int`, defaults to 80):
            The feature dimension of the extracted features.
        sampling_rate (`int`, defaults to 16000):
            The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
        num_mel_bins (`int`, defaults to 80):
            Number of Mel-frequency bins.
        padding_value (`float`, defaults to 0.0):
            The value that is used to fill the padding vectors.
        do_ceptral_normalize (`bool`, *optional*, defaults to `True`):
            Whether or not to apply utterance-level cepstral mean and variance normalization to extracted features.
            (The historical "ceptral" spelling is kept because it is part of the public, serialized config.)
        normalize_means (`bool`, *optional*, defaults to `True`):
            Whether or not to zero-mean normalize the extracted features.
        normalize_vars (`bool`, *optional*, defaults to `True`):
            Whether or not to unit-variance normalize the extracted features.
    """
    # Names of the tensors this extractor produces; the model's forward pass consumes them.
    model_input_names = ["input_features", "attention_mask"]
    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        # Attention masks are always returned so normalization can ignore padded frames in batched inputs.
        self.return_attention_mask = True
    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        """
        Get mel-filter bank features using TorchAudio. Note that TorchAudio requires 16-bit signed integers as inputs
        and hence the waveform should not be normalized before feature extraction.
        """
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        # ta_kaldi.fbank expects a (channel, time) float tensor, hence the unsqueeze to add a channel dim.
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        # Returned shape is (num_frames, num_mel_bins) — TODO confirm against torchaudio docs.
        return features.numpy()
    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        """Apply utterance-level cepstral mean/variance normalization, using only the first
        `input_length` (unpadded) frames to compute the statistics."""
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            # NOTE(review): a zero std (constant feature channel) would produce inf/nan here — confirm intended.
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            # Re-fill the padded tail, which the subtraction/division above shifted away from `padding_value`.
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        # Per-utterance valid lengths: mask sums when padding was applied, otherwise every frame is valid.
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """
        Main method to featurize and prepare for the model one or several sequence(s).
        Args:
            raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
                The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
                values, a list of numpy arrays or a list of list of float values.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
                Select a strategy to pad the returned sequences (according to the model's padding side and padding
                index) among:
                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence if provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).
            truncation (`bool`):
                Activates truncation to cut input sequences longer than *max_length* to *max_length*.
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
                `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
            return_attention_mask (`bool`, *optional*):
                Whether to return the attention mask. If left to the default, will return the attention mask according
                to the specific feature_extractor's default.
                [What are attention masks?](../glossary#attention-mask)
                <Tip>
                For Speech2TextTransformer models, `attention_mask` should always be passed for batched inference, to
                avoid subtle bugs.
                </Tip>
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:
                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            sampling_rate (`int`, *optional*):
                The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
                `sampling_rate` at the forward call to prevent silent errors.
            padding_value (`float`, defaults to 0.0):
                The value that is used to fill the padding values / vectors.
        """
        # Guard against feeding audio sampled at a different rate than the model was trained on.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        # A batch is a list/tuple whose first element is itself a sequence (array or list).
        is_batched = bool(
            isinstance(raw_speech, (list, tuple))
            and (isinstance(raw_speech[0], np.ndarray) or isinstance(raw_speech[0], (tuple, list)))
        )
        # Coerce every input form to float32 numpy arrays.
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            # Only pass a mask when padding was actually applied; otherwise normalize over full lengths.
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
|
27182812/ChatGLM-LLaMA-chinese-insturct | 4,111 | src/transformers/models/pegasus/__init__.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table: maps submodule name -> public names it exports. `_LazyModule`
# (installed at the bottom of this file) imports a submodule only on first attribute access.
_import_structure = {"configuration_pegasus": ["PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusConfig"]}
# The slow tokenizer requires the optional `sentencepiece` dependency.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_pegasus"] = ["PegasusTokenizer"]
# The fast tokenizer requires the optional `tokenizers` dependency.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_pegasus_fast"] = ["PegasusTokenizerFast"]
# PyTorch model classes are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus"] = [
        "PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusForCausalLM",
        "PegasusForConditionalGeneration",
        "PegasusModel",
        "PegasusPreTrainedModel",
    ]
# TensorFlow model classes are only registered when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_pegasus"] = [
        "TFPegasusForConditionalGeneration",
        "TFPegasusModel",
        "TFPegasusPreTrainedModel",
    ]
# Flax model classes are only registered when JAX/Flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_pegasus"] = [
        "FlaxPegasusForConditionalGeneration",
        "FlaxPegasusModel",
        "FlaxPegasusPreTrainedModel",
    ]
# Under static type checking, perform the real (eager) imports so type checkers and IDEs
# see the concrete symbols; the guarded blocks mirror `_import_structure` above exactly.
if TYPE_CHECKING:
    from .configuration_pegasus import PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusConfig
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_pegasus import PegasusTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_pegasus_fast import PegasusTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus import (
            PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusForCausalLM,
            PegasusForConditionalGeneration,
            PegasusModel,
            PegasusPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_pegasus import TFPegasusForConditionalGeneration, TFPegasusModel, TFPegasusPreTrainedModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_pegasus import (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
            FlaxPegasusPreTrainedModel,
        )
# At runtime, replace this module with a lazy proxy that defers submodule imports.
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
2877025939/tabelVew-CollectionView | 35,575 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView.xcodeproj/project.pbxproj | // !$*UTF8*$!
{
archiveVersion = 1;
classes = {
};
objectVersion = 46;
objects = {
/* Begin PBXBuildFile section */
27FBBD101EE7A5D40060C325 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 27FBBD0F1EE7A5D40060C325 /* main.m */; };
27FBBD131EE7A5D40060C325 /* AppDelegate.m in Sources */ = {isa = PBXBuildFile; fileRef = 27FBBD121EE7A5D40060C325 /* AppDelegate.m */; };
27FBBD191EE7A5D40060C325 /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 27FBBD171EE7A5D40060C325 /* Main.storyboard */; };
27FBBD1B1EE7A5D40060C325 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 27FBBD1A1EE7A5D40060C325 /* Assets.xcassets */; };
27FBBD1E1EE7A5D40060C325 /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 27FBBD1C1EE7A5D40060C325 /* LaunchScreen.storyboard */; };
27FBBD291EE7A5D40060C325 /* tabelVew__CollectionViewTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 27FBBD281EE7A5D40060C325 /* tabelVew__CollectionViewTests.m */; };
27FBBD341EE7A5D40060C325 /* tabelVew__CollectionViewUITests.m in Sources */ = {isa = PBXBuildFile; fileRef = 27FBBD331EE7A5D40060C325 /* tabelVew__CollectionViewUITests.m */; };
27FBBD591EE7DF100060C325 /* ViewController1.m in Sources */ = {isa = PBXBuildFile; fileRef = 27FBBD581EE7DF100060C325 /* ViewController1.m */; };
27FBBD611EEA71C00060C325 /* HomeTitle.m in Sources */ = {isa = PBXBuildFile; fileRef = 27FBBD601EEA71C00060C325 /* HomeTitle.m */; };
27FBBD801EEA73E40060C325 /* MASCompositeConstraint.m in Sources */ = {isa = PBXBuildFile; fileRef = 27FBBD661EEA73E40060C325 /* MASCompositeConstraint.m */; };
27FBBD811EEA73E40060C325 /* MASConstraint.m in Sources */ = {isa = PBXBuildFile; fileRef = 27FBBD691EEA73E40060C325 /* MASConstraint.m */; };
27FBBD821EEA73E40060C325 /* MASConstraintMaker.m in Sources */ = {isa = PBXBuildFile; fileRef = 27FBBD6B1EEA73E40060C325 /* MASConstraintMaker.m */; };
27FBBD831EEA73E40060C325 /* MASLayoutConstraint.m in Sources */ = {isa = PBXBuildFile; fileRef = 27FBBD6D1EEA73E40060C325 /* MASLayoutConstraint.m */; };
27FBBD841EEA73E40060C325 /* MASViewAttribute.m in Sources */ = {isa = PBXBuildFile; fileRef = 27FBBD711EEA73E40060C325 /* MASViewAttribute.m */; };
27FBBD851EEA73E40060C325 /* MASViewConstraint.m in Sources */ = {isa = PBXBuildFile; fileRef = 27FBBD731EEA73E40060C325 /* MASViewConstraint.m */; };
27FBBD861EEA73E40060C325 /* NSArray+MASAdditions.m in Sources */ = {isa = PBXBuildFile; fileRef = 27FBBD751EEA73E40060C325 /* NSArray+MASAdditions.m */; };
27FBBD871EEA73E40060C325 /* NSLayoutConstraint+MASDebugAdditions.m in Sources */ = {isa = PBXBuildFile; fileRef = 27FBBD781EEA73E40060C325 /* NSLayoutConstraint+MASDebugAdditions.m */; };
27FBBD881EEA73E40060C325 /* View+MASAdditions.m in Sources */ = {isa = PBXBuildFile; fileRef = 27FBBD7A1EEA73E40060C325 /* View+MASAdditions.m */; };
27FBBD891EEA73E40060C325 /* ViewController+MASAdditions.m in Sources */ = {isa = PBXBuildFile; fileRef = 27FBBD7D1EEA73E40060C325 /* ViewController+MASAdditions.m */; };
27FBBD8E1EEA77F70060C325 /* HomeTableViewCell.m in Sources */ = {isa = PBXBuildFile; fileRef = 27FBBD8C1EEA77F70060C325 /* HomeTableViewCell.m */; };
27FBBD8F1EEA77F70060C325 /* HomeTableViewCell.xib in Resources */ = {isa = PBXBuildFile; fileRef = 27FBBD8D1EEA77F70060C325 /* HomeTableViewCell.xib */; };
27FBBD9D1EEA79350060C325 /* HomeViewController.m in Sources */ = {isa = PBXBuildFile; fileRef = 27FBBD9C1EEA79350060C325 /* HomeViewController.m */; };
27FDC7D11F0E381D0016D57C /* HomeCollectionCell.m in Sources */ = {isa = PBXBuildFile; fileRef = 27FDC7CF1F0E381D0016D57C /* HomeCollectionCell.m */; };
27FDC7D21F0E381D0016D57C /* HomeCollectionCell.xib in Resources */ = {isa = PBXBuildFile; fileRef = 27FDC7D01F0E381D0016D57C /* HomeCollectionCell.xib */; };
27FDC7D41F0E3BA40016D57C /* MyTask@2x.png in Resources */ = {isa = PBXBuildFile; fileRef = 27FDC7D31F0E3BA40016D57C /* MyTask@2x.png */; };
/* End PBXBuildFile section */
/* Begin PBXContainerItemProxy section */
27FBBD251EE7A5D40060C325 /* PBXContainerItemProxy */ = {
isa = PBXContainerItemProxy;
containerPortal = 27FBBD031EE7A5D40060C325 /* Project object */;
proxyType = 1;
remoteGlobalIDString = 27FBBD0A1EE7A5D40060C325;
remoteInfo = "tabelVew嵌套CollectionView";
};
27FBBD301EE7A5D40060C325 /* PBXContainerItemProxy */ = {
isa = PBXContainerItemProxy;
containerPortal = 27FBBD031EE7A5D40060C325 /* Project object */;
proxyType = 1;
remoteGlobalIDString = 27FBBD0A1EE7A5D40060C325;
remoteInfo = "tabelVew嵌套CollectionView";
};
/* End PBXContainerItemProxy section */
/* Begin PBXFileReference section */
27AFF4991F8E423300BC3D1C /* Header.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = Header.h; sourceTree = "<group>"; };
27FBBD0B1EE7A5D40060C325 /* tabelVew嵌套CollectionView.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = "tabelVew嵌套CollectionView.app"; sourceTree = BUILT_PRODUCTS_DIR; };
27FBBD0F1EE7A5D40060C325 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
27FBBD111EE7A5D40060C325 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = "<group>"; };
27FBBD121EE7A5D40060C325 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = "<group>"; };
27FBBD181EE7A5D40060C325 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = "<group>"; };
27FBBD1A1EE7A5D40060C325 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = "<group>"; };
27FBBD1D1EE7A5D40060C325 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/LaunchScreen.storyboard; sourceTree = "<group>"; };
27FBBD1F1EE7A5D40060C325 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
27FBBD241EE7A5D40060C325 /* tabelVew嵌套CollectionViewTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = "tabelVew嵌套CollectionViewTests.xctest"; sourceTree = BUILT_PRODUCTS_DIR; };
27FBBD281EE7A5D40060C325 /* tabelVew__CollectionViewTests.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = tabelVew__CollectionViewTests.m; sourceTree = "<group>"; };
27FBBD2A1EE7A5D40060C325 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
27FBBD2F1EE7A5D40060C325 /* tabelVew嵌套CollectionViewUITests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = "tabelVew嵌套CollectionViewUITests.xctest"; sourceTree = BUILT_PRODUCTS_DIR; };
27FBBD331EE7A5D40060C325 /* tabelVew__CollectionViewUITests.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = tabelVew__CollectionViewUITests.m; sourceTree = "<group>"; };
27FBBD351EE7A5D40060C325 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
27FBBD571EE7DF100060C325 /* ViewController1.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ViewController1.h; sourceTree = "<group>"; };
27FBBD581EE7DF100060C325 /* ViewController1.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ViewController1.m; sourceTree = "<group>"; };
27FBBD5F1EEA71C00060C325 /* HomeTitle.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = HomeTitle.h; sourceTree = "<group>"; };
27FBBD601EEA71C00060C325 /* HomeTitle.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = HomeTitle.m; sourceTree = "<group>"; };
27FBBD651EEA73E40060C325 /* MASCompositeConstraint.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MASCompositeConstraint.h; sourceTree = "<group>"; };
27FBBD661EEA73E40060C325 /* MASCompositeConstraint.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = MASCompositeConstraint.m; sourceTree = "<group>"; };
27FBBD671EEA73E40060C325 /* MASConstraint+Private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "MASConstraint+Private.h"; sourceTree = "<group>"; };
27FBBD681EEA73E40060C325 /* MASConstraint.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MASConstraint.h; sourceTree = "<group>"; };
27FBBD691EEA73E40060C325 /* MASConstraint.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = MASConstraint.m; sourceTree = "<group>"; };
27FBBD6A1EEA73E40060C325 /* MASConstraintMaker.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MASConstraintMaker.h; sourceTree = "<group>"; };
27FBBD6B1EEA73E40060C325 /* MASConstraintMaker.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = MASConstraintMaker.m; sourceTree = "<group>"; };
27FBBD6C1EEA73E40060C325 /* MASLayoutConstraint.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MASLayoutConstraint.h; sourceTree = "<group>"; };
27FBBD6D1EEA73E40060C325 /* MASLayoutConstraint.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = MASLayoutConstraint.m; sourceTree = "<group>"; };
27FBBD6E1EEA73E40060C325 /* Masonry.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Masonry.h; sourceTree = "<group>"; };
27FBBD6F1EEA73E40060C325 /* MASUtilities.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MASUtilities.h; sourceTree = "<group>"; };
27FBBD701EEA73E40060C325 /* MASViewAttribute.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MASViewAttribute.h; sourceTree = "<group>"; };
27FBBD711EEA73E40060C325 /* MASViewAttribute.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = MASViewAttribute.m; sourceTree = "<group>"; };
27FBBD721EEA73E40060C325 /* MASViewConstraint.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MASViewConstraint.h; sourceTree = "<group>"; };
27FBBD731EEA73E40060C325 /* MASViewConstraint.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = MASViewConstraint.m; sourceTree = "<group>"; };
27FBBD741EEA73E40060C325 /* NSArray+MASAdditions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "NSArray+MASAdditions.h"; sourceTree = "<group>"; };
27FBBD751EEA73E40060C325 /* NSArray+MASAdditions.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = "NSArray+MASAdditions.m"; sourceTree = "<group>"; };
27FBBD761EEA73E40060C325 /* NSArray+MASShorthandAdditions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "NSArray+MASShorthandAdditions.h"; sourceTree = "<group>"; };
27FBBD771EEA73E40060C325 /* NSLayoutConstraint+MASDebugAdditions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "NSLayoutConstraint+MASDebugAdditions.h"; sourceTree = "<group>"; };
27FBBD781EEA73E40060C325 /* NSLayoutConstraint+MASDebugAdditions.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = "NSLayoutConstraint+MASDebugAdditions.m"; sourceTree = "<group>"; };
27FBBD791EEA73E40060C325 /* View+MASAdditions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "View+MASAdditions.h"; sourceTree = "<group>"; };
27FBBD7A1EEA73E40060C325 /* View+MASAdditions.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = "View+MASAdditions.m"; sourceTree = "<group>"; };
27FBBD7B1EEA73E40060C325 /* View+MASShorthandAdditions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "View+MASShorthandAdditions.h"; sourceTree = "<group>"; };
27FBBD7C1EEA73E40060C325 /* ViewController+MASAdditions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "ViewController+MASAdditions.h"; sourceTree = "<group>"; };
27FBBD7D1EEA73E40060C325 /* ViewController+MASAdditions.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = "ViewController+MASAdditions.m"; sourceTree = "<group>"; };
27FBBD8B1EEA77F70060C325 /* HomeTableViewCell.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = HomeTableViewCell.h; sourceTree = "<group>"; };
27FBBD8C1EEA77F70060C325 /* HomeTableViewCell.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = HomeTableViewCell.m; sourceTree = "<group>"; };
27FBBD8D1EEA77F70060C325 /* HomeTableViewCell.xib */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = file.xib; path = HomeTableViewCell.xib; sourceTree = "<group>"; };
27FBBD9B1EEA79350060C325 /* HomeViewController.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = HomeViewController.h; sourceTree = "<group>"; };
27FBBD9C1EEA79350060C325 /* HomeViewController.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = HomeViewController.m; sourceTree = "<group>"; };
27FDC7CE1F0E381D0016D57C /* HomeCollectionCell.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = HomeCollectionCell.h; sourceTree = "<group>"; };
27FDC7CF1F0E381D0016D57C /* HomeCollectionCell.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = HomeCollectionCell.m; sourceTree = "<group>"; };
27FDC7D01F0E381D0016D57C /* HomeCollectionCell.xib */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = file.xib; path = HomeCollectionCell.xib; sourceTree = "<group>"; };
27FDC7D31F0E3BA40016D57C /* MyTask@2x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "MyTask@2x.png"; sourceTree = "<group>"; };
/* End PBXFileReference section */
/* Begin PBXFrameworksBuildPhase section */
27FBBD081EE7A5D40060C325 /* Frameworks */ = {
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
);
runOnlyForDeploymentPostprocessing = 0;
};
27FBBD211EE7A5D40060C325 /* Frameworks */ = {
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
);
runOnlyForDeploymentPostprocessing = 0;
};
27FBBD2C1EE7A5D40060C325 /* Frameworks */ = {
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXFrameworksBuildPhase section */
/* Begin PBXGroup section */
27FBBD021EE7A5D40060C325 = {
isa = PBXGroup;
children = (
27FBBD0D1EE7A5D40060C325 /* tabelVew嵌套CollectionView */,
27FBBD271EE7A5D40060C325 /* tabelVew嵌套CollectionViewTests */,
27FBBD321EE7A5D40060C325 /* tabelVew嵌套CollectionViewUITests */,
27FBBD0C1EE7A5D40060C325 /* Products */,
);
sourceTree = "<group>";
};
27FBBD0C1EE7A5D40060C325 /* Products */ = {
isa = PBXGroup;
children = (
27FBBD0B1EE7A5D40060C325 /* tabelVew嵌套CollectionView.app */,
27FBBD241EE7A5D40060C325 /* tabelVew嵌套CollectionViewTests.xctest */,
27FBBD2F1EE7A5D40060C325 /* tabelVew嵌套CollectionViewUITests.xctest */,
);
name = Products;
sourceTree = "<group>";
};
27FBBD0D1EE7A5D40060C325 /* tabelVew嵌套CollectionView */ = {
isa = PBXGroup;
children = (
27FBBD111EE7A5D40060C325 /* AppDelegate.h */,
27FBBD121EE7A5D40060C325 /* AppDelegate.m */,
27FBBD9B1EEA79350060C325 /* HomeViewController.h */,
27FBBD9C1EEA79350060C325 /* HomeViewController.m */,
27FBBD5F1EEA71C00060C325 /* HomeTitle.h */,
27FBBD601EEA71C00060C325 /* HomeTitle.m */,
27FBBD8B1EEA77F70060C325 /* HomeTableViewCell.h */,
27FBBD8C1EEA77F70060C325 /* HomeTableViewCell.m */,
27FBBD8D1EEA77F70060C325 /* HomeTableViewCell.xib */,
27FBBD571EE7DF100060C325 /* ViewController1.h */,
27FBBD581EE7DF100060C325 /* ViewController1.m */,
27FDC7CE1F0E381D0016D57C /* HomeCollectionCell.h */,
27FDC7CF1F0E381D0016D57C /* HomeCollectionCell.m */,
27FDC7D01F0E381D0016D57C /* HomeCollectionCell.xib */,
27FBBD641EEA73E40060C325 /* Masonry */,
27FBBD171EE7A5D40060C325 /* Main.storyboard */,
27FDC7D31F0E3BA40016D57C /* MyTask@2x.png */,
27FBBD1A1EE7A5D40060C325 /* Assets.xcassets */,
27FBBD1C1EE7A5D40060C325 /* LaunchScreen.storyboard */,
27FBBD1F1EE7A5D40060C325 /* Info.plist */,
27FBBD0E1EE7A5D40060C325 /* Supporting Files */,
27AFF4991F8E423300BC3D1C /* Header.h */,
);
path = "tabelVew嵌套CollectionView";
sourceTree = "<group>";
};
27FBBD0E1EE7A5D40060C325 /* Supporting Files */ = {
isa = PBXGroup;
children = (
27FBBD0F1EE7A5D40060C325 /* main.m */,
);
name = "Supporting Files";
sourceTree = "<group>";
};
27FBBD271EE7A5D40060C325 /* tabelVew嵌套CollectionViewTests */ = {
isa = PBXGroup;
children = (
27FBBD281EE7A5D40060C325 /* tabelVew__CollectionViewTests.m */,
27FBBD2A1EE7A5D40060C325 /* Info.plist */,
);
path = "tabelVew嵌套CollectionViewTests";
sourceTree = "<group>";
};
27FBBD321EE7A5D40060C325 /* tabelVew嵌套CollectionViewUITests */ = {
isa = PBXGroup;
children = (
27FBBD331EE7A5D40060C325 /* tabelVew__CollectionViewUITests.m */,
27FBBD351EE7A5D40060C325 /* Info.plist */,
);
path = "tabelVew嵌套CollectionViewUITests";
sourceTree = "<group>";
};
27FBBD641EEA73E40060C325 /* Masonry */ = {
isa = PBXGroup;
children = (
27FBBD651EEA73E40060C325 /* MASCompositeConstraint.h */,
27FBBD661EEA73E40060C325 /* MASCompositeConstraint.m */,
27FBBD671EEA73E40060C325 /* MASConstraint+Private.h */,
27FBBD681EEA73E40060C325 /* MASConstraint.h */,
27FBBD691EEA73E40060C325 /* MASConstraint.m */,
27FBBD6A1EEA73E40060C325 /* MASConstraintMaker.h */,
27FBBD6B1EEA73E40060C325 /* MASConstraintMaker.m */,
27FBBD6C1EEA73E40060C325 /* MASLayoutConstraint.h */,
27FBBD6D1EEA73E40060C325 /* MASLayoutConstraint.m */,
27FBBD6E1EEA73E40060C325 /* Masonry.h */,
27FBBD6F1EEA73E40060C325 /* MASUtilities.h */,
27FBBD701EEA73E40060C325 /* MASViewAttribute.h */,
27FBBD711EEA73E40060C325 /* MASViewAttribute.m */,
27FBBD721EEA73E40060C325 /* MASViewConstraint.h */,
27FBBD731EEA73E40060C325 /* MASViewConstraint.m */,
27FBBD741EEA73E40060C325 /* NSArray+MASAdditions.h */,
27FBBD751EEA73E40060C325 /* NSArray+MASAdditions.m */,
27FBBD761EEA73E40060C325 /* NSArray+MASShorthandAdditions.h */,
27FBBD771EEA73E40060C325 /* NSLayoutConstraint+MASDebugAdditions.h */,
27FBBD781EEA73E40060C325 /* NSLayoutConstraint+MASDebugAdditions.m */,
27FBBD791EEA73E40060C325 /* View+MASAdditions.h */,
27FBBD7A1EEA73E40060C325 /* View+MASAdditions.m */,
27FBBD7B1EEA73E40060C325 /* View+MASShorthandAdditions.h */,
27FBBD7C1EEA73E40060C325 /* ViewController+MASAdditions.h */,
27FBBD7D1EEA73E40060C325 /* ViewController+MASAdditions.m */,
);
name = Masonry;
path = Masonry/Masonry;
sourceTree = "<group>";
};
/* End PBXGroup section */
/* Begin PBXNativeTarget section */
27FBBD0A1EE7A5D40060C325 /* tabelVew嵌套CollectionView */ = {
isa = PBXNativeTarget;
buildConfigurationList = 27FBBD381EE7A5D40060C325 /* Build configuration list for PBXNativeTarget "tabelVew嵌套CollectionView" */;
buildPhases = (
27FBBD071EE7A5D40060C325 /* Sources */,
27FBBD081EE7A5D40060C325 /* Frameworks */,
27FBBD091EE7A5D40060C325 /* Resources */,
);
buildRules = (
);
dependencies = (
);
name = "tabelVew嵌套CollectionView";
productName = "tabelVew嵌套CollectionView";
productReference = 27FBBD0B1EE7A5D40060C325 /* tabelVew嵌套CollectionView.app */;
productType = "com.apple.product-type.application";
};
27FBBD231EE7A5D40060C325 /* tabelVew嵌套CollectionViewTests */ = {
isa = PBXNativeTarget;
buildConfigurationList = 27FBBD3B1EE7A5D40060C325 /* Build configuration list for PBXNativeTarget "tabelVew嵌套CollectionViewTests" */;
buildPhases = (
27FBBD201EE7A5D40060C325 /* Sources */,
27FBBD211EE7A5D40060C325 /* Frameworks */,
27FBBD221EE7A5D40060C325 /* Resources */,
);
buildRules = (
);
dependencies = (
27FBBD261EE7A5D40060C325 /* PBXTargetDependency */,
);
name = "tabelVew嵌套CollectionViewTests";
productName = "tabelVew嵌套CollectionViewTests";
productReference = 27FBBD241EE7A5D40060C325 /* tabelVew嵌套CollectionViewTests.xctest */;
productType = "com.apple.product-type.bundle.unit-test";
};
27FBBD2E1EE7A5D40060C325 /* tabelVew嵌套CollectionViewUITests */ = {
isa = PBXNativeTarget;
buildConfigurationList = 27FBBD3E1EE7A5D40060C325 /* Build configuration list for PBXNativeTarget "tabelVew嵌套CollectionViewUITests" */;
buildPhases = (
27FBBD2B1EE7A5D40060C325 /* Sources */,
27FBBD2C1EE7A5D40060C325 /* Frameworks */,
27FBBD2D1EE7A5D40060C325 /* Resources */,
);
buildRules = (
);
dependencies = (
27FBBD311EE7A5D40060C325 /* PBXTargetDependency */,
);
name = "tabelVew嵌套CollectionViewUITests";
productName = "tabelVew嵌套CollectionViewUITests";
productReference = 27FBBD2F1EE7A5D40060C325 /* tabelVew嵌套CollectionViewUITests.xctest */;
productType = "com.apple.product-type.bundle.ui-testing";
};
/* End PBXNativeTarget section */
/* Begin PBXProject section */
27FBBD031EE7A5D40060C325 /* Project object */ = {
isa = PBXProject;
attributes = {
LastUpgradeCheck = 0830;
ORGANIZATIONNAME = Plan;
TargetAttributes = {
27FBBD0A1EE7A5D40060C325 = {
CreatedOnToolsVersion = 8.3.1;
DevelopmentTeam = HWQF636ATC;
ProvisioningStyle = Automatic;
};
27FBBD231EE7A5D40060C325 = {
CreatedOnToolsVersion = 8.3.1;
DevelopmentTeam = HWQF636ATC;
ProvisioningStyle = Automatic;
TestTargetID = 27FBBD0A1EE7A5D40060C325;
};
27FBBD2E1EE7A5D40060C325 = {
CreatedOnToolsVersion = 8.3.1;
DevelopmentTeam = HWQF636ATC;
ProvisioningStyle = Automatic;
TestTargetID = 27FBBD0A1EE7A5D40060C325;
};
};
};
buildConfigurationList = 27FBBD061EE7A5D40060C325 /* Build configuration list for PBXProject "tabelVew嵌套CollectionView" */;
compatibilityVersion = "Xcode 3.2";
developmentRegion = English;
hasScannedForEncodings = 0;
knownRegions = (
en,
Base,
);
mainGroup = 27FBBD021EE7A5D40060C325;
productRefGroup = 27FBBD0C1EE7A5D40060C325 /* Products */;
projectDirPath = "";
projectRoot = "";
targets = (
27FBBD0A1EE7A5D40060C325 /* tabelVew嵌套CollectionView */,
27FBBD231EE7A5D40060C325 /* tabelVew嵌套CollectionViewTests */,
27FBBD2E1EE7A5D40060C325 /* tabelVew嵌套CollectionViewUITests */,
);
};
/* End PBXProject section */
/* Begin PBXResourcesBuildPhase section */
27FBBD091EE7A5D40060C325 /* Resources */ = {
isa = PBXResourcesBuildPhase;
buildActionMask = 2147483647;
files = (
27FBBD8F1EEA77F70060C325 /* HomeTableViewCell.xib in Resources */,
27FBBD1E1EE7A5D40060C325 /* LaunchScreen.storyboard in Resources */,
27FDC7D21F0E381D0016D57C /* HomeCollectionCell.xib in Resources */,
27FBBD1B1EE7A5D40060C325 /* Assets.xcassets in Resources */,
27FBBD191EE7A5D40060C325 /* Main.storyboard in Resources */,
27FDC7D41F0E3BA40016D57C /* MyTask@2x.png in Resources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
27FBBD221EE7A5D40060C325 /* Resources */ = {
isa = PBXResourcesBuildPhase;
buildActionMask = 2147483647;
files = (
);
runOnlyForDeploymentPostprocessing = 0;
};
27FBBD2D1EE7A5D40060C325 /* Resources */ = {
isa = PBXResourcesBuildPhase;
buildActionMask = 2147483647;
files = (
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXResourcesBuildPhase section */
/* Begin PBXSourcesBuildPhase section */
27FBBD071EE7A5D40060C325 /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
27FDC7D11F0E381D0016D57C /* HomeCollectionCell.m in Sources */,
27FBBD851EEA73E40060C325 /* MASViewConstraint.m in Sources */,
27FBBD8E1EEA77F70060C325 /* HomeTableViewCell.m in Sources */,
27FBBD841EEA73E40060C325 /* MASViewAttribute.m in Sources */,
27FBBD9D1EEA79350060C325 /* HomeViewController.m in Sources */,
27FBBD611EEA71C00060C325 /* HomeTitle.m in Sources */,
27FBBD801EEA73E40060C325 /* MASCompositeConstraint.m in Sources */,
27FBBD821EEA73E40060C325 /* MASConstraintMaker.m in Sources */,
27FBBD881EEA73E40060C325 /* View+MASAdditions.m in Sources */,
27FBBD831EEA73E40060C325 /* MASLayoutConstraint.m in Sources */,
27FBBD891EEA73E40060C325 /* ViewController+MASAdditions.m in Sources */,
27FBBD811EEA73E40060C325 /* MASConstraint.m in Sources */,
27FBBD131EE7A5D40060C325 /* AppDelegate.m in Sources */,
27FBBD591EE7DF100060C325 /* ViewController1.m in Sources */,
27FBBD861EEA73E40060C325 /* NSArray+MASAdditions.m in Sources */,
27FBBD871EEA73E40060C325 /* NSLayoutConstraint+MASDebugAdditions.m in Sources */,
27FBBD101EE7A5D40060C325 /* main.m in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
27FBBD201EE7A5D40060C325 /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
27FBBD291EE7A5D40060C325 /* tabelVew__CollectionViewTests.m in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
27FBBD2B1EE7A5D40060C325 /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
27FBBD341EE7A5D40060C325 /* tabelVew__CollectionViewUITests.m in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXSourcesBuildPhase section */
/* Begin PBXTargetDependency section */
27FBBD261EE7A5D40060C325 /* PBXTargetDependency */ = {
isa = PBXTargetDependency;
target = 27FBBD0A1EE7A5D40060C325 /* tabelVew嵌套CollectionView */;
targetProxy = 27FBBD251EE7A5D40060C325 /* PBXContainerItemProxy */;
};
27FBBD311EE7A5D40060C325 /* PBXTargetDependency */ = {
isa = PBXTargetDependency;
target = 27FBBD0A1EE7A5D40060C325 /* tabelVew嵌套CollectionView */;
targetProxy = 27FBBD301EE7A5D40060C325 /* PBXContainerItemProxy */;
};
/* End PBXTargetDependency section */
/* Begin PBXVariantGroup section */
27FBBD171EE7A5D40060C325 /* Main.storyboard */ = {
isa = PBXVariantGroup;
children = (
27FBBD181EE7A5D40060C325 /* Base */,
);
name = Main.storyboard;
sourceTree = "<group>";
};
27FBBD1C1EE7A5D40060C325 /* LaunchScreen.storyboard */ = {
isa = PBXVariantGroup;
children = (
27FBBD1D1EE7A5D40060C325 /* Base */,
);
name = LaunchScreen.storyboard;
sourceTree = "<group>";
};
/* End PBXVariantGroup section */
/* Begin XCBuildConfiguration section */
27FBBD361EE7A5D40060C325 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
ALWAYS_SEARCH_USER_PATHS = NO;
CLANG_ANALYZER_NONNULL = YES;
CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
CLANG_CXX_LIBRARY = "libc++";
CLANG_ENABLE_MODULES = YES;
CLANG_ENABLE_OBJC_ARC = YES;
CLANG_WARN_BOOL_CONVERSION = YES;
CLANG_WARN_CONSTANT_CONVERSION = YES;
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
CLANG_WARN_EMPTY_BODY = YES;
CLANG_WARN_ENUM_CONVERSION = YES;
CLANG_WARN_INFINITE_RECURSION = YES;
CLANG_WARN_INT_CONVERSION = YES;
CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
CLANG_WARN_SUSPICIOUS_MOVE = YES;
CLANG_WARN_UNREACHABLE_CODE = YES;
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
"CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
COPY_PHASE_STRIP = NO;
DEBUG_INFORMATION_FORMAT = dwarf;
ENABLE_STRICT_OBJC_MSGSEND = YES;
ENABLE_TESTABILITY = YES;
GCC_C_LANGUAGE_STANDARD = gnu99;
GCC_DYNAMIC_NO_PIC = NO;
GCC_NO_COMMON_BLOCKS = YES;
GCC_OPTIMIZATION_LEVEL = 0;
GCC_PREPROCESSOR_DEFINITIONS = (
"DEBUG=1",
"$(inherited)",
);
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
GCC_WARN_UNDECLARED_SELECTOR = YES;
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
GCC_WARN_UNUSED_FUNCTION = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
IPHONEOS_DEPLOYMENT_TARGET = 10.3;
MTL_ENABLE_DEBUG_INFO = YES;
ONLY_ACTIVE_ARCH = YES;
SDKROOT = iphoneos;
TARGETED_DEVICE_FAMILY = "1,2";
};
name = Debug;
};
27FBBD371EE7A5D40060C325 /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
ALWAYS_SEARCH_USER_PATHS = NO;
CLANG_ANALYZER_NONNULL = YES;
CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
CLANG_CXX_LIBRARY = "libc++";
CLANG_ENABLE_MODULES = YES;
CLANG_ENABLE_OBJC_ARC = YES;
CLANG_WARN_BOOL_CONVERSION = YES;
CLANG_WARN_CONSTANT_CONVERSION = YES;
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
CLANG_WARN_EMPTY_BODY = YES;
CLANG_WARN_ENUM_CONVERSION = YES;
CLANG_WARN_INFINITE_RECURSION = YES;
CLANG_WARN_INT_CONVERSION = YES;
CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
CLANG_WARN_SUSPICIOUS_MOVE = YES;
CLANG_WARN_UNREACHABLE_CODE = YES;
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
"CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
COPY_PHASE_STRIP = NO;
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
ENABLE_NS_ASSERTIONS = NO;
ENABLE_STRICT_OBJC_MSGSEND = YES;
GCC_C_LANGUAGE_STANDARD = gnu99;
GCC_NO_COMMON_BLOCKS = YES;
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
GCC_WARN_UNDECLARED_SELECTOR = YES;
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
GCC_WARN_UNUSED_FUNCTION = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
IPHONEOS_DEPLOYMENT_TARGET = 10.3;
MTL_ENABLE_DEBUG_INFO = NO;
SDKROOT = iphoneos;
TARGETED_DEVICE_FAMILY = "1,2";
VALIDATE_PRODUCT = YES;
};
name = Release;
};
27FBBD391EE7A5D40060C325 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
DEVELOPMENT_TEAM = HWQF636ATC;
INFOPLIST_FILE = "tabelVew嵌套CollectionView/Info.plist";
IPHONEOS_DEPLOYMENT_TARGET = 8.0;
LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks";
PRODUCT_BUNDLE_IDENTIFIER = "com.Plan.tabelVew--CollectionView";
PRODUCT_NAME = "$(TARGET_NAME)";
};
name = Debug;
};
27FBBD3A1EE7A5D40060C325 /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
DEVELOPMENT_TEAM = HWQF636ATC;
INFOPLIST_FILE = "tabelVew嵌套CollectionView/Info.plist";
IPHONEOS_DEPLOYMENT_TARGET = 8.0;
LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks";
PRODUCT_BUNDLE_IDENTIFIER = "com.Plan.tabelVew--CollectionView";
PRODUCT_NAME = "$(TARGET_NAME)";
};
name = Release;
};
27FBBD3C1EE7A5D40060C325 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
BUNDLE_LOADER = "$(TEST_HOST)";
DEVELOPMENT_TEAM = HWQF636ATC;
INFOPLIST_FILE = "tabelVew嵌套CollectionViewTests/Info.plist";
LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks";
PRODUCT_BUNDLE_IDENTIFIER = "com.Plan.tabelVew--CollectionViewTests";
PRODUCT_NAME = "$(TARGET_NAME)";
TEST_HOST = "$(BUILT_PRODUCTS_DIR)/tabelVew嵌套CollectionView.app/tabelVew嵌套CollectionView";
};
name = Debug;
};
27FBBD3D1EE7A5D40060C325 /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
BUNDLE_LOADER = "$(TEST_HOST)";
DEVELOPMENT_TEAM = HWQF636ATC;
INFOPLIST_FILE = "tabelVew嵌套CollectionViewTests/Info.plist";
LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks";
PRODUCT_BUNDLE_IDENTIFIER = "com.Plan.tabelVew--CollectionViewTests";
PRODUCT_NAME = "$(TARGET_NAME)";
TEST_HOST = "$(BUILT_PRODUCTS_DIR)/tabelVew嵌套CollectionView.app/tabelVew嵌套CollectionView";
};
name = Release;
};
27FBBD3F1EE7A5D40060C325 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
DEVELOPMENT_TEAM = HWQF636ATC;
INFOPLIST_FILE = "tabelVew嵌套CollectionViewUITests/Info.plist";
LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks";
PRODUCT_BUNDLE_IDENTIFIER = "com.Plan.tabelVew--CollectionViewUITests";
PRODUCT_NAME = "$(TARGET_NAME)";
TEST_TARGET_NAME = "tabelVew嵌套CollectionView";
};
name = Debug;
};
27FBBD401EE7A5D40060C325 /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
DEVELOPMENT_TEAM = HWQF636ATC;
INFOPLIST_FILE = "tabelVew嵌套CollectionViewUITests/Info.plist";
LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks";
PRODUCT_BUNDLE_IDENTIFIER = "com.Plan.tabelVew--CollectionViewUITests";
PRODUCT_NAME = "$(TARGET_NAME)";
TEST_TARGET_NAME = "tabelVew嵌套CollectionView";
};
name = Release;
};
/* End XCBuildConfiguration section */
/* Begin XCConfigurationList section */
27FBBD061EE7A5D40060C325 /* Build configuration list for PBXProject "tabelVew嵌套CollectionView" */ = {
isa = XCConfigurationList;
buildConfigurations = (
27FBBD361EE7A5D40060C325 /* Debug */,
27FBBD371EE7A5D40060C325 /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
27FBBD381EE7A5D40060C325 /* Build configuration list for PBXNativeTarget "tabelVew嵌套CollectionView" */ = {
isa = XCConfigurationList;
buildConfigurations = (
27FBBD391EE7A5D40060C325 /* Debug */,
27FBBD3A1EE7A5D40060C325 /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
27FBBD3B1EE7A5D40060C325 /* Build configuration list for PBXNativeTarget "tabelVew嵌套CollectionViewTests" */ = {
isa = XCConfigurationList;
buildConfigurations = (
27FBBD3C1EE7A5D40060C325 /* Debug */,
27FBBD3D1EE7A5D40060C325 /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
27FBBD3E1EE7A5D40060C325 /* Build configuration list for PBXNativeTarget "tabelVew嵌套CollectionViewUITests" */ = {
isa = XCConfigurationList;
buildConfigurations = (
27FBBD3F1EE7A5D40060C325 /* Debug */,
27FBBD401EE7A5D40060C325 /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
/* End XCConfigurationList section */
};
rootObject = 27FBBD031EE7A5D40060C325 /* Project object */;
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 13,707 | src/transformers/models/pegasus/tokenization_pegasus.py | # coding=utf-8
# Copyright 2020 Google and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# SentencePiece's meta symbol marking the start of a word piece.
SPIECE_UNDERLINE = "▁"

# Name of the serialized SentencePiece model file expected on disk / on the Hub.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

# Hub URL of the pretrained vocabulary for the reference checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

# Maximum sequence length supported by each pretrained checkpoint's position embeddings.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    r"""
    Construct a PEGASUS tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        mask_token (`str`, *optional*, defaults to `"<mask_2>"`):
            The token used for masking single token values. This is the token used when training this model with masked
            language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining.
            It corresponds to *[MASK2]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive
            Summarization](https://arxiv.org/pdf/1912.08777.pdf).
        mask_token_sent (`str`, *optional*, defaults to `"<mask_1>"`):
            The token used for masking whole target sentences. This is the token used when training this model with gap
            sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during
            pretraining. It corresponds to *[MASK1]* in [PEGASUS: Pre-training with Extracted Gap-sentences for
            Abstractive Summarization](https://arxiv.org/pdf/1912.08777.pdf).
        additional_special_tokens (`List[str]`, *optional*):
            Additional special tokens used by the tokenizer. If no additional_special_tokens are provided <mask_2> and
            <unk_2, ..., unk_102> are used as additional special tokens corresponding to the [original PEGASUS
            tokenizer](https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66)
            that uses the tokens 2 - 104 only for pretraining
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
                using forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.
    """

    # NOTE: the original file assigned `vocab_files_names` twice back to back; the
    # redundant duplicate assignment has been removed (no behavior change).
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,  # entries 2 - 104 are only used for pretraining
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            # Ensure the sentence-mask token is part of the special-token list.
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        # NOTE: despite its name, `encoder` maps id -> token (and `decoder` maps
        # token -> id); the naming is inherited from the original implementation.
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        """Size of the full vocabulary: SentencePiece pieces shifted by `offset` reserved ids."""
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is a C++ object and cannot be pickled;
        # drop it and reload it from `vocab_file` in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words"""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        # NOTE(review): `added_tokens_decoder` is keyed by id upstream, so this
        # branch looks unreachable for a str token — kept as-is to preserve behavior.
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        # Shift SentencePiece ids past the reserved special-token range.
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        # NOTE(review): `added_tokens_encoder` is keyed by token string upstream, so
        # this branch looks unreachable for an int index — kept as-is to preserve behavior.
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS"""
        return 1

    def _special_token_mask(self, seq):
        """Return a 0/1 mask over `seq`, with 1 for every special-token id except <unk>."""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get list where entries are [1] if a token is [eos] or [pad] else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            # Trailing [1] accounts for the EOS appended by build_inputs_with_special_tokens.
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A PEGASUS sequence has the following format, where `X` represents the sequence:

        - single sequence: `X </s>`
        - pair of sequences: `A B </s>` (not intended use)

        BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
        separator.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save the SentencePiece vocabulary to `save_directory`, either by copying the
        original model file or by serializing the in-memory model.

        Returns:
            `Tuple[str]`: Path of the saved vocabulary file, or `None` if `save_directory` is invalid.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # Source file is gone (e.g. loaded from a serialized proto): write the model bytes directly.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
|
2877025939/tabelVew-CollectionView | 2,201 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/HomeTitle.m | //
// HomeTitle.m
// tabelVew嵌套CollectionView
//
// Created by anan on 2017/6/9.
// Copyright © 2017年 Plan. All rights reserved.
//
#import "HomeTitle.h"
#import "Masonry.h"
@implementation HomeTitle

// Designated UIView initializer; builds the subview hierarchy once.
-(instancetype)initWithFrame:(CGRect)frame{
    self = [super initWithFrame:frame];
    if (self) {
        [self createUI];
    }
    return self;
}

// Adds the title label, bottom line, and "mine" button, then lays them out
// with Masonry: button pinned bottom-left (44x44), title centered at the
// bottom (100x44), line stretched along the bottom edge.
-(void)createUI{
    [self addSubview:self.titleLable];
    [self addSubview:self.line];
    [self addSubview:self.mine];
    [self.mine mas_makeConstraints:^(MASConstraintMaker *make) {
        make.left.equalTo(self).offset(10);
        make.bottom.equalTo(self).offset(0);
        make.size.mas_equalTo(CGSizeMake(44, 44));
    }];
    [self.titleLable mas_makeConstraints:^(MASConstraintMaker *make) {
        make.bottom.equalTo(self).offset(0);
        make.centerX.equalTo(self);
        make.size.mas_equalTo(CGSizeMake(100, 44));
    }];
    [self.line mas_makeConstraints:^(MASConstraintMaker *make) {
        // -0.3pt overlap so the hairline sits flush under the label.
        make.top.equalTo(self.titleLable.mas_bottom).offset(-0.3);
        make.left.right.bottom.equalTo(self);
    }];
}
// Button action: forwards the tap to the owner via the btnClick block.
-(void)test:(UIButton *)sender{
    NSLog(@"button");
    // Invoke the callback block if the owner set one.
    if (self.btnClick) {
        self.btnClick();
    }
}

// Lazily-created centered title label (white bold "plan" text, clear background).
-(UILabel *)titleLable{
    if (!_titleLable) {
        _titleLable = [[UILabel alloc]init];
        _titleLable.backgroundColor = [UIColor clearColor];
        _titleLable.textColor = [UIColor whiteColor];
        _titleLable.textAlignment = NSTextAlignmentCenter;
        _titleLable.font = [UIFont fontWithName:@"Helvetica-Bold" size:20];
        _titleLable.text =@"plan";
    }
    return _titleLable;
}

// Lazily-created separator label.
// NOTE(review): background is clearColor, so the "line" is invisible as
// written — presumably intentional as a spacer, confirm with the designer.
-(UILabel *)line{
    if (!_line) {
        _line = [[UILabel alloc]init];
        _line.backgroundColor = [UIColor clearColor];
    }
    return _line;
}

// Lazily-created button showing the "111" image asset; taps trigger test:.
-(UIButton *)mine{
    if (!_mine) {
        _mine = [[UIButton alloc]init];
        [_mine setImage:[UIImage imageNamed:@"111"] forState:UIControlStateNormal];
        [_mine addTarget:self action:@selector(test:) forControlEvents:UIControlEventTouchUpInside];
    }
    return _mine;
}

@end
|
2877025939/tabelVew-CollectionView | 2,301 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/AppDelegate.m | //
// AppDelegate.m
// tabelVew嵌套CollectionView
//
// Created by anan on 2017/6/7.
// Copyright © 2017年 Plan. All rights reserved.
//
#import "AppDelegate.h"
#import "HomeViewController.h"
@interface AppDelegate ()
@end
@implementation AppDelegate

// App entry point: wraps HomeViewController in a navigation controller and
// installs it as the window's root.
// NOTE(review): self.window is never allocated and makeKeyAndVisible is not
// called here — presumably the Main storyboard (declared in the project)
// creates and shows the window before this override takes effect; confirm
// UIMainStoryboardFile is set in Info.plist.
- (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions {
    // Override point for customization after application launch.
    HomeViewController *VC = [[HomeViewController alloc]init];
    UINavigationController *nav = [[UINavigationController alloc]initWithRootViewController:VC];
    self.window.rootViewController = nav;
    return YES;
}

- (void)applicationWillResignActive:(UIApplication *)application {
    // Sent when the application is about to move from active to inactive state. This can occur for certain types of temporary interruptions (such as an incoming phone call or SMS message) or when the user quits the application and it begins the transition to the background state.
    // Use this method to pause ongoing tasks, disable timers, and invalidate graphics rendering callbacks. Games should use this method to pause the game.
}

- (void)applicationDidEnterBackground:(UIApplication *)application {
    // Use this method to release shared resources, save user data, invalidate timers, and store enough application state information to restore your application to its current state in case it is terminated later.
    // If your application supports background execution, this method is called instead of applicationWillTerminate: when the user quits.
}

- (void)applicationWillEnterForeground:(UIApplication *)application {
    // Called as part of the transition from the background to the active state; here you can undo many of the changes made on entering the background.
}

- (void)applicationDidBecomeActive:(UIApplication *)application {
    // Restart any tasks that were paused (or not yet started) while the application was inactive. If the application was previously in the background, optionally refresh the user interface.
}

- (void)applicationWillTerminate:(UIApplication *)application {
    // Called when the application is about to terminate. Save data if appropriate. See also applicationDidEnterBackground:.
}

@end
|
27182812/ChatGLM-LLaMA-chinese-insturct | 66,013 | src/transformers/models/pegasus/modeling_flax_pegasus.py | # coding=utf-8
# Copyright 2021, Google and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Flax PEGASUS model."""
import math
import random
from functools import partial
from typing import Callable, Optional, Tuple
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.linen import combine_masks, make_causal_mask
from flax.linen.attention import dot_product_attention_weights
from flax.traverse_util import flatten_dict, unflatten_dict
from jax import lax
from jax.random import PRNGKey
from ...modeling_flax_outputs import (
FlaxBaseModelOutput,
FlaxBaseModelOutputWithPastAndCrossAttentions,
FlaxCausalLMOutputWithCrossAttentions,
FlaxSeq2SeqLMOutput,
FlaxSeq2SeqModelOutput,
)
from ...modeling_flax_utils import (
ACT2FN,
FlaxPreTrainedModel,
add_start_docstrings_to_model_forward,
append_call_sample_docstring,
append_replace_return_docstrings,
overwrite_call_docstring,
)
from ...utils import add_start_docstrings, logging, replace_return_docstrings
from .configuration_pegasus import PegasusConfig
logger = logging.get_logger(__name__)
# Checkpoint / config names referenced by the auto-generated usage examples in the docstrings below.
_CHECKPOINT_FOR_DOC = "google/pegasus-large"
_CONFIG_FOR_DOC = "PegasusConfig"
PEGASUS_START_DOCSTRING = r"""
This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a Flax Linen
[flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
Finally, this model supports inherent JAX features such as:
- [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
- [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
- [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
- [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
Parameters:
config ([`PegasusConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
`jax.numpy.bfloat16` (on TPUs).
This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
specified all the computation will be performed with the given `dtype`.
**Note that this only specifies the dtype of the computation and does not influence the dtype of model
parameters.**
If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
[`~FlaxPreTrainedModel.to_bf16`].
"""
PEGASUS_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the
paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
range `[0, config.max_position_embeddings - 1]`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
PEGASUS_ENCODE_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
PEGASUS_DECODE_INPUTS_DOCSTRING = r"""
Args:
decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
encoder_outputs (`tuple(tuple(jnp.ndarray)`):
Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the
paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
range `[0, config.max_position_embeddings - 1]`.
past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
# Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
def shift_tokens_right(input_ids: np.array, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    """
    Shift input ids one token to the right.

    Column 0 becomes `decoder_start_token_id`, the last column of `input_ids` is
    dropped, and any `-100` label-ignore markers in the result are replaced by
    `pad_token_id`.
    """
    shifted = np.zeros_like(input_ids)
    shifted[:, 0] = decoder_start_token_id
    shifted[:, 1:] = input_ids[:, :-1]
    return np.where(shifted == -100, pad_token_id, shifted)
# Copied from transformers.models.marian.modeling_flax_marian.create_sinusoidal_positions
def create_sinusoidal_positions(n_pos, dim, dtype):
position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
sentinel = dim // 2 + dim % 2
out = np.zeros_like(position_enc)
out[:, 0:sentinel] = np.sin(position_enc[:, 0::2])
out[:, sentinel:] = np.cos(position_enc[:, 1::2])
return jnp.array(out)
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention with Bart->Pegasus
class FlaxPegasusAttention(nn.Module):
    """Multi-head dot-product attention, usable as self- or cross-attention.

    When `causal=True` a lower-triangular mask is applied and key/value states
    can be cached (in the "cache" variable collection) for fast auto-regressive
    decoding.
    """

    config: PegasusConfig
    embed_dim: int
    num_heads: int
    dropout: float = 0.0
    causal: bool = False
    bias: bool = True
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self) -> None:
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {self.num_heads})."
            )
        # q/k/v/out projections all share the same shape, bias setting and initializer.
        dense = partial(
            nn.Dense,
            self.embed_dim,
            use_bias=self.bias,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.init_std),
        )
        self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
        self.out_proj = dense()
        self.dropout_layer = nn.Dropout(rate=self.dropout)
        if self.causal:
            # Boolean lower-triangular mask over the maximum decoding length.
            self.causal_mask = make_causal_mask(
                jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
            )

    def _split_heads(self, hidden_states):
        # (batch, seq, embed_dim) -> (batch, seq, num_heads, head_dim)
        return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))

    def _merge_heads(self, hidden_states):
        # (batch, seq, num_heads, head_dim) -> (batch, seq, embed_dim)
        return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))

    @nn.compact
    def _concatenate_to_cache(self, key, value, query, attention_mask):
        """
        This function takes projected key, value states from a single input token and concatenates the states to cached
        states from previous steps. This function is slighly adapted from the official Flax repository:
        https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
        """
        # detect if we're initializing by absence of existing cache data.
        is_initialized = self.has_variable("cache", "cached_key")
        cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
        cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
        cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
        if is_initialized:
            *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
            # update key, value caches with our new 1d spatial slices
            cur_index = cache_index.value
            indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
            key = lax.dynamic_update_slice(cached_key.value, key, indices)
            value = lax.dynamic_update_slice(cached_value.value, value, indices)
            cached_key.value = key
            cached_value.value = value
            num_updated_cache_vectors = query.shape[1]
            cache_index.value = cache_index.value + num_updated_cache_vectors
            # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
            pad_mask = jnp.broadcast_to(
                jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
                tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
            )
            attention_mask = combine_masks(pad_mask, attention_mask)
        return key, value, attention_mask

    def __call__(
        self,
        hidden_states: jnp.ndarray,
        key_value_states: Optional[jnp.ndarray] = None,
        attention_mask: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        deterministic: bool = True,
    ) -> Tuple[jnp.ndarray]:
        """Input shape: Batch x Time x Channel"""
        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        batch_size = hidden_states.shape[0]
        # get query proj
        query_states = self.q_proj(hidden_states)
        # get key, value proj
        if is_cross_attention:
            # cross_attentions
            key_states = self.k_proj(key_value_states)
            value_states = self.v_proj(key_value_states)
        else:
            # self_attention
            key_states = self.k_proj(hidden_states)
            value_states = self.v_proj(hidden_states)
        query_states = self._split_heads(query_states)
        key_states = self._split_heads(key_states)
        value_states = self._split_heads(value_states)
        # handle cache prepare causal attention mask
        if self.causal:
            query_length, key_length = query_states.shape[1], key_states.shape[1]
            if self.has_variable("cache", "cached_key"):
                # Incremental decoding: slide the causal window to the current cache position.
                mask_shift = self.variables["cache"]["cache_index"]
                max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
                causal_mask = lax.dynamic_slice(
                    self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
                )
            else:
                causal_mask = self.causal_mask[:, :, :query_length, :key_length]
            causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
        # combine masks if needed
        if attention_mask is not None and self.causal:
            attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
            attention_mask = combine_masks(attention_mask, causal_mask)
        elif self.causal:
            attention_mask = causal_mask
        elif attention_mask is not None:
            attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
        # During fast autoregressive decoding, we feed one position at a time,
        # and cache the keys and values step by step.
        if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
            key_states, value_states, attention_mask = self._concatenate_to_cache(
                key_states, value_states, query_states, attention_mask
            )
        # Convert the boolean attention mask to an attention bias.
        if attention_mask is not None:
            # attention mask in the form of attention bias
            attention_bias = lax.select(
                attention_mask > 0,
                jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
                jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
            )
        else:
            attention_bias = None
        dropout_rng = None
        if not deterministic and self.dropout > 0.0:
            dropout_rng = self.make_rng("dropout")
        attn_weights = dot_product_attention_weights(
            query_states,
            key_states,
            bias=attention_bias,
            dropout_rng=dropout_rng,
            dropout_rate=self.dropout,
            broadcast_dropout=True,
            deterministic=deterministic,
            dtype=self.dtype,
            precision=None,
        )
        # Weighted sum over values, then merge heads and project back to embed_dim.
        attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
        attn_output = self._merge_heads(attn_output)
        attn_output = self.out_proj(attn_output)
        return attn_output, attn_weights
# Copied from transformers.models.mbart.modeling_flax_mbart.FlaxMBartEncoderLayer with MBart->Pegasus
class FlaxPegasusEncoderLayer(nn.Module):
    """Pre-LayerNorm Transformer encoder block: self-attention then feed-forward, each with a residual connection."""

    config: PegasusConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.embed_dim = self.config.d_model
        self.self_attn = FlaxPegasusAttention(
            config=self.config,
            embed_dim=self.embed_dim,
            num_heads=self.config.encoder_attention_heads,
            dropout=self.config.attention_dropout,
            dtype=self.dtype,
        )
        self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
        self.dropout_layer = nn.Dropout(rate=self.config.dropout)
        self.activation_fn = ACT2FN[self.config.activation_function]
        self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
        self.fc1 = nn.Dense(
            self.config.encoder_ffn_dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.init_std),
        )
        self.fc2 = nn.Dense(
            self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
        )
        self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)

    def __call__(
        self,
        hidden_states: jnp.ndarray,
        attention_mask: jnp.ndarray,
        output_attentions: bool = True,
        deterministic: bool = True,
    ) -> Tuple[jnp.ndarray]:
        """Run one encoder block; returns `(hidden_states,)` plus attention weights when requested."""
        # Self-attention sub-block (pre-norm + residual).
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask)
        hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
        hidden_states = residual + hidden_states
        # Feed-forward sub-block (pre-norm + residual).
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
        hidden_states = self.fc2(hidden_states)
        hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
        hidden_states = residual + hidden_states
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayerCollection with Bart->Pegasus
class FlaxPegasusEncoderLayerCollection(nn.Module):
    """Stack of `config.encoder_layers` encoder blocks with optional train-time LayerDrop."""

    config: PegasusConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.layers = [
            FlaxPegasusEncoderLayer(self.config, name=str(i), dtype=self.dtype)
            for i in range(self.config.encoder_layers)
        ]
        self.layerdrop = self.config.encoder_layerdrop

    def __call__(
        self,
        hidden_states,
        attention_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        all_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for encoder_layer in self.layers:
            if output_hidden_states:
                # Record the hidden state *entering* each layer.
                all_hidden_states = all_hidden_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if not deterministic and (dropout_probability < self.layerdrop):  # skip the layer
                layer_outputs = (None, None)
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    output_attentions,
                    deterministic,
                )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            # Append the output of the final layer.
            all_hidden_states += (hidden_states,)
        outputs = (hidden_states, all_hidden_states, all_attentions)
        if not return_dict:
            return tuple(v for v in outputs if v is not None)
        return FlaxBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )
# Copied from transformers.models.mbart.modeling_flax_mbart.FlaxMBartDecoderLayer with MBart->Pegasus
class FlaxPegasusDecoderLayer(nn.Module):
    """Pre-LayerNorm Transformer decoder block: causal self-attention, cross-attention, then feed-forward."""

    config: PegasusConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.embed_dim = self.config.d_model
        self.self_attn = FlaxPegasusAttention(
            config=self.config,
            embed_dim=self.embed_dim,
            num_heads=self.config.decoder_attention_heads,
            dropout=self.config.attention_dropout,
            causal=True,
            dtype=self.dtype,
        )
        self.dropout_layer = nn.Dropout(rate=self.config.dropout)
        self.activation_fn = ACT2FN[self.config.activation_function]
        self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
        self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
        self.encoder_attn = FlaxPegasusAttention(
            config=self.config,
            embed_dim=self.embed_dim,
            num_heads=self.config.decoder_attention_heads,
            dropout=self.config.attention_dropout,
            dtype=self.dtype,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
        self.fc1 = nn.Dense(
            self.config.decoder_ffn_dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.init_std),
        )
        self.fc2 = nn.Dense(
            self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
        )
        self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)

    def __call__(
        self,
        hidden_states: jnp.ndarray,
        attention_mask: jnp.ndarray,
        encoder_hidden_states: Optional[jnp.ndarray] = None,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        output_attentions: bool = True,
        deterministic: bool = True,
    ) -> Tuple[jnp.ndarray]:
        """Run one decoder block; cross-attention is skipped when `encoder_hidden_states` is None."""
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache
        )
        hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
        hidden_states = residual + hidden_states
        # Cross-Attention Block
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)
            hidden_states, cross_attn_weights = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
            )
            hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
            hidden_states = residual + hidden_states
        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
        hidden_states = self.fc2(hidden_states)
        hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
        hidden_states = residual + hidden_states
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)
        return outputs
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayerCollection with Bart->Pegasus
class FlaxPegasusDecoderLayerCollection(nn.Module):
    """Stack of `config.decoder_layers` decoder blocks with optional train-time LayerDrop."""

    config: PegasusConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.layers = [
            FlaxPegasusDecoderLayer(self.config, name=str(i), dtype=self.dtype)
            for i in range(self.config.decoder_layers)
        ]
        self.layerdrop = self.config.decoder_layerdrop

    def __call__(
        self,
        hidden_states,
        attention_mask,
        encoder_hidden_states: Optional[jnp.ndarray] = None,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        deterministic: bool = True,
        init_cache: bool = False,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
        for decoder_layer in self.layers:
            if output_hidden_states:
                # Record the hidden state *entering* each layer.
                all_hidden_states += (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if not deterministic and (dropout_probability < self.layerdrop):
                layer_outputs = (None, None, None)
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    init_cache=init_cache,
                    output_attentions=output_attentions,
                    deterministic=deterministic,
                )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attns += (layer_outputs[1],)
                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)
        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions]
        if not return_dict:
            return tuple(v for v in outputs if v is not None)
        return FlaxBaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )
class FlaxPegasusEncoder(nn.Module):
    """Pegasus encoder: scaled token embeddings + fixed sinusoidal positions, layer stack, final LayerNorm."""

    config: PegasusConfig
    embed_tokens: nn.Embed
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dropout_layer = nn.Dropout(rate=self.config.dropout)
        embed_dim = self.config.d_model
        self.padding_idx = self.config.pad_token_id
        self.max_source_positions = self.config.max_position_embeddings
        self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
        # Static (non-learned) sinusoidal position table; not part of the parameter tree.
        self.embed_positions = create_sinusoidal_positions(
            self.config.max_position_embeddings, embed_dim, dtype=self.dtype
        )
        self.layers = FlaxPegasusEncoderLayerCollection(self.config, self.dtype)
        self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)

    def __call__(
        self,
        input_ids,
        attention_mask,
        position_ids,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        deterministic: bool = True,
    ):
        input_shape = input_ids.shape
        input_ids = input_ids.reshape(-1, input_shape[-1])
        inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        # embed positions
        embed_pos = jnp.take(self.embed_positions, position_ids, axis=0)
        # explicitly cast the positions here, since self.embed_positions are not registered as parameters
        embed_pos = embed_pos.astype(inputs_embeds.dtype)
        hidden_states = inputs_embeds + embed_pos
        hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
        outputs = self.layers(
            hidden_states,
            attention_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        last_hidden_state = outputs[0]
        last_hidden_state = self.layer_norm(last_hidden_state)
        # update the last element in `hidden_states` after applying `layernorm` above
        hidden_states = None
        if output_hidden_states:
            hidden_states = outputs[1]
            hidden_states = hidden_states[:-1] + (last_hidden_state,)
        if not return_dict:
            outputs = (last_hidden_state, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:])
            return tuple(v for v in outputs if v is not None)
        return FlaxBaseModelOutput(
            last_hidden_state=last_hidden_state,
            hidden_states=hidden_states,
            attentions=outputs.attentions,
        )
class FlaxPegasusDecoder(nn.Module):
    """Pegasus decoder: scaled token embeddings + fixed sinusoidal positions, layer stack, final LayerNorm."""

    config: PegasusConfig
    embed_tokens: nn.Embed
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dropout_layer = nn.Dropout(rate=self.config.dropout)
        embed_dim = self.config.d_model
        self.padding_idx = self.config.pad_token_id
        self.max_target_positions = self.config.max_position_embeddings
        self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
        # Static (non-learned) sinusoidal position table; not part of the parameter tree.
        self.embed_positions = create_sinusoidal_positions(
            self.config.max_position_embeddings, embed_dim, dtype=self.dtype
        )
        self.layers = FlaxPegasusDecoderLayerCollection(self.config, self.dtype)
        self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)

    def __call__(
        self,
        input_ids,
        attention_mask,
        position_ids,
        encoder_hidden_states: Optional[jnp.ndarray] = None,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        deterministic: bool = True,
    ):
        input_shape = input_ids.shape
        input_ids = input_ids.reshape(-1, input_shape[-1])
        inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        # embed positions
        positions = jnp.take(self.embed_positions, position_ids, axis=0)
        # explicitly cast the positions here, since self.embed_positions are not registered as parameters
        positions = positions.astype(inputs_embeds.dtype)
        hidden_states = inputs_embeds + positions
        hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
        outputs = self.layers(
            hidden_states,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            deterministic=deterministic,
            init_cache=init_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        last_hidden_state = outputs[0]
        last_hidden_state = self.layer_norm(last_hidden_state)
        # update the last element in `hidden_states` after applying `layernorm` above
        hidden_states = None
        if output_hidden_states:
            hidden_states = outputs[1]
            hidden_states = hidden_states[:-1] + (last_hidden_state,)
        if not return_dict:
            outputs = (last_hidden_state, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:])
            return tuple(v for v in outputs if v is not None)
        return FlaxBaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=last_hidden_state,
            hidden_states=hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartModule with Bart->Pegasus
class FlaxPegasusModule(nn.Module):
    """Bare Pegasus encoder-decoder (no LM head); encoder and decoder share one token embedding table."""

    config: PegasusConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        # Shared input embedding used by both the encoder and the decoder.
        self.shared = nn.Embed(
            self.config.vocab_size,
            self.config.d_model,
            embedding_init=jax.nn.initializers.normal(self.config.init_std),
            dtype=self.dtype,
        )
        self.encoder = FlaxPegasusEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
        self.decoder = FlaxPegasusDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared)

    def _get_encoder_module(self):
        return self.encoder

    def _get_decoder_module(self):
        return self.decoder

    def __call__(
        self,
        input_ids,
        attention_mask,
        decoder_input_ids,
        decoder_attention_mask,
        position_ids,
        decoder_position_ids,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        deterministic: bool = True,
    ):
        encoder_outputs = self.encoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=deterministic,
        )
        # The decoder cross-attends to the encoder's last hidden state,
        # masked by the *encoder* attention mask.
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            position_ids=decoder_position_ids,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=deterministic,
        )
        if not return_dict:
            return decoder_outputs + encoder_outputs
        return FlaxSeq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
class FlaxPegasusPreTrainedModel(FlaxPreTrainedModel):
config_class = PegasusConfig
base_model_prefix: str = "model"
module_class: nn.Module = None
    def __init__(
        self,
        config: PegasusConfig,
        input_shape: Tuple[int] = (1, 1),
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        # Instantiate the concrete Flax module (subclasses set `module_class`) and
        # delegate parameter initialization/loading to FlaxPreTrainedModel.
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
    def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        """Initialize model parameters with dummy inputs.

        When `params` is given, only the keys recorded in `self._missing_keys`
        are filled in from the freshly initialized parameters.
        """
        # init input tensors
        input_ids = jnp.zeros(input_shape, dtype="i4")
        attention_mask = jnp.ones_like(input_ids)
        # The decoder side reuses the same dummy ids/mask shapes.
        decoder_input_ids = input_ids
        decoder_attention_mask = jnp.ones_like(input_ids)
        batch_size, sequence_length = input_ids.shape
        position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
        decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        random_params = self.module.init(
            rngs,
            input_ids,
            attention_mask,
            decoder_input_ids,
            decoder_attention_mask,
            position_ids,
            decoder_position_ids,
        )["params"]
        if params is not None:
            # Complete a partially loaded parameter tree with freshly initialized values.
            random_params = flatten_dict(unfreeze(random_params))
            params = flatten_dict(unfreeze(params))
            for missing_key in self._missing_keys:
                params[missing_key] = random_params[missing_key]
            self._missing_keys = set()
            return freeze(unflatten_dict(params))
        else:
            return random_params
def init_cache(self, batch_size, max_length, encoder_outputs):
r"""
Args:
batch_size (`int`):
batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
max_length (`int`):
maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
cache.
encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`):
`encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
`attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*)
is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
cross-attention of the decoder.
"""
# init input variables to retrieve cache
decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
decoder_attention_mask = jnp.ones_like(decoder_input_ids)
decoder_position_ids = jnp.broadcast_to(
jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
)
def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
decoder_module = module._get_decoder_module()
return decoder_module(
decoder_input_ids,
decoder_attention_mask,
decoder_position_ids,
**kwargs,
)
init_variables = self.module.init(
jax.random.PRNGKey(0),
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
decoder_position_ids=decoder_position_ids,
encoder_hidden_states=encoder_outputs[0],
init_cache=True,
method=_decoder_forward, # we only need to call the decoder to init the cache
)
return unfreeze(init_variables["cache"])
@add_start_docstrings(PEGASUS_ENCODE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=PegasusConfig)
def encode(
self,
input_ids: jnp.ndarray,
attention_mask: Optional[jnp.ndarray] = None,
position_ids: Optional[jnp.ndarray] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
r"""
Returns:
Example:
```python
>>> from transformers import AutoTokenizer, FlaxPegasusForConditionalGeneration
>>> model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-large")
>>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
>>> text = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, max_length=1024, return_tensors="np")
>>> encoder_outputs = model.encode(**inputs)
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
if attention_mask is None:
attention_mask = jnp.ones_like(input_ids)
if position_ids is None:
batch_size, sequence_length = input_ids.shape
position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
encode_module = module._get_encoder_module()
return encode_module(input_ids, attention_mask, position_ids, **kwargs)
return self.module.apply(
{"params": params or self.params},
input_ids=jnp.array(input_ids, dtype="i4"),
attention_mask=jnp.array(attention_mask, dtype="i4"),
position_ids=jnp.array(position_ids, dtype="i4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
method=_encoder_forward,
)
@add_start_docstrings(PEGASUS_DECODE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=PegasusConfig)
def decode(
self,
decoder_input_ids,
encoder_outputs,
encoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_position_ids: Optional[jnp.ndarray] = None,
past_key_values: dict = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
r"""
Returns:
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import AutoTokenizer, FlaxPegasusForConditionalGeneration
>>> model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-large")
>>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
>>> text = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, max_length=1024, return_tensors="np")
>>> encoder_outputs = model.encode(**inputs)
>>> decoder_start_token_id = model.config.decoder_start_token_id
>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
>>> outputs = model.decode(decoder_input_ids, encoder_outputs)
>>> last_decoder_hidden_states = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
encoder_hidden_states = encoder_outputs[0]
if encoder_attention_mask is None:
batch_size, sequence_length = encoder_hidden_states.shape[:2]
encoder_attention_mask = jnp.ones((batch_size, sequence_length))
batch_size, sequence_length = decoder_input_ids.shape
if decoder_attention_mask is None:
decoder_attention_mask = jnp.ones((batch_size, sequence_length))
if decoder_position_ids is None:
if past_key_values is not None:
raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
decoder_position_ids = jnp.broadcast_to(
jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
)
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
inputs = {"params": params or self.params}
# if past_key_values are passed then cache is already initialized a private flag init_cache has to be
# passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that
# it can be changed by FlaxPegasusAttention module
if past_key_values:
inputs["cache"] = past_key_values
mutable = ["cache"]
else:
mutable = False
def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
decoder_module = module._get_decoder_module()
return decoder_module(
decoder_input_ids,
decoder_attention_mask,
decoder_position_ids,
**kwargs,
)
outputs = self.module.apply(
inputs,
decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
mutable=mutable,
method=_decoder_forward,
)
# add updated cache to model output
if past_key_values is not None and return_dict:
outputs, past = outputs
outputs["past_key_values"] = unfreeze(past["cache"])
return outputs
elif past_key_values is not None and not return_dict:
outputs, past = outputs
outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
return outputs
@add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)
def __call__(
self,
input_ids: jnp.ndarray,
attention_mask: Optional[jnp.ndarray] = None,
decoder_input_ids: Optional[jnp.ndarray] = None,
decoder_attention_mask: Optional[jnp.ndarray] = None,
position_ids: Optional[jnp.ndarray] = None,
decoder_position_ids: Optional[jnp.ndarray] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
# prepare encoder inputs
if attention_mask is None:
attention_mask = jnp.ones_like(input_ids)
if position_ids is None:
batch_size, sequence_length = input_ids.shape
position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
# prepare decoder inputs
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(
input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id
)
if decoder_attention_mask is None:
decoder_attention_mask = jnp.ones_like(decoder_input_ids)
if decoder_position_ids is None:
batch_size, sequence_length = decoder_input_ids.shape
decoder_position_ids = jnp.broadcast_to(
jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
)
# Handle any PRNG if needed
rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
return self.module.apply(
{"params": params or self.params},
input_ids=jnp.array(input_ids, dtype="i4"),
attention_mask=jnp.array(attention_mask, dtype="i4"),
position_ids=jnp.array(position_ids, dtype="i4"),
decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
)
@add_start_docstrings(
    "The bare Pegasus Model transformer outputting raw hidden-states without any specific head on top.",
    PEGASUS_START_DOCSTRING,
)
class FlaxPegasusModel(FlaxPegasusPreTrainedModel):
    # Headless Pegasus: encode/decode/__call__ come from the base class.
    config: PegasusConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    module_class = FlaxPegasusModule
# Attach a usage example and return-type docs to FlaxPegasusModel.__call__.
append_call_sample_docstring(FlaxPegasusModel, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC)
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule with Bart->Pegasus
class FlaxPegasusForConditionalGenerationModule(nn.Module):
    """Flax module: Pegasus seq2seq backbone + LM head + final logits bias."""

    config: PegasusConfig
    dtype: jnp.dtype = jnp.float32
    # Initializer for the `final_logits_bias` parameter (zeros by default).
    bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
    def setup(self):
        self.model = FlaxPegasusModule(config=self.config, dtype=self.dtype)
        # Hidden-size -> vocab-size projection; its kernel is replaced by the
        # shared embedding matrix when `tie_word_embeddings` is set (see below).
        self.lm_head = nn.Dense(
            self.model.shared.num_embeddings,
            use_bias=False,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.init_std),
        )
        self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings))
    def _get_encoder_module(self):
        return self.model.encoder
    def _get_decoder_module(self):
        return self.model.decoder
    def __call__(
        self,
        input_ids,
        attention_mask,
        decoder_input_ids,
        decoder_attention_mask,
        position_ids,
        decoder_position_ids,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        deterministic: bool = True,
    ):
        # Full encoder-decoder forward pass.
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            position_ids=position_ids,
            decoder_position_ids=decoder_position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=deterministic,
        )
        hidden_states = outputs[0]
        if self.config.tie_word_embeddings:
            # Reuse the input embedding matrix (transposed) as the LM head.
            shared_embedding = self.model.variables["params"]["shared"]["embedding"]
            lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
        else:
            lm_logits = self.lm_head(hidden_states)
        # The bias is excluded from gradient updates via stop_gradient.
        lm_logits += jax.lax.stop_gradient(self.final_logits_bias.astype(self.dtype))
        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return output
        return FlaxSeq2SeqLMOutput(
            logits=lm_logits,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )
@add_start_docstrings(
    "The PEGASUS Model with a language modeling head. Can be used for summarization.", PEGASUS_START_DOCSTRING
)
class FlaxPegasusForConditionalGeneration(FlaxPegasusPreTrainedModel):
    """Pegasus encoder-decoder with an LM head for sequence generation.

    Inherits `encode`/`__call__` from `FlaxPegasusPreTrainedModel`; overrides
    `decode` so vocabulary logits (not hidden states) are produced each step.
    """

    module_class = FlaxPegasusForConditionalGenerationModule
    dtype: jnp.dtype = jnp.float32

    @add_start_docstrings(PEGASUS_DECODE_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=PegasusConfig)
    def decode(
        self,
        decoder_input_ids,
        encoder_outputs,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        decoder_attention_mask: Optional[jnp.ndarray] = None,
        decoder_position_ids: Optional[jnp.ndarray] = None,
        past_key_values: dict = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        deterministic: bool = True,
        params: dict = None,
        dropout_rng: PRNGKey = None,
    ):
        r"""
        Returns:
        Example:
        ```python
        >>> import jax.numpy as jnp
        >>> from transformers import AutoTokenizer, FlaxPegasusForConditionalGeneration
        >>> model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-large")
        >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
        >>> text = "My friends are cool but they eat too many carbs."
        >>> inputs = tokenizer(text, max_length=1024, return_tensors="np")
        >>> encoder_outputs = model.encode(**inputs)
        >>> decoder_start_token_id = model.config.decoder_start_token_id
        >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
        >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
        >>> logits = outputs.logits
        ```"""
        # Resolve output options against config defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        encoder_hidden_states = encoder_outputs[0]
        if encoder_attention_mask is None:
            batch_size, sequence_length = encoder_hidden_states.shape[:2]
            encoder_attention_mask = jnp.ones((batch_size, sequence_length))
        batch_size, sequence_length = decoder_input_ids.shape
        if decoder_attention_mask is None:
            decoder_attention_mask = jnp.ones((batch_size, sequence_length))
        if decoder_position_ids is None:
            # With a cache the absolute position cannot be inferred from the
            # (length-1) input, so the caller must supply it.
            if past_key_values is not None:
                raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
            decoder_position_ids = jnp.broadcast_to(
                jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
            )
        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng
        inputs = {"params": params or self.params}
        # if past_key_values are passed then cache is already initialized a private flag init_cache has to be
        # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that
        # it can be changed by FlaxPegasusAttention module
        if past_key_values:
            inputs["cache"] = past_key_values
            mutable = ["cache"]
        else:
            mutable = False

        def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
            # Run only the decoder, then project to vocabulary logits, sharing
            # the input embedding matrix when `tie_word_embeddings` is set.
            decoder_module = module._get_decoder_module()
            outputs = decoder_module(
                decoder_input_ids,
                decoder_attention_mask,
                decoder_position_ids,
                **kwargs,
            )
            hidden_states = outputs[0]
            if self.config.tie_word_embeddings:
                shared_embedding = module.model.variables["params"]["shared"]["embedding"]
                lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
            else:
                lm_logits = module.lm_head(hidden_states)
            lm_logits += module.final_logits_bias.astype(self.dtype)
            return lm_logits, outputs

        outputs = self.module.apply(
            inputs,
            decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
            decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
            decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=deterministic,
            rngs=rngs,
            mutable=mutable,
            method=_decoder_forward,
        )
        if past_key_values is None:
            lm_logits, decoder_outputs = outputs
        else:
            (lm_logits, decoder_outputs), past = outputs
        if return_dict:
            outputs = FlaxCausalLMOutputWithCrossAttentions(
                logits=lm_logits,
                hidden_states=decoder_outputs.hidden_states,
                attentions=decoder_outputs.attentions,
                cross_attentions=decoder_outputs.cross_attentions,
            )
        else:
            outputs = (lm_logits,) + decoder_outputs[1:]
        # add updated cache to model output
        if past_key_values is not None and return_dict:
            outputs["past_key_values"] = unfreeze(past["cache"])
            return outputs
        elif past_key_values is not None and not return_dict:
            outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
        return outputs

    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        max_length,
        attention_mask: Optional[jnp.ndarray] = None,
        decoder_attention_mask: Optional[jnp.ndarray] = None,
        encoder_outputs=None,
        **kwargs,
    ):
        """Set up the decoder cache and static masks for autoregressive generation.

        Fix: annotations use `jnp.ndarray` instead of `jnp.DeviceArray` — the
        `DeviceArray` alias was removed in jax>=0.4, and since annotations are
        evaluated at function-definition time this raised an `AttributeError`
        at import on modern JAX.
        """
        # initializing the cache
        batch_size, seq_length = decoder_input_ids.shape
        past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
        # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
        # But since the decoder uses a causal mask, those positions are masked anyways.
        # Thus we can create a single static attention_mask here, which is more efficient for compilation
        extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
        if decoder_attention_mask is not None:
            position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
            extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
        else:
            position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
        return {
            "past_key_values": past_key_values,
            "encoder_outputs": encoder_outputs,
            "encoder_attention_mask": attention_mask,
            "decoder_attention_mask": extended_attention_mask,
            "decoder_position_ids": position_ids,
        }

    def update_inputs_for_generation(self, model_outputs, model_kwargs):
        # Between generation steps: carry the cache forward and advance the
        # single decoder position id by one.
        model_kwargs["past_key_values"] = model_outputs.past_key_values
        model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
        return model_kwargs
# Usage examples appended (together with PEGASUS_INPUTS_DOCSTRING) to
# FlaxPegasusForConditionalGeneration.__call__ via overwrite_call_docstring.
# Fix: the summarization example's code fence was "```pyton" (typo), which
# broke syntax highlighting / doctest extraction in the rendered docs.
FLAX_PEGASUS_CONDITIONAL_GENERATION_DOCSTRING = """
    Returns:
    Summarization example:
    ```python
    >>> from transformers import AutoTokenizer, FlaxPegasusForConditionalGeneration
    >>> model = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-large')
    >>> tokenizer = AutoTokenizer.from_pretrained('google/pegasus-large')
    >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
    >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='np')
    >>> # Generate Summary
    >>> summary_ids = model.generate(inputs['input_ids']).sequences
    >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))
    ```
    Mask filling example:
    ```python
    >>> from transformers import AutoTokenizer, FlaxPegasusForConditionalGeneration
    >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
    >>> TXT = "My friends are <mask> but they eat too many carbs."
    >>> model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-large")
    >>> input_ids = tokenizer([TXT], return_tensors="np")["input_ids"]
    >>> logits = model(input_ids).logits
    >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
    >>> probs = jax.nn.softmax(logits[0, masked_index], axis=0)
    >>> values, predictions = jax.lax.top_k(probs)
    >>> tokenizer.decode(predictions).split()
    ```
"""
# Merge the usage examples above into the model's __call__ docstring and point
# its return-type documentation at FlaxSeq2SeqLMOutput.
overwrite_call_docstring(
    FlaxPegasusForConditionalGeneration, PEGASUS_INPUTS_DOCSTRING + FLAX_PEGASUS_CONDITIONAL_GENERATION_DOCSTRING
)
append_replace_return_docstrings(
    FlaxPegasusForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC
)
|
2877025939/tabelVew-CollectionView | 4,060 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/HomeTableViewCell.m | //
// HomeTableViewCell.m
// tabelVew嵌套CollectionView
//
// Created by anan on 2017/6/9.
// Copyright © 2017年 Plan. All rights reserved.
//
#import "HomeTableViewCell.h"
#import "HomeCollectionCell.h"
#import "ViewController1.h"
// 宽高
#define SCREEN_WIDTH [UIScreen mainScreen].bounds.size.width
#define SCREEN_HEIGHT [UIScreen mainScreen].bounds.size.height
@interface HomeTableViewCell () <UICollectionViewDataSource, UICollectionViewDelegate, UICollectionViewDelegateFlowLayout>
@end

@implementation HomeTableViewCell

- (void)awakeFromNib {
    [super awakeFromNib];
}

/// Builds the grey section-header label and the embedded 4-column grid.
/// Frames are assigned later in -layoutSubviews (they depend on item count).
- (instancetype)initWithStyle:(UITableViewCellStyle)style reuseIdentifier:(NSString *)reuseIdentifier
{
    if (self = [super initWithStyle:style reuseIdentifier:reuseIdentifier]) {
        self.selectionStyle = UITableViewCellSelectionStyleNone;
        self.backgroundColor = [UIColor whiteColor];
        _headLable = [[UILabel alloc]init];
        _headLable.userInteractionEnabled = NO;
        _headLable.backgroundColor = [UIColor lightGrayColor];
        [self.contentView addSubview:_headLable];
        // Square items, four per row, no spacing or insets.
        UICollectionViewFlowLayout *layout = [[UICollectionViewFlowLayout alloc] init];
        layout.minimumInteritemSpacing = 0;
        layout.minimumLineSpacing = 0;
        layout.itemSize = CGSizeMake(SCREEN_WIDTH/4, SCREEN_WIDTH/4);
        layout.sectionInset = UIEdgeInsetsMake(0, 0, 0, 0);
        _collectionView = [[UICollectionView alloc] initWithFrame:CGRectZero collectionViewLayout:layout];
        _collectionView.delegate = self;
        _collectionView.dataSource = self;
        _collectionView.showsVerticalScrollIndicator = NO;
        _collectionView.showsHorizontalScrollIndicator = NO;
        [_collectionView setBackgroundColor:[UIColor whiteColor]];
        // Register the item cell class.
        [_collectionView registerClass:[HomeCollectionCell class] forCellWithReuseIdentifier:@"cell"];
        [self.contentView addSubview:self.collectionView];
    }
    return self;
}

/// Fix: reload the embedded collection view when the table view assigns a new
/// data array — dequeued (reused) cells otherwise keep showing the previous
/// section's items. Re-layout too, since the grid height depends on the count.
/// NOTE(review): assumes the header declares this property as a plain strong
/// NSArray — confirm it is not `copy`/NSMutableArray.
- (void)setCollectDataArray:(NSArray *)collectDataArray {
    _collectDataArray = collectDataArray;
    [self.collectionView reloadData];
    [self setNeedsLayout];
}

/// Sizes the header (35 pt) and the grid: one SCREEN_WIDTH/4-tall line per
/// started group of four items, up to 12 items.
- (void)layoutSubviews{
    [super layoutSubviews];
    _headLable.frame = CGRectMake(0, 0, SCREEN_WIDTH, 35);
    _headLable.text = [NSString stringWithFormat:@" %@",self.headText];
    NSInteger collectionViewCount = self.collectDataArray.count;
    // Height buckets must stay consistent with HomeViewController's
    // -collectionViewCount: row-height computation.
    if (collectionViewCount == 0) {
        self.collectionView.frame = CGRectMake(0 , 35, SCREEN_WIDTH , 0 );
    } else if (collectionViewCount >= 1 && collectionViewCount <= 4){
        self.collectionView.frame = CGRectMake(0 , 35, SCREEN_WIDTH , SCREEN_WIDTH/4 );
    } else if (collectionViewCount > 4 && collectionViewCount <= 8){
        self.collectionView.frame = CGRectMake(0 , 35, SCREEN_WIDTH , SCREEN_WIDTH*2/4 );
    } else if (collectionViewCount > 8 && collectionViewCount <= 12){
        self.collectionView.frame = CGRectMake(0 , 35, SCREEN_WIDTH , SCREEN_WIDTH*3/4 );
    }
}

#pragma mark - UICollectionViewDataSource

- (NSInteger)collectionView:(UICollectionView *)collectionView numberOfItemsInSection:(NSInteger)section {
    return self.collectDataArray.count;
}

- (UICollectionViewCell *)collectionView:(UICollectionView *)collectionView cellForItemAtIndexPath:(NSIndexPath *)indexPath {
    HomeCollectionCell *cell = [collectionView dequeueReusableCellWithReuseIdentifier: @"cell" forIndexPath:indexPath];
    cell.backgroundColor = [UIColor clearColor];
    cell.textLabel.text = self.collectDataArray[indexPath.row];
    cell.imageView.image = [UIImage imageNamed:@"MyTask@2x"];
    return cell;
}

#pragma mark - UICollectionViewDelegate

/// Forwards item taps to the owning controller via CustomCollectionDelegate.
- (void)collectionView:(UICollectionView *)collectionView didSelectItemAtIndexPath:(NSIndexPath *)indexPath {
    // Fix: %ld expects long; NSIndexPath.row is NSInteger (int on 32-bit),
    // so cast explicitly to avoid undefined behavior in the format call.
    NSLog(@"点击了 %ld ", (long)indexPath.row);
    if([self.delegate respondsToSelector:@selector(CustomCollection:didSelectRowAtIndexPath:str:)]){
        [self.delegate CustomCollection:collectionView didSelectRowAtIndexPath:indexPath str:self.collectDataArray[indexPath.row]];
    }
}

@end
|
2877025939/tabelVew-CollectionView | 1,731 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/HomeCollectionCell.m | //
// HomeCollectionCell.m
// tabelVew嵌套CollectionView
//
// Created by anan on 2017/7/6.
// Copyright © 2017年 Plan. All rights reserved.
//
#import "HomeCollectionCell.h"
#define ITEM_HEIGHT (SCREEN_HEIGHT*2/3-4)/5
@implementation HomeCollectionCell

- (void)awakeFromNib {
    [super awakeFromNib];
}

/// Nib/storyboard path: only cosmetic setup; subviews are not created here.
- (instancetype)initWithCoder:(NSCoder *)coder {
    if ((self = [super initWithCoder:coder])) {
        self.backgroundColor = [UIColor whiteColor];
        self.textLabel.font = [UIFont systemFontOfSize:20];
    }
    return self;
}

/// Programmatic path: backdrop view, centered icon, and caption label.
/// Subviews are added back-to-front: myView, imageView, textLabel.
- (instancetype)initWithFrame:(CGRect)frame {
    if ((self = [super initWithFrame:frame])) {
        self.backgroundColor = [UIColor whiteColor];

        UIView *backdrop = [[UIView alloc] init];
        backdrop.backgroundColor = [UIColor whiteColor];
        self.myView = backdrop;
        [self.contentView addSubview:backdrop];

        UIImageView *icon = [[UIImageView alloc] init];
        icon.contentMode = UIViewContentModeScaleAspectFit;
        self.imageView = icon;
        [self.contentView addSubview:icon];

        UILabel *caption = [[UILabel alloc] init];
        caption.textAlignment = NSTextAlignmentCenter;
        caption.font = [UIFont systemFontOfSize:20];
        self.textLabel = caption;
        [self.contentView addSubview:caption];
    }
    return self;
}

- (void)layoutSubviews {
    [super layoutSubviews];
    const CGFloat iconSide = 28;
    CGSize cellSize = self.frame.size;
    // Half-point-inset backdrop; note its height tracks the cell WIDTH
    // (square), matching the original design.
    self.myView.frame = CGRectMake(0.5, 0.5, cellSize.width - 1, cellSize.width - 1);
    CGFloat iconX = (cellSize.width - iconSide) / 2;
    CGFloat iconY = (cellSize.height - iconSide - 10 - 20) / 2;
    self.imageView.frame = CGRectMake(iconX, iconY, iconSide, iconSide);
    // Caption sits 38 pt below the icon's top edge, full width, 20 pt tall.
    self.textLabel.frame = CGRectMake(0, iconY + 38, cellSize.width, 20);
}

@end
|
2877025939/tabelVew-CollectionView | 1,380 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/ViewController1.m | //
// ViewController1.m
// tabelVew嵌套CollectionView
//
// Created by anan on 2017/6/7.
// Copyright © 2017年 Plan. All rights reserved.
//
#import "ViewController1.h"
@interface ViewController1 ()
@end

@implementation ViewController1

- (void)viewDidLoad {
    [super viewDidLoad];
    // Show dark status-bar content while this screen is on top.
    [[UIApplication sharedApplication] setStatusBarStyle:UIStatusBarStyleDefault];
    self.view.backgroundColor = [UIColor whiteColor];
}

- (void)didReceiveMemoryWarning {
    [super didReceiveMemoryWarning];
}

- (void)dealloc {
    NSLog(@"go die");
}

- (void)viewWillDisappear:(BOOL)animated {
    [super viewWillDisappear:animated];
}

- (void)viewDidDisappear:(BOOL)animated {
    [super viewDidDisappear:animated];
    // Hand the status bar back to the (light-content) screen underneath.
    [[UIApplication sharedApplication] setStatusBarStyle:UIStatusBarStyleLightContent];
}

@end
|
2877025939/tabelVew-CollectionView | 7,443 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/HomeViewController.m | //
// HomeViewController.m
// tabelVew嵌套CollectionView
//
// Created by anan on 2017/6/9.
// Copyright © 2017年 Plan. All rights reserved.
//
#import "HomeViewController.h"
#import "HomeTableViewCell.h"
#import "ViewController1.h"
#import "HomeTitle.h"
#import "Header.h"
#import "Masonry.h"
@interface HomeViewController ()<UITableViewDelegate,UITableViewDataSource,CustomCollectionDelegate>
// Main list; each row hosts a HomeTableViewCell with an embedded grid.
@property (nonatomic,strong) UITableView *tableView;
// One sub-array of item titles per table section.
@property (nonatomic,strong) NSMutableArray *dataArray;
// Section header titles, parallel to dataArray.
@property (nonatomic,strong) NSArray *array1;
// Overlay title bar whose appearance tracks the scroll offset.
@property (nonatomic,strong) HomeTitle *homeTitle;
@end
@implementation HomeViewController
- (void)viewDidLoad {
    [super viewDidLoad];
    // Build the table view and the overlay title bar.
    [self createUI];
}
// Assembles the view hierarchy: full-screen table view underneath, custom
// title bar floating on top; wires the title bar's button to -click:.
-(void)createUI{
    self.view.backgroundColor = [UIColor whiteColor];
    [self.view addSubview:self.tableView];
    /*
     The @available check below fails to compile on Xcode 8 (comment it out
     there); it builds fine on Xcode 9+, which first shipped the iOS 11 SDK.
     */
    if (@available(iOS 11.0, *)) {
        self.tableView.contentInsetAdjustmentBehavior = UIScrollViewContentInsetAdjustmentNever;
    } else {
        self.automaticallyAdjustsScrollViewInsets = NO;
    }
    //    [self.tableView mas_makeConstraints:^(MASConstraintMaker *make) {
    //        if (@available(iOS 11.0, *)) {
    //            make.edges.mas_equalTo(self.view.safeAreaInsets);
    //        }else{
    //            make.edges.mas_equalTo(self.view);
    //        }
    //
    //    }];
    _homeTitle = [[HomeTitle alloc]initWithFrame:CGRectMake(0, 0, SCREEN_WIDTH, NavHeight)];
    [self.view addSubview:_homeTitle];
    // Weak capture: the title bar retains this block, which would otherwise
    // retain self and create a cycle.
    __weak typeof(self) weakSelf = self;
    weakSelf.homeTitle.btnClick = ^{
        [weakSelf click:weakSelf];
    };
}
/// Title-bar button action: pushes the profile screen.
-(void)click:(UIViewController *)obj{
    NSLog(@"点击了个人");
    ViewController1 *profileController = [[ViewController1 alloc] init];
    [self.navigationController pushViewController:profileController animated:YES];
}
#pragma mark - tableView 代理方法
// One table section per sub-array of items.
-(NSInteger)numberOfSectionsInTableView:(UITableView *)tableView{
    return self.dataArray.count;
}
// Each section is a single row containing the whole grid cell.
-(NSInteger)tableView:(UITableView *)tableView numberOfRowsInSection:(NSInteger)section{
    return 1;
}
/// Dequeues a HomeTableViewCell and feeds it the section's items and title.
-(UITableViewCell *)tableView:(UITableView *)tableView cellForRowAtIndexPath:(NSIndexPath *)indexPath{
    HomeTableViewCell *homeCell = [tableView dequeueReusableCellWithIdentifier:@"HomeTableViewCell"];
    homeCell.delegate = self;
    homeCell.collectDataArray = self.dataArray[indexPath.section];
    homeCell.headText = self.array1[indexPath.section];
    return homeCell;
}
// Row taps are intentionally ignored; selection happens in the embedded
// collection view (see -CustomCollection:didSelectRowAtIndexPath:str:).
-(void)tableView:(UITableView *)tableView didSelectRowAtIndexPath:(NSIndexPath *)indexPath{
}
#pragma mark - 代理方法
/// CustomCollectionDelegate: a grid item was tapped inside some row's cell.
/// Pushes the detail screen.
- (void)CustomCollection:(UICollectionView *)collectionView didSelectRowAtIndexPath:(NSIndexPath *)indexPath str:(NSString *)str{
    // Fix: %ld expects long; NSIndexPath.row is NSInteger (int on 32-bit),
    // so cast explicitly to keep the variadic call well-defined.
    NSLog(@"select: %ld", (long)indexPath.row);
    NSLog(@"str=%@",str);
    ViewController1 *vc = [[ViewController1 alloc]init];
    [self.navigationController pushViewController:vc animated:YES];
}
/// Row height is derived from how many grid items the section holds.
-(CGFloat)tableView:(UITableView *)tableView heightForRowAtIndexPath:(NSIndexPath *)indexPath{
    NSArray *sectionItems = self.dataArray[indexPath.section];
    return [self collectionViewCount:sectionItems.count];
}
// Maps an item count to the row height: a 35 pt header plus one
// SCREEN_WIDTH/4-tall grid line per started group of four items.
// Must mirror the frame buckets in HomeTableViewCell's -layoutSubviews.
-(CGFloat)collectionViewCount:(NSInteger)collectionViewCount{
    if (collectionViewCount == 0) {
        return 0;
    } else if (collectionViewCount >= 1 && collectionViewCount <= 4){
        return 35 +SCREEN_WIDTH/4;
    } else if (collectionViewCount > 4 && collectionViewCount <= 8){
        return 35 +SCREEN_WIDTH*2/4 ;
    } else if (collectionViewCount > 8 && collectionViewCount <= 12){
        // Fix: this bucket was missing, so sections with 9-12 items got a
        // zero-height row even though the cell lays out three grid lines.
        return 35 + SCREEN_WIDTH*3/4;
    }
    return 0;
}
// Switches the overlay title bar between a transparent "over the photo" look
// and an opaque white look once the table scrolls 100 pt past the top.
- (void)scrollViewDidScroll:(UIScrollView *)scrollView {
    if (scrollView.contentOffset.y < 0) {
        // Pulled down past the top: keep dark status-bar content.
        [[UIApplication sharedApplication] setStatusBarStyle:UIStatusBarStyleDefault];
    }else if (scrollView.contentOffset.y >=100) {
        // Scrolled past the header image: opaque bar, dark text and icon.
        _homeTitle.backgroundColor = [UIColor whiteColor];
        _homeTitle.titleLable.textColor = [UIColor blackColor];
        _homeTitle.line.backgroundColor = [UIColor blackColor];
        [_homeTitle.mine setImage:[UIImage imageNamed:@"1111"] forState:UIControlStateNormal] ;
        [[UIApplication sharedApplication] setStatusBarStyle:UIStatusBarStyleDefault];
    } else{
        //_homeTitle.alpha=scrollView.contentOffset.y/100.0;
        // Still over the header image: transparent bar, light text and icon.
        _homeTitle.backgroundColor = [UIColor clearColor];
        _homeTitle.titleLable.textColor = [UIColor whiteColor];
        _homeTitle.line.backgroundColor = [UIColor clearColor];
        [_homeTitle.mine setImage:[UIImage imageNamed:@"111"] forState:UIControlStateNormal] ;
        [[UIApplication sharedApplication] setStatusBarStyle:UIStatusBarStyleLightContent];
    }
}
- (void)viewWillAppear:(BOOL)animated {
    [super viewWillAppear:animated];
    // This screen draws its own title bar, so suppress the system one.
    [self.navigationController setNavigationBarHidden:YES];
}
- (void)viewDidAppear:(BOOL)animated {
    [super viewDidAppear:animated];
    // If the table is already scrolled past the bar region when we reappear,
    // restore the dark status bar to match the opaque title style.
    BOOL scrolledPastBar = self.tableView.contentOffset.y > 64;
    if (scrolledPastBar) {
        [[UIApplication sharedApplication] setStatusBarStyle:UIStatusBarStyleDefault];
    }
}
- (void)viewWillDisappear:(BOOL)animated {
    [super viewWillDisappear:animated];
    // Bring the system navigation bar back for whichever screen comes next.
    [self.navigationController setNavigationBarHidden:NO];
}
// Lazily built full-screen table view; each row embeds a horizontal
// collection view (HomeTableViewCell), topped by an image header.
- (UITableView *)tableView {
    if (!_tableView) {
        _tableView = [[UITableView alloc] initWithFrame:CGRectMake(0, 0, SCREEN_WIDTH, Height)];
        _tableView.backgroundColor = [UIColor whiteColor];
        _tableView.dataSource = self;
        _tableView.delegate = self;
        _tableView.separatorStyle = UITableViewCellSeparatorStyleNone;
        _tableView.showsVerticalScrollIndicator = NO;
        [_tableView registerClass:[HomeTableViewCell class] forCellReuseIdentifier:@"HomeTableViewCell"];
        // Image banner shown above the first section.
        UIImageView *headerView = [[UIImageView alloc] initWithFrame:CGRectMake(0, 0, SCREEN_WIDTH, 200)];
        headerView.image = [UIImage imageNamed:@"风景1.jpg"];
        _tableView.tableHeaderView = headerView;
        // The explicit [_tableView reloadData] that used to live here was
        // redundant: a table view loads its data automatically on first layout.
    }
    return _tableView;
}
// Item identifiers for each section's collection view, one inner array per
// section. Built via -mutableCopy: the previous code cast an immutable
// literal to NSMutableArray * (a lying cast that would crash on any mutating
// call) and also discarded a freshly allocated [NSMutableArray array].
- (NSMutableArray *)dataArray {
    if (!_dataArray) {
        _dataArray = [@[
            @[@"01", @"02", @"03", @"04", @"05"],
            @[@"11", @"12", @"13", @"14"],
            @[@"21", @"22", @"23"],
            @[@"31", @"32", @"33"],
            @[@"41", @"42", @"43"],
            @[@"51", @"52", @"53"]
        ] mutableCopy];
    }
    return _dataArray;
}
// Section header titles, one per entry in dataArray.
- (NSArray *)array1 {
    if (!_array1) {
        // Assign the literal directly; the previous [NSArray array] result
        // was allocated and then immediately thrown away.
        _array1 = @[@"标题0", @"标题1", @"标题2", @"标题3", @"标题4", @"标题5"];
    }
    return _array1;
}
- (void)didReceiveMemoryWarning {
    [super didReceiveMemoryWarning];
    // Nothing recreatable is cached yet, so there is nothing extra to release.
}
/*
#pragma mark - Navigation
// In a storyboard-based application, you will often want to do a little preparation before navigation
- (void)prepareForSegue:(UIStoryboardSegue *)segue sender:(id)sender {
// Get the new view controller using [segue destinationViewController].
// Pass the selected object to the new view controller.
}
*/
@end
|
27182812/ChatGLM-LLaMA-chinese-insturct | 9,935 | src/transformers/models/pegasus/tokenization_pegasus_fast.py | # coding=utf-8
# Copyright 2020 Google and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model PEGASUS."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" PEGASUS tokenizer (backed by HuggingFace's *tokenizers* library). Based on
    [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        mask_token (`str`, *optional*, defaults to `"<mask_2>"`):
            The token used for masking single token values. This is the token used when training this model with masked
            language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining.
            It corresponds to *[MASK2]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive
            Summarization](https://arxiv.org/pdf/1912.08777.pdf).
        mask_token_sent (`str`, *optional*, defaults to `"<mask_1>"`):
            The token used for masking whole target sentences. This is the token used when training this model with gap
            sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during
            pretraining. It corresponds to *[MASK1]* in [PEGASUS: Pre-training with Extracted Gap-sentences for
            Abstractive Summarization](https://arxiv.org/pdf/1912.08777.pdf).
        additional_special_tokens (`List[str]`, *optional*):
            Additional special tokens used by the tokenizer. If no additional_special_tokens are provided <mask_2> and
            <unk_2, ..., unk_102> are used as additional special tokens corresponding to the [original PEGASUS
            tokenizer](https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66)
            that uses the tokens 2 - 104 only for pretraining
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,  # entries 2 - 104 are only used for pretraining
        **kwargs,
    ):
        # `offset` is the number of reserved pretraining token slots (ids 2..104).
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            # Ensure mask_token_sent (if any) leads the additional-special-token list.
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            # Duplicates would silently shift token ids, so fail loudly instead.
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            # Default: mask_token_sent plus the <unk_2> ... <unk_102> placeholders.
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # Saving a slow tokenizer needs the original sentencepiece model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        """Return a 0/1 list marking which ids in `seq` are special tokens (<unk> excluded)."""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        # Sanity check: special ids must be exactly the contiguous low range
        # {0, 1, 2} (pad/eos/mask) plus the additional special tokens.
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get list where entries are [1] if a token is [eos] or [pad] else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            # Trailing [1] accounts for the eos token appended by
            # build_inputs_with_special_tokens.
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """
        Build model inputs from a sequence by adding eos to the end. no bos token is added to the front.

        - single sequence: `X </s>`
        - pair of sequences: `A B </s>` (not intended use)

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_0 is None:
            pass
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into `save_directory` and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        # Avoid copying the file onto itself when saving in place.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
|
2881099/csredis | 6,645 | README.md | ## Features
- CSRedisClient and RedisHelper keep all method names consistent with redis-cli
- Supports geo-type commands (requires redis-server 3.2 or above)
- Supports Redis Cluster (redis-trib.rb)
- Supports Redis Sentinel and master-slave replication
- Supports stream-type commands (requires redis-server 5.0 or above)
| Package Name | NuGet | Downloads | |
|--------------| ------- | ---- | -- |
| CSRedisCore | [](https://www.nuget.org/packages/CSRedisCore) | [](https://www.nuget.org/stats/packages/CSRedisCore?groupby=Version) |
| Caching.CSRedis | [](https://www.nuget.org/packages/Caching.CSRedis) | [](https://www.nuget.org/stats/packages/Caching.CSRedis?groupby=Version) | IDistributedCache |
> dotnet add package CSRedisCore
## Single machine redis
```csharp
var csredis = new CSRedis.CSRedisClient("127.0.0.1:6379,password=123,defaultDatabase=13,prefix=my_");
```
| Parameter | Default | Explain |
| :---------------- | --------: | :------------------- |
| user | \<Empty\> | Redis server user (redis 6.0+) |
| password | \<Empty\> | Redis server password |
| defaultDatabase | 0 | Redis server database |
| **asyncPipeline** | false | The asynchronous method automatically uses pipeline, and the 10W concurrent time is 450ms (welcome to feedback) |
| poolsize | 50 | Connection pool size |
| idleTimeout | 20000 | Idle time of elements in the connection pool (MS), suitable for connecting to remote redis server |
| connectTimeout | 5000 | Connection timeout (MS) |
| syncTimeout | 10000 | Send / receive timeout (MS) |
| preheat | 5 | Preheat connections, receive values such as preheat = 5 preheat 5 connections |
| autoDispose | true | Follow system exit event to release automatically |
| ssl | false | Enable encrypted transmission |
| testcluster | true | 是否尝试集群模式,阿里云、腾讯云集群需要设置此选项为 false |
| tryit | 0 | Execution error, retry attempts |
| name | \<Empty\> | Connection name, use client list command to view |
| prefix | \<Empty\> | key前辍,所有方法都会附带此前辍,csredis.Set(prefix + "key", 111); |
> IPv6: [fe80::b164:55b3:4b4f:7ce6%15]:6379
# Redis Sentinel
```csharp
var csredis = new CSRedis.CSRedisClient("mymaster,password=123,prefix=my_",
new [] { "192.169.1.10:26379", "192.169.1.11:26379", "192.169.1.12:26379" });
```
Read only: new CSRedisClient("mymaster,password=123", new [] { Sentinels }, false)
# Redis Cluster
假设你已经配置好 redis-trib 集群,定义一个【普通模式】的 CSRedisClient 对象,它会根据 redis-server 返回的 MOVED | ASK 错误记录slot,自动增加节点 Nodes 属性。
> 127.0.0.1:6379,password=123,defaultDatabase=0,poolsize=50,prefix=
> 其他节点在运行过程中自动增加,确保每个节点密码一致。
警告:本模式与【分区模式】同时使用时,切记不可设置“prefix=key前辍”(或者全部设置成一样),否则会导致 keySlot 计算结果与服务端不匹配,无法记录 slotCache。
> 注意:官方集群不支持多 keys 的命令、【管道】、Eval(脚本)等众多杀手级功能。
# IDistributedCache
> dotnet add package Caching.CSRedis
```csharp
RedisHelper.Initialization(csredis);
services.AddSingleton<IDistributedCache>(new Microsoft.Extensions.Caching.Redis.CSRedisCache(RedisHelper.Instance));
```
> Note: CSRedisClient should be used as a singleton; using the static RedisHelper class is recommended
```csharp
RedisHelper.Set("test1", "123123", 60);
RedisHelper.Get("test1");
//The method name is the same as the command of redis cli
```
# Operate on multiple databases
```csharp
var connectionString = "127.0.0.1:6379,password=123,poolsize=10";
var redis = new CSRedisClient[14]; //Singleton
for (var a = 0; a< redis.Length; a++)
redis[a] = new CSRedisClient(connectionString + ",defaultDatabase=" + a);
redis[1].Get("test1");
```
> Multiple RedisHelper
```csharp
public abstract class MyHelper1 : RedisHelper<MyHelper1> {}
public abstract class MyHelper2 : RedisHelper<MyHelper2> {}
MyHelper1.Initialization(new CSRedisClient("...."));
MyHelper2.Initialization(new CSRedisClient("...."));
```
# Subscribe/Publish
```csharp
//Native subscribe
RedisHelper.Subscribe(
("chan1", msg => Console.WriteLine(msg.Body)),
("chan2", msg => Console.WriteLine(msg.Body)));
RedisHelper.PSubscribe(new[] { "test*", "*test001", "test*002" }, msg => {
Console.WriteLine($"PSUB {msg.MessageId}:{msg.Body} {msg.Pattern}: chan:{msg.Channel}");
});
//模式订阅已经解决的难题:
//1、分区的节点匹配规则,导致通配符最大可能匹配全部节点,所以全部节点都要订阅
//2、本组 "test*", "*test001", "test*002" 订阅全部节点时,需要解决同一条消息不可执行多次
RedisHelper.Publish("chan1", "123123123");
```
参考资料:[【由浅至深】redis 实现发布订阅的几种方式](https://www.cnblogs.com/kellynic/p/9952386.html)
# CacheShell
```csharp
//不加缓存的时候,要从数据库查询
var t1 = Test.Select.WhereId(1).ToOne();
//一般的缓存代码,如不封装还挺繁琐的
var cacheValue = RedisHelper.Get("test1");
if (!string.IsNullOrEmpty(cacheValue)) {
try {
return JsonConvert.DeserializeObject(cacheValue);
} catch {
//出错时删除key
RedisHelper.Remove("test1");
throw;
}
}
var t1 = Test.Select.WhereId(1).ToOne();
RedisHelper.Set("test1", JsonConvert.SerializeObject(t1), 10); //缓存10秒
//使用缓存壳效果同上,以下示例使用 string 和 hash 缓存数据
var t1 = RedisHelper.CacheShell("test1", 10, () => Test.Select.WhereId(1).ToOne());
var t2 = RedisHelper.CacheShell("test", "1", 10, () => Test.Select.WhereId(1).ToOne());
var t3 = RedisHelper.CacheShell("test", new [] { "1", "2" }, 10, notCacheFields => new [] {
("1", Test.Select.WhereId(1).ToOne()),
("2", Test.Select.WhereId(2).ToOne())
});
```
# Pipeline
使用管道模式,打包多条命令一起执行,从而提高性能。
```csharp
var ret1 = RedisHelper.StartPipe(p => p.Set("a", "1").Get("a"));
```
# Benchmark
100,000 operations
```shell
StackExchange.Redis StringSet:7882ms
CSRedisCore Set:6101ms
-------------------
StackExchange.Redis StringGet:7729ms
CSRedisCore Get:5762ms
-------------------
StackExchange.Redis StringSetAsync:8094ms
CSRedisCore SetAsync:6315ms
-------------------
StackExchange.Redis StringGetAsync:7986ms
CSRedisClient GetAsync:4931ms
CSRedisCore GetAsync:5960ms
-------------------
CSRedisCore SetAsync(Task.WaitAll):559ms
StackExchange.Redis StringSetAsync (concurrent Task.WaitAll):172ms
-------------------
CSRedisCore GetAsync(Task.WaitAll):435ms
StackExchange.Redis StringGetAsync (concurrent Task.WaitAll):176ms
```
## 💕 Donation (捐赠)
> 感谢你的打赏
- [Alipay](https://www.cnblogs.com/FreeSql/gallery/image/338860.html)
- [WeChat](https://www.cnblogs.com/FreeSql/gallery/image/338859.html)
# Thank
Original open source project: https://github.com/ctstone/csredis
|
2877025939/tabelVew-CollectionView | 1,638 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/Info.plist | <?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>en</string>
<key>CFBundleExecutable</key>
<string>$(EXECUTABLE_NAME)</string>
<key>CFBundleIdentifier</key>
<string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>$(PRODUCT_NAME)</string>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleShortVersionString</key>
<string>1.0</string>
<key>CFBundleVersion</key>
<string>1</string>
<key>LSApplicationCategoryType</key>
<string></string>
<key>LSRequiresIPhoneOS</key>
<true/>
<key>UILaunchStoryboardName</key>
<string>LaunchScreen</string>
<key>UIMainStoryboardFile</key>
<string>Main</string>
<key>UIRequiredDeviceCapabilities</key>
<array>
<string>armv7</string>
</array>
<key>UIStatusBarStyle</key>
<string>UIStatusBarStyleLightContent</string>
<key>UISupportedInterfaceOrientations</key>
<array>
<string>UIInterfaceOrientationPortrait</string>
<string>UIInterfaceOrientationLandscapeLeft</string>
<string>UIInterfaceOrientationLandscapeRight</string>
</array>
<key>UISupportedInterfaceOrientations~ipad</key>
<array>
<string>UIInterfaceOrientationPortrait</string>
<string>UIInterfaceOrientationPortraitUpsideDown</string>
<string>UIInterfaceOrientationLandscapeLeft</string>
<string>UIInterfaceOrientationLandscapeRight</string>
</array>
<key>UIViewControllerBasedStatusBarAppearance</key>
<false/>
</dict>
</plist>
|
2877025939/tabelVew-CollectionView | 1,262 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionViewUITests/tabelVew__CollectionViewUITests.m | //
// tabelVew__CollectionViewUITests.m
// tabelVew嵌套CollectionViewUITests
//
// Created by anan on 2017/6/7.
// Copyright © 2017年 Plan. All rights reserved.
//
#import <XCTest/XCTest.h>
@interface tabelVew__CollectionViewUITests : XCTestCase
@end

@implementation tabelVew__CollectionViewUITests

#pragma mark - Test lifecycle

- (void)setUp {
    [super setUp];
    // Stop a UI test at the first failed assertion; later steps rarely make
    // sense once the UI is in an unexpected state.
    self.continueAfterFailure = NO;
    // Launch a fresh instance of the app before each test method so every
    // test starts from a known initial state (orientation, screen, etc.).
    [[[XCUIApplication alloc] init] launch];
}

- (void)tearDown {
    [super tearDown];
}

#pragma mark - Tests

- (void)testExample {
    // Placeholder: record interactions with Xcode's UI recorder and verify
    // the resulting state with XCTAssert and related functions.
}

@end
27182812/ChatGLM-LLaMA-chinese-insturct | 70,992 | src/transformers/models/pegasus/modeling_tf_pegasus.py | # coding=utf-8
# Copyright 2021, Google Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 Pegasus model."""
import random
from typing import Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFBaseModelOutputWithPastAndCrossAttentions,
TFSeq2SeqLMOutput,
TFSeq2SeqModelOutput,
)
# Public API
from ...modeling_tf_utils import (
DUMMY_INPUTS,
TFCausalLanguageModelingLoss,
TFModelInputType,
TFPreTrainedModel,
keras_serializable,
unpack_inputs,
)
from ...tf_utils import shape_list, stable_softmax
from ...utils import (
ContextManagers,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_pegasus import PegasusConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "google/pegasus-large"
_CONFIG_FOR_DOC = "PegasusConfig"
LARGE_NEGATIVE = -1e8
# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right
def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """
    Shift `input_ids` one position to the right and prepend `decoder_start_token_id`,
    turning labels into decoder inputs. Any `-100` entries (ignored label positions)
    are replaced with `pad_token_id`; all ids are asserted to be non-negative.
    """
    pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
    decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)

    batch_size = shape_list(input_ids)[0]
    start_column = tf.fill((batch_size, 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype))
    shifted_input_ids = tf.concat([start_column, input_ids[:, :-1]], -1)

    # replace possible -100 values in labels by `pad_token_id`
    pad_fill = tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype))
    shifted_input_ids = tf.where(shifted_input_ids == -100, pad_fill, shifted_input_ids)

    # "Verify that `labels` has only positive values and -100"
    assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))

    # Make sure the assertion op is called by wrapping the result in an identity no-op
    with tf.control_dependencies([assert_gte0]):
        shifted_input_ids = tf.identity(shifted_input_ids)

    return shifted_input_ids
# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
    """
    Make causal mask used for bi-directional self-attention.
    """
    bsz = input_ids_shape[0]
    tgt_len = input_ids_shape[1]

    # Start fully blocked, then open the lower triangle: position i may attend
    # to positions 0..i (blocked entries hold LARGE_NEGATIVE, open ones 0).
    mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
    col_index = tf.range(shape_list(mask)[-1])
    mask = tf.where(col_index < tf.reshape(col_index + 1, (shape_list(mask)[-1], 1)), 0.0, mask)

    # Cached positions from earlier decoding steps are always visible.
    if past_key_values_length > 0:
        mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)

    # Broadcast to (bsz, 1, tgt_len, tgt_len + past).
    return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    src_len = shape_list(mask)[1]
    if tgt_len is None:
        tgt_len = src_len

    one_cst = tf.constant(1.0)
    mask = tf.cast(mask, dtype=one_cst.dtype)
    expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))

    # Invert: 1 (attend) -> 0, 0 (masked) -> LARGE_NEGATIVE.
    return (one_cst - expanded_mask) * LARGE_NEGATIVE
# Copied from transformers.models.marian.modeling_tf_marian.TFMarianSinusoidalPositionalEmbedding with Marian->Pegasus
class TFPegasusSinusoidalPositionalEmbedding(tf.keras.layers.Layer):
    """This module produces sinusoidal positional embeddings of any length."""

    def __init__(self, num_positions: int, embedding_dim: int, **kwargs):
        super().__init__(**kwargs)
        # The sin/cos split below requires an even embedding dimension.
        if embedding_dim % 2 != 0:
            raise NotImplementedError(f"odd embedding_dim {embedding_dim} not supported")
        self.embedding_dim = embedding_dim
        self.num_positions = num_positions

    def build(self, input_shape: tf.TensorShape):
        """
        Build shared token embedding layer Shared weights logic adapted from
        https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
        """
        weight = self._init_weight(self.num_positions, self.embedding_dim)
        self.weight = self.add_weight(
            name="embeddings",
            shape=[self.num_positions, self.embedding_dim],
        )
        # Overwrite the default-initialized variable with the precomputed
        # sinusoid table, matching the variable's dtype.
        weight = tf.cast(weight, dtype=self.weight.dtype)
        self.weight.assign(weight)
        super().build(input_shape)

    @staticmethod
    def _init_weight(n_pos: int, dim: int):
        """
        Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
        the 2nd half of the vector. [dim // 2:]
        """
        position_enc = np.array(
            [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
        )
        table = np.zeros_like(position_enc)
        # index 0 is all zero
        # First half of each row holds sin features, second half cos features
        # (non-interleaved layout).
        table[:, 0 : dim // 2] = np.sin(position_enc[:, 0::2])
        table[:, dim // 2 :] = np.cos(position_enc[:, 1::2])
        # convert to tensor
        table = tf.convert_to_tensor(table)
        tf.stop_gradient(table)
        return table

    def call(
        self, input_shape: tf.TensorShape, past_key_values_length: int = 0, position_ids: Optional[tf.Tensor] = None
    ):
        """Input is expected to be of size [bsz x seqlen]."""
        if position_ids is None:
            # Continue position numbering after any cached decoding steps.
            seq_len = input_shape[1]
            position_ids = tf.range(past_key_values_length, seq_len + past_key_values_length, delta=1, name="range")
        return tf.gather(self.weight, position_ids)
# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->Pegasus
class TFPegasusAttention(tf.keras.layers.Layer):
    """Multi-headed attention from "Attention Is All You Need"."""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = tf.keras.layers.Dropout(dropout)
        self.head_dim = embed_dim // num_heads
        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        # Scale queries by 1/sqrt(head_dim) (standard dot-product attention).
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder

        self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
        self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
        self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
        self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")

    def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
        # (bsz, seq_len, embed_dim) -> (bsz, num_heads, seq_len, head_dim)
        return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))

    def call(
        self,
        hidden_states: tf.Tensor,
        key_value_states: Optional[tf.Tensor] = None,
        past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None,
        attention_mask: Optional[tf.Tensor] = None,
        layer_head_mask: Optional[tf.Tensor] = None,
        training: Optional[bool] = False,
    ) -> Tuple[tf.Tensor, Optional[tf.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, embed_dim = shape_list(hidden_states)

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            # Append the new step's keys/values after the cached ones along the
            # sequence axis.
            key_states = tf.concat([past_key_value[0], key_states], axis=2)
            value_states = tf.concat([past_key_value[1], value_states], axis=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        # Fold the head dimension into the batch for a single batched matmul.
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
        key_states = tf.reshape(key_states, proj_shape)
        value_states = tf.reshape(value_states, proj_shape)

        src_len = shape_list(key_states)[1]
        attn_weights = tf.matmul(query_states, key_states, transpose_b=True)

        tf.debugging.assert_equal(
            shape_list(attn_weights),
            [bsz * self.num_heads, tgt_len, src_len],
            message=(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {shape_list(attn_weights)}"
            ),
        )

        if attention_mask is not None:
            tf.debugging.assert_equal(
                shape_list(attention_mask),
                [bsz, 1, tgt_len, src_len],
                message=(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
                    f" {shape_list(attention_mask)}"
                ),
            )

            # Additive mask: blocked positions carry large negative values so
            # they vanish after the softmax.
            attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
            attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
            attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))

        attn_weights = stable_softmax(attn_weights, axis=-1)

        if layer_head_mask is not None:
            tf.debugging.assert_equal(
                shape_list(layer_head_mask),
                [self.num_heads],
                message=(
                    f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
                    f" {shape_list(layer_head_mask)}"
                ),
            )

            # Zero out (or scale) individual heads as requested by the mask.
            attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
                attn_weights, (bsz, self.num_heads, tgt_len, src_len)
            )
            attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))

        attn_probs = self.dropout(attn_weights, training=training)
        attn_output = tf.matmul(attn_probs, value_states)

        tf.debugging.assert_equal(
            shape_list(attn_output),
            [bsz * self.num_heads, tgt_len, self.head_dim],
            message=(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {shape_list(attn_output)}"
            ),
        )

        # Merge heads back: (bsz*heads, tgt, head_dim) -> (bsz, tgt, embed_dim).
        attn_output = tf.transpose(
            tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
        )
        attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))

        attn_output = self.out_proj(attn_output)
        # Returned weights are pre-dropout, reshaped per head for introspection.
        attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))

        return attn_output, attn_weights, past_key_value
# Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartEncoderLayer with MBart->Pegasus
class TFPegasusEncoderLayer(tf.keras.layers.Layer):
    # Pre-LayerNorm transformer encoder block: self-attention sub-layer followed
    # by a feed-forward sub-layer, each with a residual connection.
    def __init__(self, config: PegasusConfig, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = config.d_model
        self.self_attn = TFPegasusAttention(
            self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
        )
        self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        self.activation_fn = get_tf_activation(config.activation_function)
        self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)
        self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
        self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2")
        self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor,
        layer_head_mask: tf.Tensor,
        training: Optional[bool] = False,
    ):
        """
        Args:
            hidden_states (`tf.Tensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
            attention_mask (`tf.Tensor`): attention mask of size
                *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
            layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
                *(encoder_attention_heads,)*
        """
        # Self-attention sub-layer (norm applied before attention: pre-LN).
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states, self_attn_weights, _ = self.self_attn(
            hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask
        )

        tf.debugging.assert_equal(
            shape_list(hidden_states),
            shape_list(residual),
            message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
        )

        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states

        # Feed-forward sub-layer (also pre-LN), with residual connection.
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = self.activation_dropout(hidden_states, training=training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states

        return hidden_states, self_attn_weights
# Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartDecoderLayer with MBart->Pegasus
class TFPegasusDecoderLayer(tf.keras.layers.Layer):
    """A single pre-LayerNorm Transformer decoder layer.

    Consists of causal self-attention, optional cross-attention over the encoder output, and a
    position-wise feed-forward network; every sub-block is pre-normalized and residual. When caching
    is active, self-attention key/values occupy positions 1-2 and cross-attention key/values
    positions 3-4 of the returned `present_key_value` tuple.
    """
    def __init__(self, config: PegasusConfig, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = config.d_model
        # Uni-directional self-attention; is_decoder=True enables key/value caching.
        self.self_attn = TFPegasusAttention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            name="self_attn",
            is_decoder=True,
        )
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        self.activation_fn = get_tf_activation(config.activation_function)
        self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)
        self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
        # Cross-attention: queries come from the decoder, keys/values from the encoder output.
        self.encoder_attn = TFPegasusAttention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            name="encoder_attn",
            is_decoder=True,
        )
        self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
        # Position-wise feed-forward network: d_model -> decoder_ffn_dim -> d_model.
        self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
        self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2")
        self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: Optional[tf.Tensor] = None,
        encoder_hidden_states: Optional[tf.Tensor] = None,
        encoder_attention_mask: Optional[tf.Tensor] = None,
        layer_head_mask: Optional[tf.Tensor] = None,
        cross_attn_layer_head_mask: Optional[tf.Tensor] = None,
        past_key_value: Optional[Tuple[tf.Tensor]] = None,
        training: Optional[bool] = False,
    ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
        """
        Args:
            hidden_states (`tf.Tensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
            attention_mask (`tf.Tensor`): attention mask of size
                *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
            encoder_hidden_states (`tf.Tensor`):
                cross attention input to the layer of shape *(seq_len, batch, embed_dim)*
            encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
                *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
            layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
                *(decoder_attention_heads,)*
            cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
                *(decoder_attention_heads,)*
            past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states

        Returns:
            A 4-tuple `(hidden_states, self_attn_weights, cross_attn_weights, present_key_value)`;
            `cross_attn_weights` is `None` when `encoder_hidden_states` is not provided.
        """
        # --- Self-attention block (pre-LN + residual) ---
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
        )
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states
        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)
            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
            )
            hidden_states = self.dropout(hidden_states, training=training)
            hidden_states = residual + hidden_states
            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value
        # Fully Connected
        # --- Feed-forward block (pre-LN + residual) ---
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = self.activation_dropout(hidden_states, training=training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states
        return (
            hidden_states,
            self_attn_weights,
            cross_attn_weights,
            present_key_value,
        )
class TFPegasusPreTrainedModel(TFPreTrainedModel):
    """Base class for all TF Pegasus models: wires up the config class and weight-name prefix."""
    config_class = PegasusConfig
    base_model_prefix = "model"
    @property
    def dummy_inputs(self):
        """Minimal input dict used to build/trace the network before real data is seen."""
        # Token id 1 is treated as padding here purely so the dummy attention mask contains zeros.
        pad_token = 1
        input_ids = tf.convert_to_tensor(DUMMY_INPUTS, dtype=tf.int32)
        decoder_input_ids = tf.convert_to_tensor(DUMMY_INPUTS, dtype=tf.int32)
        dummy_inputs = {
            "decoder_input_ids": decoder_input_ids,
            # Mask out positions equal to pad_token (1 = attend, 0 = padding).
            "attention_mask": tf.cast(input_ids != pad_token, tf.int32),
            "input_ids": input_ids,
        }
        return dummy_inputs
    @tf.function(
        input_signature=[
            {
                "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
                "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
                "decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"),
                "decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"),
            }
        ]
    )
    # Copied from transformers.models.bart.modeling_tf_bart.TFBartPretrainedModel.serving
    def serving(self, inputs):
        """SavedModel serving signature: runs the model and converts outputs to servable tensors."""
        output = self.call(inputs)
        return self.serving_output(output)
PEGASUS_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
TensorFlow models and layers in `transformers` accept two formats as input:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional argument.
The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
positional argument:
- a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Note that when creating models and layers with
[subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
about any of this, as you can just pass inputs like you would to any other Python function!
</Tip>
Args:
config ([`PegasusConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
PEGASUS_GENERATION_EXAMPLE = r"""
Summarization example:
```python
>>> from transformers import AutoTokenizer, TFPegasusForConditionalGeneration
>>> model = TFPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
>>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
>>> ARTICLE_TO_SUMMARIZE = (
... "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
... )
>>> inputs = tokenizer(ARTICLE_TO_SUMMARIZE, max_length=1024, return_tensors="tf")
>>> # Generate Summary
    >>> summary_ids = model.generate(inputs["input_ids"])
>>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))
```
"""
PEGASUS_INPUTS_DOCSTRING = r"""
Args:
input_ids (`tf.Tensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Pegasus uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.
decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
range `[0, config.max_position_embeddings - 1]`.
head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
        encoder_outputs (`tf.FloatTensor`, *optional*):
            A sequence of hidden states of shape `(batch_size, sequence_length, hidden_size)` at the output of the
            last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`)
contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*, defaults to `True`):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`). Set to `False` during training, `True` during generation.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
eager mode, in graph mode the value will always be set to True.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@keras_serializable
class TFPegasusEncoder(tf.keras.layers.Layer):
    config_class = PegasusConfig
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`TFPegasusEncoderLayer`].
    Args:
        config: PegasusConfig
    """
    def __init__(self, config: PegasusConfig, embed_tokens: Optional[tf.keras.layers.Embedding] = None, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        # Per-layer drop probability for LayerDrop (applied only when training).
        self.layerdrop = config.encoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_position_embeddings
        # Scale token embeddings by sqrt(d_model) when configured (standard Transformer convention).
        self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
        # Token embedding table may be shared with the decoder (passed in by the main layer).
        self.embed_tokens = embed_tokens
        self.embed_positions = TFPegasusSinusoidalPositionalEmbedding(
            config.max_position_embeddings,
            config.d_model,
            name="embed_positions",
        )
        self.layers = [TFPegasusEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
        # Final normalization of the pre-LN stack, applied after the last encoder layer.
        self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
    def get_embed_tokens(self):
        # Accessor for the (possibly shared) token embedding layer.
        return self.embed_tokens
    def set_embed_tokens(self, embed_tokens):
        # Replace the token embedding layer (used when tying encoder/decoder embeddings).
        self.embed_tokens = embed_tokens
    @unpack_inputs
    def call(
        self,
        input_ids=None,
        inputs_embeds=None,
        attention_mask=None,
        head_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
    ):
        """
        Args:
            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.
                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, `optional):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
                in the config will be used instead.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail. This argument can be used only in eager mode, in graph mode the value in the config
                will be used instead.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
                in eager mode, in graph mode the value will always be set to True.
            training (`bool`, *optional*, defaults to `False`):
                Whether or not to use the model in training mode (some modules like dropout modules have different
                behaviors between training and evaluation).
        """
        # Exactly one of input_ids / inputs_embeds must be supplied.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        if inputs_embeds is None:
            # if `self.embed_tokens.load_weight_prefix` is set, runs the embedding operation with the correct name
            # scope, so that its weights are registered with the desired name for loading/storing. When `tf.name_scope`
            # is used with a name ending in `/`, that name replaces the current name scope.
            # (embeddings with tf.name_scope: self.embed_tokens.load_weight_prefix/self.embed_tokens.name/embeddings:0)
            context = []
            if hasattr(self.embed_tokens, "load_weight_prefix"):
                context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/"))
            with ContextManagers(context):
                # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound
                # indices on GPU, returning zeros instead. This is a dangerous silent behavior.
                tf.debugging.assert_less(
                    input_ids,
                    tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype),
                    message=(
                        "input_ids must be smaller than the embedding layer's input dimension (got"
                        f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})"
                    ),
                )
                inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        # Add sinusoidal position embeddings to the (scaled) token embeddings.
        embed_pos = self.embed_positions(input_shape)
        hidden_states = inputs_embeds + embed_pos
        hidden_states = self.dropout(hidden_states, training=training)
        # check attention mask and invert
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _expand_mask(attention_mask)
        else:
            attention_mask = None
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            tf.debugging.assert_equal(
                shape_list(head_mask)[0],
                len(self.layers),
                message=(
                    f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
                    f" {shape_list(head_mask)[0]}."
                ),
            )
        # encoder layers
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if training and (dropout_probability < self.layerdrop):  # skip the layer
                continue
            hidden_states, attn = encoder_layer(
                hidden_states,
                attention_mask,
                head_mask[idx] if head_mask is not None else None,
            )
            if output_attentions:
                all_attentions += (attn,)
        # Final layer norm of the pre-LN stack.
        hidden_states = self.layer_norm(hidden_states)
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            # Tuple output: drop the entries that were not requested (None).
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return TFBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
@keras_serializable
class TFPegasusDecoder(tf.keras.layers.Layer):
    config_class = PegasusConfig
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFPegasusDecoderLayer`]
    Args:
        config: PegasusConfig
        embed_tokens: output embedding
    """
    def __init__(self, config: PegasusConfig, embed_tokens: Optional[tf.keras.layers.Embedding] = None, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.padding_idx = config.pad_token_id
        # Token embedding table may be shared with the encoder (passed in by the main layer).
        self.embed_tokens = embed_tokens
        # Per-layer drop probability for LayerDrop (applied only when training).
        self.layerdrop = config.decoder_layerdrop
        self.embed_positions = TFPegasusSinusoidalPositionalEmbedding(
            config.max_position_embeddings,
            config.d_model,
            name="embed_positions",
        )
        # Scale token embeddings by sqrt(d_model) when configured (standard Transformer convention).
        self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
        self.layers = [TFPegasusDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
        # Final normalization of the pre-LN stack, applied after the last decoder layer.
        self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
        self.dropout = tf.keras.layers.Dropout(config.dropout)
    def get_embed_tokens(self):
        # Accessor for the (possibly shared) token embedding layer.
        return self.embed_tokens
    def set_embed_tokens(self, embed_tokens):
        # Replace the token embedding layer (used when tying encoder/decoder embeddings).
        self.embed_tokens = embed_tokens
    @unpack_inputs
    def call(
        self,
        input_ids=None,
        inputs_embeds=None,
        attention_mask=None,
        position_ids=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        head_mask=None,
        cross_attn_head_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
    ):
        r"""
        Args:
            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.
                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
                range `[0, config.max_position_embeddings - 1]`.
            encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
                Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
                decoding.
                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`tf.Tensor` of shape
                `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids`
                you can choose to directly pass an embedded representation. This is useful if you want more control
                over how to convert `input_ids` indices into associated vectors than the model's internal embedding
                lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
                in the config will be used instead.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail. This argument can be used only in eager mode, in graph mode the value in the config
                will be used instead.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
                in eager mode, in graph mode the value will always be set to True.
            training (`bool`, *optional*, defaults to `False`):
                Whether or not to use the model in training mode (some modules like dropout modules have different
                behaviors between training and evaluation).
        """
        # Exactly one of input_ids / inputs_embeds must be supplied.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
        # Number of already-cached positions (dim 2 of a cached key tensor), 0 on the first step.
        past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0
        # embed positions
        if position_ids is None:
            positions = self.embed_positions(input_shape, past_key_values_length)
        else:
            positions = self.embed_positions(input_shape, position_ids=position_ids)
        if inputs_embeds is None:
            # if `self.embed_tokens.load_weight_prefix` is set, runs the embedding operation with the correct name
            # scope, so that its weights are registered with the desired name for loading/storing. When `tf.name_scope`
            # is used with a name ending in `/`, that name replaces the current name scope.
            # (embeddings with tf.name_scope: self.embed_tokens.load_weight_prefix/self.embed_tokens.name/embeddings:0)
            context = []
            if hasattr(self.embed_tokens, "load_weight_prefix"):
                context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/"))
            with ContextManagers(context):
                # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound
                # indices on GPU, returning zeros instead. This is a dangerous silent behavior.
                tf.debugging.assert_less(
                    input_ids,
                    tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype),
                    message=(
                        "input_ids must be smaller than the embedding layer's input dimension (got"
                        f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})"
                    ),
                )
                inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        hidden_states = inputs_embeds
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        # Build the causal mask; when decoding a single new token with a cache, only padding
        # expansion is needed since all past positions are attendable.
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
        else:
            combined_attention_mask = _expand_mask(
                tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
            )
        if attention_mask is not None:
            combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])
        hidden_states = self.dropout(hidden_states + positions, training=training)
        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None
        present_key_values = () if use_cache else None
        # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
        for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]:
            if attn_mask is not None:
                tf.debugging.assert_equal(
                    shape_list(attn_mask)[0],
                    len(self.layers),
                    message=(
                        f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
                        f" {shape_list(attn_mask)[0]}."
                    ),
                )
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            dropout_probability = random.uniform(0, 1)
            if training and (dropout_probability < self.layerdrop):
                continue
            past_key_value = past_key_values[idx] if past_key_values is not None else None
            hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
                hidden_states,
                attention_mask=combined_attention_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                layer_head_mask=head_mask[idx] if head_mask is not None else None,
                cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
                past_key_value=past_key_value,
            )
            if use_cache:
                present_key_values += (present_key_value,)
            if output_attentions:
                all_self_attns += (layer_self_attn,)
                if encoder_hidden_states is not None:
                    all_cross_attns += (layer_cross_attn,)
        # Final layer norm of the pre-LN stack.
        hidden_states = self.layer_norm(hidden_states)
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        if not return_dict:
            return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns
        else:
            return TFBaseModelOutputWithPastAndCrossAttentions(
                last_hidden_state=hidden_states,
                past_key_values=present_key_values,
                hidden_states=all_hidden_states,
                attentions=all_self_attns,
                cross_attentions=all_cross_attns,
            )
@keras_serializable
class TFPegasusMainLayer(tf.keras.layers.Layer):
    """Bare Pegasus encoder-decoder stack, shared by all TF Pegasus heads.

    Owns the token embedding that is tied between encoder and decoder,
    delegates the heavy lifting to `TFPegasusEncoder` / `TFPegasusDecoder`,
    and normalizes the tuple vs. ModelOutput return conventions.
    """

    config_class = PegasusConfig

    def __init__(self, config: PegasusConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        # Token embedding table shared by encoder and decoder (weight tying).
        self.shared = tf.keras.layers.Embedding(
            input_dim=config.vocab_size,
            output_dim=config.d_model,
            embeddings_initializer=tf.keras.initializers.TruncatedNormal(stddev=self.config.init_std),
            name="model.shared",
        )
        # Additional attribute to specify the expected name scope of the layer (for loading/storing weights)
        self.shared.load_weight_prefix = "model.shared"
        self.encoder = TFPegasusEncoder(config, self.shared, name="encoder")
        self.decoder = TFPegasusDecoder(config, self.shared, name="decoder")

    def get_input_embeddings(self):
        # The shared embedding doubles as the input embedding table.
        return self.shared

    def set_input_embeddings(self, new_embeddings):
        # Propagate to both sub-models so the weight tying is preserved.
        self.shared = new_embeddings
        self.encoder.embed_tokens = self.shared
        self.decoder.embed_tokens = self.shared

    @unpack_inputs
    def call(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        decoder_position_ids=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
        past_key_values=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
        **kwargs,
    ):
        """Run encoder (unless precomputed `encoder_outputs` are given) then decoder.

        Returns a `TFSeq2SeqModelOutput` when `return_dict` is truthy, otherwise
        the concatenation of the decoder and encoder output tuples.
        """
        # Caching only makes sense when there are decoder inputs to extend.
        if decoder_input_ids is None and decoder_inputs_embeds is None:
            use_cache = False

        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                training=training,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput):
            encoder_outputs = TFBaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )
        # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
        elif not return_dict and not isinstance(encoder_outputs, tuple):
            encoder_outputs = encoder_outputs.to_tuple()

        decoder_outputs = self.decoder(
            decoder_input_ids,
            attention_mask=decoder_attention_mask,
            position_ids=decoder_position_ids,
            encoder_hidden_states=encoder_outputs[0],
            # The *encoder* attention mask is reused as the cross-attention mask.
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return TFSeq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    "The bare PEGASUS Model outputting raw hidden-states without any specific head on top.",
    PEGASUS_START_DOCSTRING,
)
class TFPegasusModel(TFPegasusPreTrainedModel):
    """Thin public wrapper around `TFPegasusMainLayer` with no task head."""

    def __init__(self, config: PegasusConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        # All real work happens in the shared main layer.
        self.model = TFPegasusMainLayer(config, name="model")

    def get_encoder(self):
        return self.model.encoder

    def get_decoder(self):
        return self.model.decoder

    @unpack_inputs
    @add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSeq2SeqModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: Optional[TFModelInputType] = None,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        decoder_input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        decoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        decoder_position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        decoder_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        cross_attn_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
        decoder_inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
        **kwargs,
    ) -> Union[TFSeq2SeqModelOutput, Tuple[tf.Tensor]]:
        # Pure pass-through to the main layer; kept for the public API surface
        # and the docstring decorators above.
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            decoder_position_ids=decoder_position_ids,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            encoder_outputs=encoder_outputs,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return outputs

    # Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output
    def serving_output(self, output):
        # Only materialize tensors for outputs the config guarantees will exist;
        # everything else becomes None so the SavedModel signature stays static.
        pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
        dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
        dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
        cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
        enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
        enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None

        return TFSeq2SeqModelOutput(
            last_hidden_state=output.last_hidden_state,
            past_key_values=pkv,
            decoder_hidden_states=dec_hs,
            decoder_attentions=dec_attns,
            cross_attentions=cross_attns,
            encoder_last_hidden_state=output.encoder_last_hidden_state,
            encoder_hidden_states=enc_hs,
            encoder_attentions=enc_attns,
        )
# Copied from transformers.models.bart.modeling_tf_bart.BiasLayer
class BiasLayer(tf.keras.layers.Layer):
    """
    Bias as a layer. It is used for serialization purposes: `tf.keras.Model.save_weights` stores on a per-layer basis,
    so all weights have to be registered in a layer.
    """

    def __init__(self, shape, initializer, trainable, name, **kwargs):
        super().__init__(name=name, **kwargs)
        # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of
        # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see:
        # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214
        self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable)

    def call(self, x):
        # Elementwise (broadcast) addition of the stored bias.
        return x + self.bias
@add_start_docstrings(
    "The PEGASUS Model with a language modeling head. Can be used for summarization.",
    PEGASUS_START_DOCSTRING,
)
class TFPegasusForConditionalGeneration(TFPegasusPreTrainedModel, TFCausalLanguageModelingLoss):
    # The per-sub-model embed_tokens weights are tied to self.model.shared, so
    # checkpoints are not expected to carry them separately.
    _keys_to_ignore_on_load_unexpected = [
        r"model.encoder.embed_tokens.weight",
        r"model.decoder.embed_tokens.weight",
    ]

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.model = TFPegasusMainLayer(config, name="model")
        self.use_cache = config.use_cache
        # final_bias_logits is registered as a buffer in pytorch, so not trainable for the sake of consistency.
        self.bias_layer = BiasLayer(
            name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
        )

    def get_decoder(self):
        return self.model.decoder

    def get_encoder(self):
        return self.model.encoder

    def get_output_embeddings(self):
        # Output embeddings are tied to the input embeddings (self.model.shared).
        return self.get_input_embeddings()

    def set_output_embeddings(self, value):
        self.set_input_embeddings(value)

    def get_bias(self):
        return {"final_logits_bias": self.bias_layer.bias}

    def set_bias(self, value):
        # Replaces the existing layers containing bias for correct (de)serialization.
        vocab_size = value["final_logits_bias"].shape[-1]
        self.bias_layer = BiasLayer(
            name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False
        )
        self.bias_layer.bias.assign(value["final_logits_bias"])

    @unpack_inputs
    @add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    @add_end_docstrings(PEGASUS_GENERATION_EXAMPLE)
    def call(
        self,
        input_ids: Optional[TFModelInputType] = None,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        decoder_input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        decoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        decoder_position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        decoder_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        cross_attn_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        encoder_outputs: Optional[TFBaseModelOutput] = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
        decoder_inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: bool = False,
    ) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]:
        """
        labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:
        """
        if labels is not None:
            # Replace pad tokens with -100 so the loss function ignores them.
            labels = tf.where(
                labels == self.config.pad_token_id,
                tf.cast(tf.fill(shape_list(labels), -100), labels.dtype),
                labels,
            )
            # Caching is pointless during teacher-forced training.
            use_cache = False
            if decoder_input_ids is None and decoder_inputs_embeds is None:
                # Standard teacher forcing: decoder inputs are the labels shifted right.
                decoder_input_ids = shift_tokens_right(
                    labels, self.config.pad_token_id, self.config.decoder_start_token_id
                )

        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            decoder_position_ids=decoder_position_ids,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        # Project decoder hidden states onto the vocabulary using the tied
        # embedding matrix, then add the non-trainable final logits bias.
        lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)
        lm_logits = self.bias_layer(lm_logits)
        masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
        return TFSeq2SeqLMOutput(
            loss=masked_lm_loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,  # index 1 of d outputs
            decoder_hidden_states=outputs.decoder_hidden_states,  # index 2 of d outputs
            decoder_attentions=outputs.decoder_attentions,  # index 3 of d outputs
            cross_attentions=outputs.cross_attentions,  # index 4 of d outputs
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,  # index 0 of encoder outputs
            encoder_hidden_states=outputs.encoder_hidden_states,  # 1 of e out
            encoder_attentions=outputs.encoder_attentions,  # 2 of e out
        )

    # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output
    def serving_output(self, output):
        # Only materialize tensors for outputs the config guarantees will exist;
        # the rest become None so the SavedModel signature stays static.
        pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
        dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
        dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
        cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
        enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
        enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None

        return TFSeq2SeqLMOutput(
            logits=output.logits,
            past_key_values=pkv,
            decoder_hidden_states=dec_hs,
            decoder_attentions=dec_attns,
            cross_attentions=cross_attns,
            encoder_last_hidden_state=output.encoder_last_hidden_state,
            encoder_hidden_states=enc_hs,
            encoder_attentions=enc_attns,
        )

    # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation
    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past_key_values=None,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs,
    ):
        # cut decoder_input_ids if past_key_values is used
        if past_key_values is not None:
            decoder_input_ids = decoder_input_ids[:, -1:]

        if decoder_attention_mask is not None:  # xla
            decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:]
        elif past_key_values is not None:  # no xla + past_key_values
            # Position of the next token equals the cached sequence length.
            decoder_position_ids = past_key_values[0][0].shape[2]
        else:  # no xla + no past_key_values
            decoder_position_ids = tf.range(decoder_input_ids.shape[1])

        return {
            "input_ids": None,  # encoder_outputs is defined. input_ids not needed
            "encoder_outputs": encoder_outputs,
            "past_key_values": past_key_values,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "decoder_position_ids": decoder_position_ids,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,  # change this to avoid caching (presumably for debugging)
        }

    def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):
        # Teacher-forcing helper used by Trainer: labels shifted one to the right.
        return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 5,361 | src/transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py | # coding=utf-8
# Copyright 2020 Google and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
# Ordered substring rewrites that translate TF Pegasus variable names into the
# HuggingFace (Bart-style) state-dict keys. Order matters: the more specific
# left-hand patterns must be applied before the generic ones below them.
PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k):
    """Translate a TF checkpoint variable name into its PyTorch state-dict key.

    Applies every substitution in ``PATTERNS`` in order; names without any
    matching substring are returned unchanged.
    """
    renamed = k
    for tf_pattern, hf_pattern in PATTERNS:
        renamed = renamed.replace(tf_pattern, hf_pattern)
    return renamed
# See appendix C of paper for all hyperparams
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """Build a PyTorch `PegasusForConditionalGeneration` from raw TF weights.

    Args:
        tf_weights: mapping of TF variable name -> numpy array (see
            `get_tf_weights_as_numpy`).
        cfg_updates: config overrides applied on top of `DEFAULTS`.

    Raises:
        ValueError: if a converted key does not exist in the torch state dict.
    """
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        # TF stores dense/projection kernels transposed relative to torch.nn.Linear.
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    # Tie encoder/decoder token embeddings to the shared table.
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    # TF checkpoints carry no biases for some layers; fill them with zeros so
    # the (non-strict) load below leaves nothing uninitialized.
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    # Sinusoidal position embeddings are computed, not loaded, so they may be missing.
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    """Load every model variable of a TF checkpoint into a {name: ndarray} dict.

    Optimizer slots ("Adafactor") and the "global_step" counter are skipped,
    since they are not part of the model weights.

    Args:
        path: checkpoint prefix, passed to `tf.train.list_variables`.
    """
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        # Generator expression short-circuits on the first match (no temp list).
        if any(pat in name for pat in ignore_name):
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    """End-to-end conversion: TF checkpoint -> tokenizer + PyTorch model on disk.

    The dataset name (e.g. "aeslc") is inferred from the checkpoint's parent
    directory and used to select the matching `task_specific_params` entry.
    """
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        # The "large" checkpoint embeds the full per-task parameter table.
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    # Position embeddings are sinusoidal (recomputed at load time), so drop them
    # from the saved weights to keep the file smaller.
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    # CLI entry point: python convert_pegasus_tf_to_pytorch.py <tf_ckpt_path> <save_dir>
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        # Default output location: pegasus/<dataset>, where <dataset> is the
        # checkpoint's parent directory name.
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
|
2877025939/tabelVew-CollectionView | 6,154 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/Masonry/Masonry/NSArray+MASAdditions.m | //
// NSArray+MASAdditions.m
//
//
// Created by Daniel Hammond on 11/26/13.
//
//
#import "NSArray+MASAdditions.h"
#import "View+MASAdditions.h"
@implementation NSArray (MASAdditions)

// Creates and installs constraints (built by `block`) on every view in the
// array. Returns the flattened list of all created MASConstraints.
- (NSArray *)mas_makeConstraints:(void(^)(MASConstraintMaker *make))block {
    NSMutableArray *constraints = [NSMutableArray array];
    for (MAS_VIEW *view in self) {
        NSAssert([view isKindOfClass:[MAS_VIEW class]], @"All objects in the array must be views");
        [constraints addObjectsFromArray:[view mas_makeConstraints:block]];
    }
    return constraints;
}

// Like mas_makeConstraints:, but updates existing matching constraints
// instead of adding duplicates.
- (NSArray *)mas_updateConstraints:(void(^)(MASConstraintMaker *make))block {
    NSMutableArray *constraints = [NSMutableArray array];
    for (MAS_VIEW *view in self) {
        NSAssert([view isKindOfClass:[MAS_VIEW class]], @"All objects in the array must be views");
        [constraints addObjectsFromArray:[view mas_updateConstraints:block]];
    }
    return constraints;
}

// Like mas_makeConstraints:, but first removes all previously installed
// Masonry constraints on each view.
- (NSArray *)mas_remakeConstraints:(void(^)(MASConstraintMaker *make))block {
    NSMutableArray *constraints = [NSMutableArray array];
    for (MAS_VIEW *view in self) {
        NSAssert([view isKindOfClass:[MAS_VIEW class]], @"All objects in the array must be views");
        [constraints addObjectsFromArray:[view mas_remakeConstraints:block]];
    }
    return constraints;
}

// Distributes the views along `axisType` with a fixed gap between neighbours.
// Each view gets equal width (horizontal) / height (vertical); the first view
// is pinned `leadSpacing` from the superview edge and the last `tailSpacing`
// from the opposite edge. Requires at least two views.
- (void)mas_distributeViewsAlongAxis:(MASAxisType)axisType withFixedSpacing:(CGFloat)fixedSpacing leadSpacing:(CGFloat)leadSpacing tailSpacing:(CGFloat)tailSpacing {
    if (self.count < 2) {
        NSAssert(self.count>1,@"views to distribute need to bigger than one");
        return;
    }

    MAS_VIEW *tempSuperView = [self mas_commonSuperviewOfViews];
    if (axisType == MASAxisTypeHorizontal) {
        MAS_VIEW *prev;
        for (int i = 0; i < self.count; i++) {
            MAS_VIEW *v = self[i];
            [v mas_makeConstraints:^(MASConstraintMaker *make) {
                if (prev) {
                    // Equal widths; left edge follows the previous view.
                    make.width.equalTo(prev);
                    make.left.equalTo(prev.mas_right).offset(fixedSpacing);
                    if (i == self.count - 1) {//last one
                        make.right.equalTo(tempSuperView).offset(-tailSpacing);
                    }
                }
                else {//first one
                    make.left.equalTo(tempSuperView).offset(leadSpacing);
                }
            }];
            prev = v;
        }
    }
    else {
        MAS_VIEW *prev;
        for (int i = 0; i < self.count; i++) {
            MAS_VIEW *v = self[i];
            [v mas_makeConstraints:^(MASConstraintMaker *make) {
                if (prev) {
                    // Equal heights; top edge follows the previous view.
                    make.height.equalTo(prev);
                    make.top.equalTo(prev.mas_bottom).offset(fixedSpacing);
                    if (i == self.count - 1) {//last one
                        make.bottom.equalTo(tempSuperView).offset(-tailSpacing);
                    }
                }
                else {//first one
                    make.top.equalTo(tempSuperView).offset(leadSpacing);
                }
            }];
            prev = v;
        }
    }
}

// Distributes the views along `axisType` with a fixed item size; the spacing
// between items is derived from the superview size. The trailing edge of view
// i is placed at fraction i/(count-1) of the superview's extent plus an offset
// interpolating between leadSpacing and tailSpacing.
// NOTE(review): the offset formula below matches upstream Masonry — verify
// against the upstream repository before modifying.
- (void)mas_distributeViewsAlongAxis:(MASAxisType)axisType withFixedItemLength:(CGFloat)fixedItemLength leadSpacing:(CGFloat)leadSpacing tailSpacing:(CGFloat)tailSpacing {
    if (self.count < 2) {
        NSAssert(self.count>1,@"views to distribute need to bigger than one");
        return;
    }

    MAS_VIEW *tempSuperView = [self mas_commonSuperviewOfViews];
    if (axisType == MASAxisTypeHorizontal) {
        MAS_VIEW *prev;
        for (int i = 0; i < self.count; i++) {
            MAS_VIEW *v = self[i];
            [v mas_makeConstraints:^(MASConstraintMaker *make) {
                make.width.equalTo(@(fixedItemLength));
                if (prev) {
                    if (i == self.count - 1) {//last one
                        make.right.equalTo(tempSuperView).offset(-tailSpacing);
                    }
                    else {
                        CGFloat offset = (1-(i/((CGFloat)self.count-1)))*(fixedItemLength+leadSpacing)-i*tailSpacing/(((CGFloat)self.count-1));
                        make.right.equalTo(tempSuperView).multipliedBy(i/((CGFloat)self.count-1)).with.offset(offset);
                    }
                }
                else {//first one
                    make.left.equalTo(tempSuperView).offset(leadSpacing);
                }
            }];
            prev = v;
        }
    }
    else {
        MAS_VIEW *prev;
        for (int i = 0; i < self.count; i++) {
            MAS_VIEW *v = self[i];
            [v mas_makeConstraints:^(MASConstraintMaker *make) {
                make.height.equalTo(@(fixedItemLength));
                if (prev) {
                    if (i == self.count - 1) {//last one
                        make.bottom.equalTo(tempSuperView).offset(-tailSpacing);
                    }
                    else {
                        CGFloat offset = (1-(i/((CGFloat)self.count-1)))*(fixedItemLength+leadSpacing)-i*tailSpacing/(((CGFloat)self.count-1));
                        make.bottom.equalTo(tempSuperView).multipliedBy(i/((CGFloat)self.count-1)).with.offset(offset);
                    }
                }
                else {//first one
                    make.top.equalTo(tempSuperView).offset(leadSpacing);
                }
            }];
            prev = v;
        }
    }
}

// Walks the array and folds each view into the closest common ancestor of all
// views seen so far. Non-view entries are silently skipped.
- (MAS_VIEW *)mas_commonSuperviewOfViews
{
    MAS_VIEW *commonSuperview = nil;
    MAS_VIEW *previousView = nil;
    for (id object in self) {
        if ([object isKindOfClass:[MAS_VIEW class]]) {
            MAS_VIEW *view = (MAS_VIEW *)object;
            if (previousView) {
                commonSuperview = [view mas_closestCommonSuperview:commonSuperview];
            } else {
                commonSuperview = view;
            }
            previousView = view;
        }
    }
    NSAssert(commonSuperview, @"Can't constrain views that do not share a common superview. Make sure that all the views in this array have been added into the same view hierarchy.");
    return commonSuperview;
}

@end
|
2877025939/tabelVew-CollectionView | 7,965 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/Masonry/Masonry/MASConstraint.m | //
// MASConstraint.m
// Masonry
//
// Created by Nick Tymchenko on 1/20/14.
//
#import "MASConstraint.h"
#import "MASConstraint+Private.h"
// Raises NSInternalInconsistencyException naming the unimplemented selector.
// Used to mark the abstract methods subclasses must override.
#define MASMethodNotImplemented() \
    @throw [NSException exceptionWithName:NSInternalInconsistencyException \
                                   reason:[NSString stringWithFormat:@"You must override %@ in a subclass.", NSStringFromSelector(_cmd)] \
                                 userInfo:nil]

// Abstract base class for Masonry constraints. Provides the chainable,
// block-returning DSL (equalTo, offset, priority, attribute accessors, ...)
// and declares the primitive operations subclasses must implement.
@implementation MASConstraint

#pragma mark - Init

- (id)init {
    // Abstract class guard: only subclasses may be instantiated.
    NSAssert(![self isMemberOfClass:[MASConstraint class]], @"MASConstraint is an abstract class, you should not instantiate it directly.");
    return [super init];
}

#pragma mark - NSLayoutRelation proxies

// Each proxy returns a block that forwards to the subclass-implemented
// equalToWithRelation primitive with the matching NSLayoutRelation.

- (MASConstraint * (^)(id))equalTo {
    return ^id(id attribute) {
        return self.equalToWithRelation(attribute, NSLayoutRelationEqual);
    };
}

// mas_ variants exist for use when the MAS_SHORTHAND_GLOBALS macro wraps
// scalar arguments; behavior is identical to the unprefixed forms.
- (MASConstraint * (^)(id))mas_equalTo {
    return ^id(id attribute) {
        return self.equalToWithRelation(attribute, NSLayoutRelationEqual);
    };
}

- (MASConstraint * (^)(id))greaterThanOrEqualTo {
    return ^id(id attribute) {
        return self.equalToWithRelation(attribute, NSLayoutRelationGreaterThanOrEqual);
    };
}

- (MASConstraint * (^)(id))mas_greaterThanOrEqualTo {
    return ^id(id attribute) {
        return self.equalToWithRelation(attribute, NSLayoutRelationGreaterThanOrEqual);
    };
}

- (MASConstraint * (^)(id))lessThanOrEqualTo {
    return ^id(id attribute) {
        return self.equalToWithRelation(attribute, NSLayoutRelationLessThanOrEqual);
    };
}

- (MASConstraint * (^)(id))mas_lessThanOrEqualTo {
    return ^id(id attribute) {
        return self.equalToWithRelation(attribute, NSLayoutRelationLessThanOrEqual);
    };
}

#pragma mark - MASLayoutPriority proxies

// Convenience wrappers around the abstract priority() primitive.

- (MASConstraint * (^)())priorityLow {
    return ^id{
        self.priority(MASLayoutPriorityDefaultLow);
        return self;
    };
}

- (MASConstraint * (^)())priorityMedium {
    return ^id{
        self.priority(MASLayoutPriorityDefaultMedium);
        return self;
    };
}

- (MASConstraint * (^)())priorityHigh {
    return ^id{
        self.priority(MASLayoutPriorityDefaultHigh);
        return self;
    };
}

#pragma mark - NSLayoutConstraint constant proxies

// Block-returning setters for the constraint constant in its various shapes
// (edge insets, size, center point, scalar). All funnel into the abstract
// setters declared at the bottom of this file.

- (MASConstraint * (^)(MASEdgeInsets))insets {
    return ^id(MASEdgeInsets insets){
        self.insets = insets;
        return self;
    };
}

- (MASConstraint * (^)(CGSize))sizeOffset {
    return ^id(CGSize offset) {
        self.sizeOffset = offset;
        return self;
    };
}

- (MASConstraint * (^)(CGPoint))centerOffset {
    return ^id(CGPoint offset) {
        self.centerOffset = offset;
        return self;
    };
}

- (MASConstraint * (^)(CGFloat))offset {
    return ^id(CGFloat offset){
        self.offset = offset;
        return self;
    };
}

- (MASConstraint * (^)(NSValue *value))valueOffset {
    return ^id(NSValue *offset) {
        NSAssert([offset isKindOfClass:NSValue.class], @"expected an NSValue offset, got: %@", offset);
        [self setLayoutConstantWithValue:offset];
        return self;
    };
}

- (MASConstraint * (^)(id offset))mas_offset {
    // Will never be called due to macro
    return nil;
}

#pragma mark - NSLayoutConstraint constant setter

// Dispatches an NSValue-boxed constant to the matching typed setter based on
// its Objective-C type encoding.
- (void)setLayoutConstantWithValue:(NSValue *)value {
    if ([value isKindOfClass:NSNumber.class]) {
        self.offset = [(NSNumber *)value doubleValue];
    } else if (strcmp(value.objCType, @encode(CGPoint)) == 0) {
        CGPoint point;
        [value getValue:&point];
        self.centerOffset = point;
    } else if (strcmp(value.objCType, @encode(CGSize)) == 0) {
        CGSize size;
        [value getValue:&size];
        self.sizeOffset = size;
    } else if (strcmp(value.objCType, @encode(MASEdgeInsets)) == 0) {
        MASEdgeInsets insets;
        [value getValue:&insets];
        self.insets = insets;
    } else {
        NSAssert(NO, @"attempting to set layout constant with unsupported value: %@", value);
    }
}

#pragma mark - Semantic properties

// No-op connectives that make constraint chains read like English.

- (MASConstraint *)with {
    return self;
}

- (MASConstraint *)and {
    return self;
}

#pragma mark - Chaining

// Subclasses override this single primitive; every attribute accessor below
// delegates to it.
- (MASConstraint *)addConstraintWithLayoutAttribute:(NSLayoutAttribute __unused)layoutAttribute {
    MASMethodNotImplemented();
}

- (MASConstraint *)left {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeLeft];
}

- (MASConstraint *)top {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeTop];
}

- (MASConstraint *)right {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeRight];
}

- (MASConstraint *)bottom {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeBottom];
}

- (MASConstraint *)leading {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeLeading];
}

- (MASConstraint *)trailing {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeTrailing];
}

- (MASConstraint *)width {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeWidth];
}

- (MASConstraint *)height {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeHeight];
}

- (MASConstraint *)centerX {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeCenterX];
}

- (MASConstraint *)centerY {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeCenterY];
}

- (MASConstraint *)baseline {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeBaseline];
}

// Attributes only available on newer SDKs are compiled in conditionally.
#if (__IPHONE_OS_VERSION_MIN_REQUIRED >= 80000) || (__TV_OS_VERSION_MIN_REQUIRED >= 9000) || (__MAC_OS_X_VERSION_MIN_REQUIRED >= 101100)

- (MASConstraint *)firstBaseline {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeFirstBaseline];
}

- (MASConstraint *)lastBaseline {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeLastBaseline];
}

#endif

#if TARGET_OS_IPHONE || TARGET_OS_TV

- (MASConstraint *)leftMargin {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeLeftMargin];
}

- (MASConstraint *)rightMargin {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeRightMargin];
}

- (MASConstraint *)topMargin {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeTopMargin];
}

- (MASConstraint *)bottomMargin {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeBottomMargin];
}

- (MASConstraint *)leadingMargin {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeLeadingMargin];
}

- (MASConstraint *)trailingMargin {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeTrailingMargin];
}

- (MASConstraint *)centerXWithinMargins {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeCenterXWithinMargins];
}

- (MASConstraint *)centerYWithinMargins {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeCenterYWithinMargins];
}

#endif

#pragma mark - Abstract

// Primitives every concrete subclass must implement; calling any of these on
// the base class throws via MASMethodNotImplemented().

- (MASConstraint * (^)(CGFloat multiplier))multipliedBy { MASMethodNotImplemented(); }

- (MASConstraint * (^)(CGFloat divider))dividedBy { MASMethodNotImplemented(); }

- (MASConstraint * (^)(MASLayoutPriority priority))priority { MASMethodNotImplemented(); }

- (MASConstraint * (^)(id, NSLayoutRelation))equalToWithRelation { MASMethodNotImplemented(); }

- (MASConstraint * (^)(id key))key { MASMethodNotImplemented(); }

- (void)setInsets:(MASEdgeInsets __unused)insets { MASMethodNotImplemented(); }

- (void)setSizeOffset:(CGSize __unused)sizeOffset { MASMethodNotImplemented(); }

- (void)setCenterOffset:(CGPoint __unused)centerOffset { MASMethodNotImplemented(); }

- (void)setOffset:(CGFloat __unused)offset { MASMethodNotImplemented(); }

#if TARGET_OS_MAC && !(TARGET_OS_IPHONE || TARGET_OS_TV)

- (MASConstraint *)animator { MASMethodNotImplemented(); }

#endif

- (void)activate { MASMethodNotImplemented(); }

- (void)deactivate { MASMethodNotImplemented(); }

- (void)install { MASMethodNotImplemented(); }

- (void)uninstall { MASMethodNotImplemented(); }

@end
|
2877025939/tabelVew-CollectionView | 1,191 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/Masonry/Masonry/MASViewAttribute.m | //
// MASAttribute.m
// Masonry
//
// Created by Jonas Budelmann on 21/07/13.
// Copyright (c) 2013 cloudling. All rights reserved.
//
#import "MASViewAttribute.h"
@implementation MASViewAttribute
/// Convenience initializer: uses the view itself as the layout item.
- (id)initWithView:(MAS_VIEW *)view layoutAttribute:(NSLayoutAttribute)layoutAttribute {
    return [self initWithView:view item:view layoutAttribute:layoutAttribute];
}
/// Designated initializer: captures the owning view, the layout item the
/// attribute actually belongs to (view, view controller, layout guide, ...),
/// and the NSLayoutAttribute being constrained.
- (id)initWithView:(MAS_VIEW *)view item:(id)item layoutAttribute:(NSLayoutAttribute)layoutAttribute {
    self = [super init];
    if (self) {
        _view = view;
        _item = item;
        _layoutAttribute = layoutAttribute;
    }
    return self;
}
/// YES when the wrapped attribute is a size (width/height) rather than a
/// position attribute.
- (BOOL)isSizeAttribute {
    switch (self.layoutAttribute) {
        case NSLayoutAttributeWidth:
        case NSLayoutAttributeHeight:
            return YES;
        default:
            return NO;
    }
}
// Two view attributes compare equal when they reference the same view
// (pointer identity) and the same layout attribute.
// NOTE(review): `item` is not part of the comparison, so two attributes that
// differ only in their layout item compare equal — confirm this is intended.
// Kept consistent with -hash below, which also combines only view + attribute.
- (BOOL)isEqual:(MASViewAttribute *)viewAttribute {
    if ([viewAttribute isKindOfClass:self.class]) {
        return self.view == viewAttribute.view
            && self.layoutAttribute == viewAttribute.layoutAttribute;
    }
    return [super isEqual:viewAttribute];
}
// Combines the view's hash (rotated by half the NSUInteger bit width, see
// MAS_NSUINTROTATE in MASUtilities.h) with the layout attribute. Uses exactly
// the fields compared by -isEqual:, so equal objects hash equally.
- (NSUInteger)hash {
    return MAS_NSUINTROTATE([self.view hash], MAS_NSUINT_BIT / 2) ^ self.layoutAttribute;
}
@end
|
2877025939/tabelVew-CollectionView | 5,629 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/Masonry/Masonry/MASConstraintMaker.h | //
// MASConstraintBuilder.h
// Masonry
//
// Created by Jonas Budelmann on 20/07/13.
// Copyright (c) 2013 cloudling. All rights reserved.
//
#import "MASConstraint.h"
#import "MASUtilities.h"
/**
 *  Bitmask of layout attributes, consumed by MASConstraintMaker's `attributes`
 *  block to create several constraints in one call. Each flag is 1 shifted by
 *  the matching NSLayoutAttribute raw value, so flags combine with binary-or.
 */
typedef NS_OPTIONS(NSInteger, MASAttribute) {
    MASAttributeLeft = 1 << NSLayoutAttributeLeft,
    MASAttributeRight = 1 << NSLayoutAttributeRight,
    MASAttributeTop = 1 << NSLayoutAttributeTop,
    MASAttributeBottom = 1 << NSLayoutAttributeBottom,
    MASAttributeLeading = 1 << NSLayoutAttributeLeading,
    MASAttributeTrailing = 1 << NSLayoutAttributeTrailing,
    MASAttributeWidth = 1 << NSLayoutAttributeWidth,
    MASAttributeHeight = 1 << NSLayoutAttributeHeight,
    MASAttributeCenterX = 1 << NSLayoutAttributeCenterX,
    MASAttributeCenterY = 1 << NSLayoutAttributeCenterY,
    MASAttributeBaseline = 1 << NSLayoutAttributeBaseline,
    // First/last baseline require iOS 8 / tvOS 9 / macOS 10.11.
#if (__IPHONE_OS_VERSION_MIN_REQUIRED >= 80000) || (__TV_OS_VERSION_MIN_REQUIRED >= 9000) || (__MAC_OS_X_VERSION_MIN_REQUIRED >= 101100)
    MASAttributeFirstBaseline = 1 << NSLayoutAttributeFirstBaseline,
    MASAttributeLastBaseline = 1 << NSLayoutAttributeLastBaseline,
#endif
    // Layout-margin attributes exist only on iOS / tvOS.
#if TARGET_OS_IPHONE || TARGET_OS_TV
    MASAttributeLeftMargin = 1 << NSLayoutAttributeLeftMargin,
    MASAttributeRightMargin = 1 << NSLayoutAttributeRightMargin,
    MASAttributeTopMargin = 1 << NSLayoutAttributeTopMargin,
    MASAttributeBottomMargin = 1 << NSLayoutAttributeBottomMargin,
    MASAttributeLeadingMargin = 1 << NSLayoutAttributeLeadingMargin,
    MASAttributeTrailingMargin = 1 << NSLayoutAttributeTrailingMargin,
    MASAttributeCenterXWithinMargins = 1 << NSLayoutAttributeCenterXWithinMargins,
    MASAttributeCenterYWithinMargins = 1 << NSLayoutAttributeCenterYWithinMargins,
#endif
};
/**
* Provides factory methods for creating MASConstraints.
* Constraints are collected until they are ready to be installed
*
*/
@interface MASConstraintMaker : NSObject

/**
 *  The following properties return a new MASViewConstraint
 *  with the first item set to the makers associated view and the appropriate MASViewAttribute
 */
@property (nonatomic, strong, readonly) MASConstraint *left;
@property (nonatomic, strong, readonly) MASConstraint *top;
@property (nonatomic, strong, readonly) MASConstraint *right;
@property (nonatomic, strong, readonly) MASConstraint *bottom;
@property (nonatomic, strong, readonly) MASConstraint *leading;
@property (nonatomic, strong, readonly) MASConstraint *trailing;
@property (nonatomic, strong, readonly) MASConstraint *width;
@property (nonatomic, strong, readonly) MASConstraint *height;
@property (nonatomic, strong, readonly) MASConstraint *centerX;
@property (nonatomic, strong, readonly) MASConstraint *centerY;
@property (nonatomic, strong, readonly) MASConstraint *baseline;

// Available from iOS 8 / tvOS 9 / macOS 10.11.
#if (__IPHONE_OS_VERSION_MIN_REQUIRED >= 80000) || (__TV_OS_VERSION_MIN_REQUIRED >= 9000) || (__MAC_OS_X_VERSION_MIN_REQUIRED >= 101100)
@property (nonatomic, strong, readonly) MASConstraint *firstBaseline;
@property (nonatomic, strong, readonly) MASConstraint *lastBaseline;
#endif

// Layout-margin attributes exist only on iOS / tvOS.
#if TARGET_OS_IPHONE || TARGET_OS_TV
@property (nonatomic, strong, readonly) MASConstraint *leftMargin;
@property (nonatomic, strong, readonly) MASConstraint *rightMargin;
@property (nonatomic, strong, readonly) MASConstraint *topMargin;
@property (nonatomic, strong, readonly) MASConstraint *bottomMargin;
@property (nonatomic, strong, readonly) MASConstraint *leadingMargin;
@property (nonatomic, strong, readonly) MASConstraint *trailingMargin;
@property (nonatomic, strong, readonly) MASConstraint *centerXWithinMargins;
@property (nonatomic, strong, readonly) MASConstraint *centerYWithinMargins;
#endif

/**
 *  Returns a block which creates a new MASCompositeConstraint with the first item set
 *  to the makers associated view and children corresponding to the set bits in the
 *  MASAttribute parameter. Combine multiple attributes via binary-or.
 */
@property (nonatomic, strong, readonly) MASConstraint *(^attributes)(MASAttribute attrs);

/**
 *  Creates a MASCompositeConstraint with type MASCompositeConstraintTypeEdges
 *  which generates the appropriate MASViewConstraint children (top, left, bottom, right)
 *  with the first item set to the makers associated view
 */
@property (nonatomic, strong, readonly) MASConstraint *edges;

/**
 *  Creates a MASCompositeConstraint with type MASCompositeConstraintTypeSize
 *  which generates the appropriate MASViewConstraint children (width, height)
 *  with the first item set to the makers associated view
 */
@property (nonatomic, strong, readonly) MASConstraint *size;

/**
 *  Creates a MASCompositeConstraint with type MASCompositeConstraintTypeCenter
 *  which generates the appropriate MASViewConstraint children (centerX, centerY)
 *  with the first item set to the makers associated view
 */
@property (nonatomic, strong, readonly) MASConstraint *center;

/**
 *  Whether or not to check for an existing constraint instead of adding constraint
 */
@property (nonatomic, assign) BOOL updateExisting;

/**
 *  Whether or not to remove existing constraints prior to installing
 */
@property (nonatomic, assign) BOOL removeExisting;

/**
 *	initialises the maker with a default view
 *
 *	@param	view	any MASConstrait are created with this view as the first item
 *
 *	@return	a new MASConstraintMaker
 */
- (id)initWithView:(MAS_VIEW *)view;

/**
 *	Calls install method on any MASConstraints which have been created by this maker
 *
 *	@return	an array of all the installed MASConstraints
 */
- (NSArray *)install;

/**
 *  Returns a block that runs the passed constraint-building block and groups
 *  the constraints created inside it.
 */
- (MASConstraint * (^)(dispatch_block_t))group;

@end
|
2881099/csredis | 11,664 | test/CSRedisCore.Tests/CSRedisClientListTests.cs | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading;
using Xunit;
namespace CSRedisCore.Tests {
public class CSRedisClientListTests : TestBase {

    // Each test seeds uniquely-named keys, so the tests are independent of
    // each other. `base.Class/Bytes/String/Null` are fixture values from
    // TestBase; a stored null round-trips through Redis as "".

    [Fact]
    public void BLPopWithKey() {
        // Both keys are empty, so a 1-second blocking pop times out and returns null.
        // Fixed: the guard previously called BRPop (copy-pasted from the BRPop tests).
        Assert.Null(rds.BLPop(1, "TestBLPopWithKey1", "TestBLPopWithKey2"));
        // Push from another thread after ~500 ms; the 5-second blocking pop picks it up.
        new Thread(() => {
            Thread.CurrentThread.Join(500);
            rds.RPush("TestBLPopWithKey1", "testv1");
        }).Start();
        Assert.Equal(("TestBLPopWithKey1", "testv1"), rds.BLPopWithKey(5, "TestBLPopWithKey1", "TestBLPopWithKey2"));
        new Thread(() => {
            Thread.CurrentThread.Join(500);
            rds.RPush("TestBLPopWithKey2", "testv2");
        }).Start();
        Assert.Equal(("TestBLPopWithKey2", "testv2"), rds.BLPopWithKey(5, "TestBLPopWithKey1", "TestBLPopWithKey2"));
    }

    [Fact]
    public void BLPop() {
        // Fixed: this test previously exercised BRPop throughout (copy-paste from
        // the BRPop test) and never called the method it is named after. With a
        // single pushed element, a left pop yields the same value the old
        // assertions expected, so the expected values are unchanged.
        Assert.Null(rds.BLPop(1, "TestBLPop1", "TestBLPop2"));
        new Thread(() => {
            Thread.CurrentThread.Join(500);
            rds.RPush("TestBLPop1", "testv1");
        }).Start();
        Assert.Equal("testv1", rds.BLPop(5, "TestBLPop1", "TestBLPop2"));
        new Thread(() => {
            Thread.CurrentThread.Join(500);
            rds.RPush("TestBLPop2", "testv2");
        }).Start();
        Assert.Equal("testv2", rds.BLPop(5, "TestBLPop1", "TestBLPop2"));
    }

    [Fact]
    public void BRPopWithKey() {
        Assert.Null(rds.BRPop(1, "TestBRPopWithKey1", "TestBRPopWithKey2"));
        new Thread(() => {
            Thread.CurrentThread.Join(500);
            rds.LPush("TestBRPopWithKey1", "testv1");
        }).Start();
        Assert.Equal(("TestBRPopWithKey1", "testv1"), rds.BRPopWithKey(5, "TestBRPopWithKey1", "TestBRPopWithKey2"));
        new Thread(() => {
            Thread.CurrentThread.Join(500);
            rds.LPush("TestBRPopWithKey2", "testv2");
        }).Start();
        Assert.Equal(("TestBRPopWithKey2", "testv2"), rds.BRPopWithKey(5, "TestBRPopWithKey1", "TestBRPopWithKey2"));
    }

    [Fact]
    public void BRPop() {
        Assert.Null(rds.BRPop(1, "TestBRPop1", "TestBRPop2"));
        new Thread(() => {
            Thread.CurrentThread.Join(500);
            rds.LPush("TestBRPop1", "testv1");
        }).Start();
        Assert.Equal("testv1", rds.BRPop(5, "TestBRPop1", "TestBRPop2"));
        new Thread(() => {
            Thread.CurrentThread.Join(500);
            rds.LPush("TestBRPop2", "testv2");
        }).Start();
        Assert.Equal("testv2", rds.BRPop(5, "TestBRPop1", "TestBRPop2"));
    }

    [Fact]
    public void BRPopLPush() {
        // TODO: not implemented — add coverage for BRPopLPush
        // (blocking right-pop from the source + left-push onto the destination).
    }

    [Fact]
    public void LIndex() {
        // List layout after RPush: [Class, Class, Bytes, Bytes, String, String, Null, Null].
        Assert.Equal(8, rds.RPush("TestLIndex", base.Class, base.Class, base.Bytes, base.Bytes, base.String, base.String, base.Null, base.Null));
        Assert.Equal(base.Class.ToString(), rds.LIndex<TestClass>("TestLIndex", 0).ToString());
        Assert.Equal(base.Bytes, rds.LIndex<byte[]>("TestLIndex", 2));
        Assert.Equal(base.String, rds.LIndex("TestLIndex", 4));
        Assert.Equal("", rds.LIndex("TestLIndex", 6)); // stored null reads back as ""
    }

    [Fact]
    public void LInsertBefore() {
        Assert.Equal(8, rds.RPush("TestLInsertBefore", base.Class, base.Class, base.Bytes, base.Bytes, base.String, base.String, base.Null, base.Null));
        // Inserting before the first matching pivot makes the new value the head.
        Assert.Equal(9, rds.LInsertBefore("TestLInsertBefore", base.Class, "TestLInsertBefore"));
        Assert.Equal("TestLInsertBefore", rds.LIndex("TestLInsertBefore", 0));
        Assert.Equal(base.Class.ToString(), rds.LIndex<TestClass>("TestLInsertBefore", 1).ToString());
    }

    [Fact]
    public void LInsertAfter() {
        Assert.Equal(8, rds.RPush("TestLInsertAfter", base.Class, base.Class, base.Bytes, base.Bytes, base.String, base.String, base.Null, base.Null));
        // Inserting after the first matching pivot places the value at index 1.
        Assert.Equal(9, rds.LInsertAfter("TestLInsertAfter", base.Class, "TestLInsertAfter"));
        Assert.Equal("TestLInsertAfter", rds.LIndex("TestLInsertAfter", 1));
        Assert.Equal(base.Class.ToString(), rds.LIndex<TestClass>("TestLInsertAfter", 0).ToString());
        Assert.Equal(base.Class.ToString(), rds.LIndex<TestClass>("TestLInsertAfter", 2).ToString());
    }

    [Fact]
    public void LLen() {
        Assert.Equal(8, rds.RPush("TestLLen", base.Class, base.Class, base.Bytes, base.Bytes, base.String, base.String, base.Null, base.Null));
        Assert.Equal(8, rds.LLen("TestLLen"));
        // Trimming to the last element leaves a one-element list.
        Assert.True(rds.LTrim("TestLLen", -1, -1));
        Assert.Equal(1, rds.LLen("TestLLen"));
    }

    [Fact]
    public void LPop() {
        // LPush reverses insertion order, so pops come back as Null, Null,
        // String, String, Bytes, Bytes, Class, Class.
        Assert.Equal(8, rds.LPush("TestLPop", base.Class, base.Class, base.Bytes, base.Bytes, base.String, base.String, base.Null, base.Null));
        Assert.Equal("", rds.LPop("TestLPop"));
        Assert.Equal("", rds.LPop("TestLPop"));
        Assert.Equal(base.String, rds.LPop("TestLPop"));
        Assert.Equal(base.String, rds.LPop("TestLPop"));
        Assert.Equal(base.Bytes, rds.LPop<byte[]>("TestLPop"));
        Assert.Equal(base.Bytes, rds.LPop<byte[]>("TestLPop"));
        Assert.Equal(base.Class.ToString(), rds.LPop<TestClass>("TestLPop").ToString());
        Assert.Equal(base.Class.ToString(), rds.LPop<TestClass>("TestLPop").ToString());
    }

    [Fact]
    public void LPush() {
        // LPush reverses insertion order: list reads Null, Null, String,
        // String, Bytes, Bytes, Class, Class from left to right.
        Assert.Equal(8, rds.LPush("TestLPush", base.Class, base.Class, base.Bytes, base.Bytes, base.String, base.String, base.Null, base.Null));
        Assert.Equal(2, rds.LRange("TestLPush", 0, 1).Length);
        Assert.Equal("", rds.LRange("TestLPush", 0, 1)[0]);
        Assert.Equal("", rds.LRange("TestLPush", 0, 1)[1]);
        Assert.Equal(2, rds.LRange("TestLPush", 2, 3).Length);
        Assert.Equal(base.String, rds.LRange("TestLPush", 2, 3)[0]);
        Assert.Equal(base.String, rds.LRange("TestLPush", 2, 3)[1]);
        Assert.Equal(2, rds.LRange("TestLPush", 4, 5).Length);
        Assert.Equal(base.Bytes, rds.LRange<byte[]>("TestLPush", 4, 5)[0]);
        Assert.Equal(base.Bytes, rds.LRange<byte[]>("TestLPush", 4, 5)[1]);
        Assert.Equal(2, rds.LRange("TestLPush", 6, -1).Length);
        Assert.Equal(base.Class.ToString(), rds.LRange<TestClass>("TestLPush", 6, -1)[0].ToString());
        Assert.Equal(base.Class.ToString(), rds.LRange<TestClass>("TestLPush", 6, -1)[1].ToString());
    }

    [Fact]
    public void LPushX() {
        // LPushX is a no-op (returns 0) while the key does not exist.
        Assert.Equal(0, rds.LPushX("TestLPushX", base.Null));
        Assert.Equal(0, rds.LPushX("TestLPushX", base.String));
        Assert.Equal(0, rds.LPushX("TestLPushX", base.Bytes));
        Assert.Equal(0, rds.LPushX("TestLPushX", base.Class));
        // Once the key exists, LPushX appends and returns the new length.
        Assert.Equal(1, rds.RPush("TestLPushX", base.Null));
        Assert.Equal(2, rds.LPushX("TestLPushX", base.Null));
        Assert.Equal(3, rds.LPushX("TestLPushX", base.String));
        Assert.Equal(4, rds.LPushX("TestLPushX", base.Bytes));
        Assert.Equal(5, rds.LPushX("TestLPushX", base.Class));
    }

    [Fact]
    public void LRange() {
        // Same LPush-reversed layout as the LPush test; verifies typed range reads.
        rds.LPush("TestLRange", base.Class, base.Class, base.Bytes, base.Bytes, base.String, base.String, base.Null, base.Null);
        Assert.Equal(2, rds.LRange("TestLRange", 0, 1).Length);
        Assert.Equal("", rds.LRange("TestLRange", 0, 1)[0]);
        Assert.Equal("", rds.LRange("TestLRange", 0, 1)[1]);
        Assert.Equal(2, rds.LRange("TestLRange", 2, 3).Length);
        Assert.Equal(base.String, rds.LRange("TestLRange", 2, 3)[0]);
        Assert.Equal(base.String, rds.LRange("TestLRange", 2, 3)[1]);
        Assert.Equal(2, rds.LRange("TestLRange", 4, 5).Length);
        Assert.Equal(base.Bytes, rds.LRange<byte[]>("TestLRange", 4, 5)[0]);
        Assert.Equal(base.Bytes, rds.LRange<byte[]>("TestLRange", 4, 5)[1]);
        Assert.Equal(2, rds.LRange("TestLRange", 6, -1).Length);
        Assert.Equal(base.Class.ToString(), rds.LRange<TestClass>("TestLRange", 6, -1)[0].ToString());
        Assert.Equal(base.Class.ToString(), rds.LRange<TestClass>("TestLRange", 6, -1)[1].ToString());
    }

    [Fact]
    public void LRem() {
        // count = 0 removes every occurrence; a second call finds nothing.
        Assert.Equal(8, rds.LPush("TestLRem", base.Class, base.Class, base.Bytes, base.Bytes, base.String, base.String, base.Null, base.Null));
        Assert.Equal(2, rds.LRem("TestLRem", 0, base.Class));
        Assert.Equal(0, rds.LRem("TestLRem", 0, base.Class));
        Assert.Equal(2, rds.LRem("TestLRem", 0, base.Bytes));
        Assert.Equal(0, rds.LRem("TestLRem", 0, base.Bytes));
        Assert.Equal(2, rds.LRem("TestLRem", 0, base.String));
        Assert.Equal(0, rds.LRem("TestLRem", 0, base.String));
        Assert.Equal(2, rds.LRem("TestLRem", 0, base.Null));
        Assert.Equal(0, rds.LRem("TestLRem", 0, base.Null));
    }

    [Fact]
    public void LSet() {
        Assert.Equal(8, rds.RPush("TestLSet", base.Class, base.Class, base.Bytes, base.Bytes, base.String, base.String, base.Null, base.Null));
        // Overwrite the last element (index -1) and read it back typed.
        var now = DateTime.Now;
        Assert.True(rds.LSet("TestLSet", -1, now));
        Assert.Equal(now.ToString(), rds.LIndex<DateTime>("TestLSet", -1).ToString());
    }

    [Fact]
    public void LTrim() {
        Assert.Equal(8, rds.RPush("TestLTrim", base.Class, base.Class, base.Bytes, base.Bytes, base.String, base.String, base.Null, base.Null));
        // Keep only the last element (the trailing Null, read back as "").
        Assert.True(rds.LTrim("TestLTrim", -1, -1));
        Assert.Equal(1, rds.LLen("TestLTrim"));
        Assert.Equal("", rds.LRange("TestLTrim", 0, -1)[0]);
    }

    [Fact]
    public void RPop() {
        // RPush preserves insertion order, so right pops return the values in
        // reverse: Null, Null, String, String, Bytes, Bytes, Class, Class.
        Assert.Equal(8, rds.RPush("TestRPop", base.Class, base.Class, base.Bytes, base.Bytes, base.String, base.String, base.Null, base.Null));
        Assert.Equal("", rds.RPop("TestRPop"));
        Assert.Equal("", rds.RPop("TestRPop"));
        Assert.Equal(base.String, rds.RPop("TestRPop"));
        Assert.Equal(base.String, rds.RPop("TestRPop"));
        Assert.Equal(base.Bytes, rds.RPop<byte[]>("TestRPop"));
        Assert.Equal(base.Bytes, rds.RPop<byte[]>("TestRPop"));
        Assert.Equal(base.Class.ToString(), rds.RPop<TestClass>("TestRPop").ToString());
        Assert.Equal(base.Class.ToString(), rds.RPop<TestClass>("TestRPop").ToString());
    }

    [Fact]
    public void RPopLPush() {
        // Source and destination are the same key, so the list rotates in place.
        Assert.Equal(8, rds.RPush("TestRPopLPush", base.Class, base.Class, base.Bytes, base.Bytes, base.String, base.String, base.Null, base.Null));
        Assert.Equal("", rds.RPopLPush("TestRPopLPush", "TestRPopLPush"));
        Assert.Equal("", rds.RPopLPush("TestRPopLPush", "TestRPopLPush"));
        Assert.Equal(base.String, rds.RPopLPush("TestRPopLPush", "TestRPopLPush"));
        Assert.Equal(base.String, rds.RPopLPush("TestRPopLPush", "TestRPopLPush"));
        Assert.Equal(base.Bytes, rds.RPopLPush<byte[]>("TestRPopLPush", "TestRPopLPush"));
        Assert.Equal(base.Bytes, rds.RPopLPush<byte[]>("TestRPopLPush", "TestRPopLPush"));
        Assert.Equal(base.Class.ToString(), rds.RPopLPush<TestClass>("TestRPopLPush", "TestRPopLPush").ToString());
        Assert.Equal(base.Class.ToString(), rds.RPopLPush<TestClass>("TestRPopLPush", "TestRPopLPush").ToString());
    }

    [Fact]
    public void RPush() {
        // RPush keeps insertion order: Null, Null, String, String, Bytes, Bytes, Class, Class.
        Assert.Equal(8, rds.RPush("TestRPush", base.Null, base.Null, base.String, base.String, base.Bytes, base.Bytes, base.Class, base.Class));
        Assert.Equal(2, rds.LRange("TestRPush", 0, 1).Length);
        Assert.Equal("", rds.LRange("TestRPush", 0, 1)[0]);
        Assert.Equal("", rds.LRange("TestRPush", 0, 1)[1]);
        Assert.Equal(2, rds.LRange("TestRPush", 2, 3).Length);
        Assert.Equal(base.String, rds.LRange("TestRPush", 2, 3)[0]);
        Assert.Equal(base.String, rds.LRange("TestRPush", 2, 3)[1]);
        Assert.Equal(2, rds.LRange("TestRPush", 4, 5).Length);
        Assert.Equal(base.Bytes, rds.LRange<byte[]>("TestRPush", 4, 5)[0]);
        Assert.Equal(base.Bytes, rds.LRange<byte[]>("TestRPush", 4, 5)[1]);
        Assert.Equal(2, rds.LRange("TestRPush", 6, -1).Length);
        Assert.Equal(base.Class.ToString(), rds.LRange<TestClass>("TestRPush", 6, -1)[0].ToString());
        Assert.Equal(base.Class.ToString(), rds.LRange<TestClass>("TestRPush", 6, -1)[1].ToString());
    }

    [Fact]
    public void RPushX() {
        // RPushX is a no-op (returns 0) while the key does not exist.
        Assert.Equal(0, rds.RPushX("TestRPushX", base.Null));
        Assert.Equal(0, rds.RPushX("TestRPushX", base.String));
        Assert.Equal(0, rds.RPushX("TestRPushX", base.Bytes));
        Assert.Equal(0, rds.RPushX("TestRPushX", base.Class));
        // Once the key exists, RPushX appends and returns the new length.
        Assert.Equal(1, rds.RPush("TestRPushX", base.Null));
        Assert.Equal(2, rds.RPushX("TestRPushX", base.Null));
        Assert.Equal(3, rds.RPushX("TestRPushX", base.String));
        Assert.Equal(4, rds.RPushX("TestRPushX", base.Bytes));
        Assert.Equal(5, rds.RPushX("TestRPushX", base.Class));
    }
}
}
|
2877025939/tabelVew-CollectionView | 5,522 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/Masonry/Masonry/View+MASAdditions.m | //
// UIView+MASAdditions.m
// Masonry
//
// Created by Jonas Budelmann on 20/07/13.
// Copyright (c) 2013 cloudling. All rights reserved.
//
#import "View+MASAdditions.h"
#import <objc/runtime.h>
@implementation MAS_VIEW (MASAdditions)

/// Runs `block` with a fresh MASConstraintMaker for the receiver, then
/// installs and returns the constraints recorded inside the block.
/// Disables autoresizing-mask translation so the new constraints take effect.
- (NSArray *)mas_makeConstraints:(void(^)(MASConstraintMaker *))block {
    self.translatesAutoresizingMaskIntoConstraints = NO;
    MASConstraintMaker *constraintMaker = [[MASConstraintMaker alloc] initWithView:self];
    block(constraintMaker);
    return [constraintMaker install];
}

/// Like mas_makeConstraints:, but updates matching existing constraints in
/// place (updateExisting = YES) rather than adding duplicates.
- (NSArray *)mas_updateConstraints:(void(^)(MASConstraintMaker *))block {
    self.translatesAutoresizingMaskIntoConstraints = NO;
    MASConstraintMaker *constraintMaker = [[MASConstraintMaker alloc] initWithView:self];
    constraintMaker.updateExisting = YES;
    block(constraintMaker);
    return [constraintMaker install];
}

/// Like mas_makeConstraints:, but removes previously installed constraints
/// first (removeExisting = YES), so the block fully re-defines the layout.
- (NSArray *)mas_remakeConstraints:(void(^)(MASConstraintMaker *make))block {
    self.translatesAutoresizingMaskIntoConstraints = NO;
    MASConstraintMaker *constraintMaker = [[MASConstraintMaker alloc] initWithView:self];
    constraintMaker.removeExisting = YES;
    block(constraintMaker);
    return [constraintMaker install];
}

#pragma mark - NSLayoutAttribute properties

// Each accessor wraps the receiver and one NSLayoutAttribute in a new
// MASViewAttribute; these are used as the right-hand side of constraint
// expressions (e.g. make.left.equalTo(view.mas_right)).

- (MASViewAttribute *)mas_left {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeLeft];
}

- (MASViewAttribute *)mas_top {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeTop];
}

- (MASViewAttribute *)mas_right {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeRight];
}

- (MASViewAttribute *)mas_bottom {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeBottom];
}

- (MASViewAttribute *)mas_leading {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeLeading];
}

- (MASViewAttribute *)mas_trailing {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeTrailing];
}

- (MASViewAttribute *)mas_width {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeWidth];
}

- (MASViewAttribute *)mas_height {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeHeight];
}

- (MASViewAttribute *)mas_centerX {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeCenterX];
}

- (MASViewAttribute *)mas_centerY {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeCenterY];
}

- (MASViewAttribute *)mas_baseline {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeBaseline];
}

/// Block form for arbitrary attributes not covered by the named accessors.
- (MASViewAttribute *(^)(NSLayoutAttribute))mas_attribute
{
    return ^(NSLayoutAttribute attr) {
        return [[MASViewAttribute alloc] initWithView:self layoutAttribute:attr];
    };
}

#if (__IPHONE_OS_VERSION_MIN_REQUIRED >= 80000) || (__TV_OS_VERSION_MIN_REQUIRED >= 9000) || (__MAC_OS_X_VERSION_MIN_REQUIRED >= 101100)
- (MASViewAttribute *)mas_firstBaseline {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeFirstBaseline];
}
- (MASViewAttribute *)mas_lastBaseline {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeLastBaseline];
}
#endif

#if TARGET_OS_IPHONE || TARGET_OS_TV

- (MASViewAttribute *)mas_leftMargin {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeLeftMargin];
}

- (MASViewAttribute *)mas_rightMargin {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeRightMargin];
}

- (MASViewAttribute *)mas_topMargin {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeTopMargin];
}

- (MASViewAttribute *)mas_bottomMargin {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeBottomMargin];
}

- (MASViewAttribute *)mas_leadingMargin {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeLeadingMargin];
}

- (MASViewAttribute *)mas_trailingMargin {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeTrailingMargin];
}

- (MASViewAttribute *)mas_centerXWithinMargins {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeCenterXWithinMargins];
}

- (MASViewAttribute *)mas_centerYWithinMargins {
    return [[MASViewAttribute alloc] initWithView:self layoutAttribute:NSLayoutAttributeCenterYWithinMargins];
}

#endif

#pragma mark - associated properties

// mas_key is stored via an associated object because categories cannot add
// ivars; the selector itself is used as the (unique, stable) association key.
- (id)mas_key {
    return objc_getAssociatedObject(self, @selector(mas_key));
}

- (void)setMas_key:(id)key {
    objc_setAssociatedObject(self, @selector(mas_key), key, OBJC_ASSOCIATION_RETAIN_NONATOMIC);
}

#pragma mark - heirachy

/// Walks up from `view` one ancestor at a time and, for each, walks the
/// receiver's ancestor chain looking for a match. Returns the first view
/// found in both chains (a view counts as its own ancestor here), or nil
/// when the two views share no superview.
- (instancetype)mas_closestCommonSuperview:(MAS_VIEW *)view {
    MAS_VIEW *closestCommonSuperview = nil;
    MAS_VIEW *secondViewSuperview = view;
    while (!closestCommonSuperview && secondViewSuperview) {
        MAS_VIEW *firstViewSuperview = self;
        while (!closestCommonSuperview && firstViewSuperview) {
            if (secondViewSuperview == firstViewSuperview) {
                closestCommonSuperview = secondViewSuperview;
            }
            firstViewSuperview = firstViewSuperview.superview;
        }
        secondViewSuperview = secondViewSuperview.superview;
    }
    return closestCommonSuperview;
}

@end
|
2877025939/tabelVew-CollectionView | 6,273 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/Masonry/Masonry/MASUtilities.h | //
// MASUtilities.h
// Masonry
//
// Created by Jonas Budelmann on 19/08/13.
// Copyright (c) 2013 Jonas Budelmann. All rights reserved.
//
#import <Foundation/Foundation.h>
#if TARGET_OS_IPHONE || TARGET_OS_TV
#import <UIKit/UIKit.h>
#define MAS_VIEW UIView
#define MAS_VIEW_CONTROLLER UIViewController
#define MASEdgeInsets UIEdgeInsets
typedef UILayoutPriority MASLayoutPriority;
static const MASLayoutPriority MASLayoutPriorityRequired = UILayoutPriorityRequired;
static const MASLayoutPriority MASLayoutPriorityDefaultHigh = UILayoutPriorityDefaultHigh;
static const MASLayoutPriority MASLayoutPriorityDefaultMedium = 500;
static const MASLayoutPriority MASLayoutPriorityDefaultLow = UILayoutPriorityDefaultLow;
static const MASLayoutPriority MASLayoutPriorityFittingSizeLevel = UILayoutPriorityFittingSizeLevel;
#elif TARGET_OS_MAC
#import <AppKit/AppKit.h>
#define MAS_VIEW NSView
#define MASEdgeInsets NSEdgeInsets
typedef NSLayoutPriority MASLayoutPriority;
static const MASLayoutPriority MASLayoutPriorityRequired = NSLayoutPriorityRequired;
static const MASLayoutPriority MASLayoutPriorityDefaultHigh = NSLayoutPriorityDefaultHigh;
static const MASLayoutPriority MASLayoutPriorityDragThatCanResizeWindow = NSLayoutPriorityDragThatCanResizeWindow;
static const MASLayoutPriority MASLayoutPriorityDefaultMedium = 501;
static const MASLayoutPriority MASLayoutPriorityWindowSizeStayPut = NSLayoutPriorityWindowSizeStayPut;
static const MASLayoutPriority MASLayoutPriorityDragThatCannotResizeWindow = NSLayoutPriorityDragThatCannotResizeWindow;
static const MASLayoutPriority MASLayoutPriorityDefaultLow = NSLayoutPriorityDefaultLow;
static const MASLayoutPriority MASLayoutPriorityFittingSizeCompression = NSLayoutPriorityFittingSizeCompression;
#endif
/**
* Allows you to attach keys to objects matching the variable names passed.
*
* view1.mas_key = @"view1", view2.mas_key = @"view2";
*
* is equivalent to:
*
* MASAttachKeys(view1, view2);
*/
#define MASAttachKeys(...) \
{ \
NSDictionary *keyPairs = NSDictionaryOfVariableBindings(__VA_ARGS__); \
for (id key in keyPairs.allKeys) { \
id obj = keyPairs[key]; \
NSAssert([obj respondsToSelector:@selector(setMas_key:)], \
@"Cannot attach mas_key to %@", obj); \
[obj setMas_key:key]; \
} \
}
/**
* Used to create object hashes
* Based on http://www.mikeash.com/pyblog/friday-qa-2010-06-18-implementing-equality-and-hashing.html
*/
#define MAS_NSUINT_BIT (CHAR_BIT * sizeof(NSUInteger))
#define MAS_NSUINTROTATE(val, howmuch) ((((NSUInteger)val) << howmuch) | (((NSUInteger)val) >> (MAS_NSUINT_BIT - howmuch)))
/**
* Given a scalar or struct value, wraps it in NSValue
* Based on EXPObjectify: https://github.com/specta/expecta
*/
/// Boxes a single variadic argument into an object (NSValue or NSNumber)
/// based on its @encode() type string; objects pass through unboxed. Used by
/// the MASBoxValue macro below. Returns nil for unrecognised type encodings.
///
/// The va_arg reads honour C default argument promotions: float is read as
/// double, and char/short/bool (and their unsigned variants) are read as
/// int / unsigned int before narrowing back to the declared type.
static inline id _MASBoxValue(const char *type, ...) {
    va_list v;
    va_start(v, type);
    id obj = nil;
    if (strcmp(type, @encode(id)) == 0) {
        // Already an object — no boxing needed.
        id actual = va_arg(v, id);
        obj = actual;
    } else if (strcmp(type, @encode(CGPoint)) == 0) {
        // Structs are wrapped in NSValue, preserving the type encoding.
        CGPoint actual = (CGPoint)va_arg(v, CGPoint);
        obj = [NSValue value:&actual withObjCType:type];
    } else if (strcmp(type, @encode(CGSize)) == 0) {
        CGSize actual = (CGSize)va_arg(v, CGSize);
        obj = [NSValue value:&actual withObjCType:type];
    } else if (strcmp(type, @encode(MASEdgeInsets)) == 0) {
        MASEdgeInsets actual = (MASEdgeInsets)va_arg(v, MASEdgeInsets);
        obj = [NSValue value:&actual withObjCType:type];
    } else if (strcmp(type, @encode(double)) == 0) {
        double actual = (double)va_arg(v, double);
        obj = [NSNumber numberWithDouble:actual];
    } else if (strcmp(type, @encode(float)) == 0) {
        // float is promoted to double in variadic calls.
        float actual = (float)va_arg(v, double);
        obj = [NSNumber numberWithFloat:actual];
    } else if (strcmp(type, @encode(int)) == 0) {
        int actual = (int)va_arg(v, int);
        obj = [NSNumber numberWithInt:actual];
    } else if (strcmp(type, @encode(long)) == 0) {
        long actual = (long)va_arg(v, long);
        obj = [NSNumber numberWithLong:actual];
    } else if (strcmp(type, @encode(long long)) == 0) {
        long long actual = (long long)va_arg(v, long long);
        obj = [NSNumber numberWithLongLong:actual];
    } else if (strcmp(type, @encode(short)) == 0) {
        // short/char/bool are promoted to int in variadic calls.
        short actual = (short)va_arg(v, int);
        obj = [NSNumber numberWithShort:actual];
    } else if (strcmp(type, @encode(char)) == 0) {
        char actual = (char)va_arg(v, int);
        obj = [NSNumber numberWithChar:actual];
    } else if (strcmp(type, @encode(bool)) == 0) {
        bool actual = (bool)va_arg(v, int);
        obj = [NSNumber numberWithBool:actual];
    } else if (strcmp(type, @encode(unsigned char)) == 0) {
        // Unsigned sub-int types are promoted to unsigned int.
        unsigned char actual = (unsigned char)va_arg(v, unsigned int);
        obj = [NSNumber numberWithUnsignedChar:actual];
    } else if (strcmp(type, @encode(unsigned int)) == 0) {
        unsigned int actual = (unsigned int)va_arg(v, unsigned int);
        obj = [NSNumber numberWithUnsignedInt:actual];
    } else if (strcmp(type, @encode(unsigned long)) == 0) {
        unsigned long actual = (unsigned long)va_arg(v, unsigned long);
        obj = [NSNumber numberWithUnsignedLong:actual];
    } else if (strcmp(type, @encode(unsigned long long)) == 0) {
        unsigned long long actual = (unsigned long long)va_arg(v, unsigned long long);
        obj = [NSNumber numberWithUnsignedLongLong:actual];
    } else if (strcmp(type, @encode(unsigned short)) == 0) {
        unsigned short actual = (unsigned short)va_arg(v, unsigned int);
        obj = [NSNumber numberWithUnsignedShort:actual];
    }
    va_end(v);
    return obj;
}
#define MASBoxValue(value) _MASBoxValue(@encode(__typeof__((value))), (value))
|
27182812/ChatGLM-LLaMA-chinese-insturct | 81,990 | src/transformers/models/pegasus/modeling_pegasus.py | # coding=utf-8
# Copyright 2021, Google and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch PEGASUS model."""
import copy
import math
import random
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_pegasus import PegasusConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "google/pegasus-large"
_CONFIG_FOR_DOC = "PegasusConfig"
PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST = [
"google/pegasus-large",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
]
# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """
    Shift input ids one token to the right.

    The last column of `input_ids` is dropped, every other column moves one
    position right, and `decoder_start_token_id` fills the first column. Any
    -100 label-ignore values remaining after the shift are replaced with
    `pad_token_id`, since -100 must never be fed to the model.
    """
    rolled = input_ids.new_zeros(input_ids.shape)
    rolled[:, 0] = decoder_start_token_id
    rolled[:, 1:] = input_ids[:, :-1].clone()

    if pad_token_id is None:
        raise ValueError("self.model.config.pad_token_id has to be defined.")
    # replace possible -100 values in labels by `pad_token_id`
    rolled.masked_fill_(rolled == -100, pad_token_id)
    return rolled
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
# Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->Pegasus
class PegasusSinusoidalPositionalEmbedding(nn.Embedding):
    """This module produces sinusoidal positional embeddings of any length."""
    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None:
        super().__init__(num_positions, embedding_dim)
        # Overwrite the randomly initialized nn.Embedding weight with a fixed sinusoidal table.
        self.weight = self._init_weight(self.weight)
    @staticmethod
    def _init_weight(out: nn.Parameter) -> nn.Parameter:
        """
        Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
        the 2nd half of the vector. [dim // 2:]
        """
        n_pos, dim = out.shape
        # position_enc[pos, j] = pos / 10000^(2*(j//2)/dim); pairs of columns share a frequency.
        position_enc = np.array(
            [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
        )
        out.requires_grad = False  # set early to avoid an error in pytorch-1.8+
        # Split point between the sin and cos halves; for odd `dim` the sin half gets the extra column.
        sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
        out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
        out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
        out.detach_()
        return out
    @torch.no_grad()
    def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0) -> torch.Tensor:
        """`input_ids_shape` is expected to be [bsz x seqlen]."""
        bsz, seq_len = input_ids_shape[:2]
        # Positions continue after any cached (past) tokens during incremental decoding.
        positions = torch.arange(
            past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
        )
        return super().forward(positions)
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Pegasus
class PegasusAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""
    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        # 1/sqrt(head_dim); applied to the query projection instead of to the attention scores.
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # [bsz, seq_len, embed_dim] -> [bsz, num_heads, seq_len, head_dim]
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """
        Input shape: Batch x Time x Channel

        Returns `(attn_output, attn_weights_reshaped, past_key_value)`. `attn_weights_reshaped`
        is populated only when `output_attentions=True`; `past_key_value` is populated only when
        `self.is_decoder` is True.
        """
        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        bsz, tgt_len, _ = hidden_states.size()
        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        # `past_key_value[0].shape[2] == key_value_states.shape[1]`
        # is checking that the `sequence_length` of the `past_key_value` is the same as
        # the provided `key_value_states` to support prefix tuning
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == key_value_states.shape[1]
        ):
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention: project only the new tokens and append to the cache.
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)
        # Fold heads into the batch dimension so attention is a single batched matmul.
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.reshape(*proj_shape)
        value_states = value_states.reshape(*proj_shape)
        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )
        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            # Mask is additive (large negative values at masked positions), applied before softmax.
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None
        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_probs, value_states)
        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )
        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
        attn_output = self.out_proj(attn_output)
        return attn_output, attn_weights_reshaped, past_key_value
# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Pegasus
class PegasusEncoderLayer(nn.Module):
    """
    One pre-layer-norm Transformer encoder layer: self-attention followed by a two-layer
    feed-forward network, each sub-block wrapped in a residual connection.
    """

    def __init__(self, config: PegasusConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = PegasusAttention(
            embed_dim=self.embed_dim,
            num_heads=config.encoder_attention_heads,
            dropout=config.attention_dropout,
        )
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        layer_head_mask: torch.Tensor,
        output_attentions: bool = False,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): layer input of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): additive mask of size `(batch, 1, tgt_len, src_len)`
                where padding positions hold very large negative values.
            layer_head_mask (`torch.FloatTensor`): per-head mask of size `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether to also return the attention weights of this layer.
        """
        # Self-attention sub-block (pre-LN): normalize, attend, dropout, add residual.
        attn_input = self.self_attn_layer_norm(hidden_states)
        attn_out, attn_weights, _ = self.self_attn(
            hidden_states=attn_input,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        attn_out = nn.functional.dropout(attn_out, p=self.dropout, training=self.training)
        hidden_states = hidden_states + attn_out

        # Feed-forward sub-block (pre-LN): normalize, fc1 -> activation -> fc2, add residual.
        ffn_out = self.final_layer_norm(hidden_states)
        ffn_out = self.activation_fn(self.fc1(ffn_out))
        ffn_out = nn.functional.dropout(ffn_out, p=self.activation_dropout, training=self.training)
        ffn_out = self.fc2(ffn_out)
        ffn_out = nn.functional.dropout(ffn_out, p=self.dropout, training=self.training)
        hidden_states = hidden_states + ffn_out

        # fp16 activations can overflow to inf/nan; clamp back into the representable range.
        if hidden_states.dtype == torch.float16 and (
            torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
        ):
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs
# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Pegasus
class PegasusDecoderLayer(nn.Module):
    # Pre-layer-norm decoder layer: masked self-attention, cross-attention over the encoder
    # output, then a feed-forward block; each sub-block has its own residual connection.
    def __init__(self, config: PegasusConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = PegasusAttention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.encoder_attn = PegasusAttention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size `(decoder_attention_heads,)`.
            past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)
            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                output_attentions=output_attentions,
            )
            hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
            hidden_states = residual + hidden_states
            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value
        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        # Output tuple grows positionally: (hidden_states[, self_attn, cross_attn][, present_key_value]).
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)
        if use_cache:
            outputs += (present_key_value,)
        return outputs
class PegasusPreTrainedModel(PreTrainedModel):
    # Shared base class: wires the Pegasus config, weight initialization, and
    # gradient-checkpointing toggles into the generic PreTrainedModel machinery.
    config_class = PegasusConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    def _init_weights(self, module):
        # Initialize Linear/Embedding weights from N(0, init_std); biases and padding rows to zero.
        std = self.config.init_std
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, PegasusSinusoidalPositionalEmbedding):
            # Sinusoidal tables are deterministic and must NOT be re-randomized. This branch
            # must come before the generic nn.Embedding branch below, since
            # PegasusSinusoidalPositionalEmbedding subclasses nn.Embedding.
            pass
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
    def _set_gradient_checkpointing(self, module, value=False):
        # Only the encoder/decoder stacks carry a `gradient_checkpointing` flag.
        if isinstance(module, (PegasusDecoder, PegasusEncoder)):
            module.gradient_checkpointing = value
PEGASUS_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`PegasusConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
PEGASUS_GENERATION_EXAMPLE = r"""
Summarization example:
```python
>>> from transformers import AutoTokenizer, PegasusForConditionalGeneration
>>> model = PegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
>>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
>>> ARTICLE_TO_SUMMARIZE = (
... "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
... )
>>> inputs = tokenizer(ARTICLE_TO_SUMMARIZE, max_length=1024, return_tensors="pt")
>>> # Generate Summary
>>> summary_ids = model.generate(inputs["input_ids"])
>>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"California's largest electricity provider has turned off power to hundreds of thousands of customers."
```
"""
PEGASUS_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Pegasus uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape
`(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you
can choose to directly pass an embedded representation. This is useful if you want more control over how to
convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
input (see `past_key_values`). This is useful if you want more control over how to convert
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
of `inputs_embeds`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class PegasusEncoder(PegasusPreTrainedModel):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`PegasusEncoderLayer`].
    Args:
        config: PegasusConfig
        embed_tokens (nn.Embedding): output embedding
    """
    def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop
        embed_dim = config.d_model
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_position_embeddings
        # Optional sqrt(d_model) scaling of token embeddings (config.scale_embedding).
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
        if embed_tokens is not None:
            # Embedding table shared with the parent model (weight tying).
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
        self.embed_positions = PegasusSinusoidalPositionalEmbedding(
            config.max_position_embeddings,
            embed_dim,
            self.padding_idx,
        )
        self.layers = nn.ModuleList([PegasusEncoderLayer(config) for _ in range(config.encoder_layers)])
        self.layer_norm = nn.LayerNorm(config.d_model)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()
    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
        config.max_position_embeddings`.
        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embeddings. If position embeddings are learned, increasing the size will add
                newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
                position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
                add correct vectors at the end following the position encoding algorithm, whereas reducing the size
                will remove vectors from the end.
        """
        logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
        self.config.max_position_embeddings = new_num_position_embeddings
        # Sinusoidal tables are deterministic, so rebuilding from scratch reproduces the
        # original values for all positions that were already covered.
        self.embed_positions = PegasusSinusoidalPositionalEmbedding(
            self.config.max_position_embeddings,
            self.config.d_model,
            self.padding_idx,
        )
        self.embed_positions.to(self.device)
    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings matrix
        """
        return self.embed_positions
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.
                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        embed_pos = self.embed_positions(input_shape)
        hidden_states = inputs_embeds + embed_pos
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        # expand attention_mask
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            if head_mask.size()[0] != len(self.layers):
                raise ValueError(
                    f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
                    f" {head_mask.size()[0]}."
                )
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):  # skip the layer
                layer_outputs = (None, None)
            else:
                if self.gradient_checkpointing and self.training:
                    # Recompute activations in backward instead of storing them; the closure
                    # threads the non-tensor `output_attentions` flag through checkpoint().
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)
                        return custom_forward
                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(encoder_layer),
                        hidden_states,
                        attention_mask,
                        (head_mask[idx] if head_mask is not None else None),
                    )
                else:
                    layer_outputs = encoder_layer(
                        hidden_states,
                        attention_mask,
                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                        output_attentions=output_attentions,
                    )
                hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        # Final LayerNorm after the stack (pre-LN architecture).
        hidden_states = self.layer_norm(hidden_states)
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
class PegasusDecoder(PegasusPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PegasusDecoderLayer`]
Args:
config: PegasusConfig
embed_tokens (nn.Embedding): output embedding
"""
    def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] = None):
        """Builds the decoder stack; `embed_tokens` allows sharing the table with the encoder."""
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_position_embeddings
        # Optional sqrt(d_model) scaling of token embeddings (config.scale_embedding).
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
        if embed_tokens is not None:
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
        self.embed_positions = PegasusSinusoidalPositionalEmbedding(
            config.max_position_embeddings,
            config.d_model,
            self.padding_idx,
        )
        self.layers = nn.ModuleList([PegasusDecoderLayer(config) for _ in range(config.decoder_layers)])
        self.layer_norm = nn.LayerNorm(config.d_model)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        # Token embedding table (possibly shared with the encoder via weight tying).
        return self.embed_tokens
    def set_input_embeddings(self, value):
        self.embed_tokens = value
    # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        # Combine the causal (future-token) mask with the caller's padding mask into a single
        # additive mask of shape [bsz, 1, tgt_seq_len, src_seq_len].
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            # Only needed when decoding more than one token at once; a single new token can
            # already see everything in the cache.
            combined_attention_mask = _make_causal_mask(
                input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
            ).to(inputs_embeds.device)
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
                inputs_embeds.device
            )
            # Both masks are additive (0 = keep, dtype-min = drop), so summing intersects them.
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )
        return combined_attention_mask
    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
        config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embeddings. If position embeddings are learned, increasing the size will add
                newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
                position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
                add correct vectors at the end following the position encoding algorithm, whereas reducing the size
                will remove vectors from the end.
        """
        logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
        self.config.max_position_embeddings = new_num_position_embeddings

        # Sinusoidal embeddings are deterministic, so resizing simply rebuilds the
        # table at the new length — no learned weights need to be copied over.
        self.embed_positions = PegasusSinusoidalPositionalEmbedding(
            self.config.max_position_embeddings,
            self.config.d_model,
            self.padding_idx,
        )
        # Move the freshly created module onto the same device as the rest of the model.
        self.embed_positions.to(self.device)
    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings matrix (the sinusoidal embedding module).
        """
        return self.embed_positions
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in decoder to avoid performing
cross-attention on hidden heads. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of
shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing
`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more
control over how to convert `input_ids` indices into associated vectors than the model's internal
embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
attention_mask = self._prepare_decoder_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
next_decoder_cache = () if use_cache else None
# check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
if attn_mask is not None:
if attn_mask.size()[0] != len(self.layers):
raise ValueError(
f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
f" {head_mask.size()[0]}."
)
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
past_key_value = past_key_values[idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, use_cache)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
head_mask[idx] if head_mask is not None else None,
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
cross_attn_layer_head_mask=(
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layer_norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
@add_start_docstrings(
    "The bare PEGASUS Model outputting raw hidden-states without any specific head on top.",
    PEGASUS_START_DOCSTRING,
)
class PegasusModel(PegasusPreTrainedModel):
    # Encoder/decoder token embeddings are tied to `self.shared`, so their weights
    # are legitimately absent from checkpoints.
    _keys_to_ignore_on_load_missing = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]

    def __init__(self, config: PegasusConfig):
        super().__init__(config)

        padding_idx, vocab_size = config.pad_token_id, config.vocab_size
        # Single embedding matrix shared by encoder and decoder (weight tying).
        self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)

        self.encoder = PegasusEncoder(config, self.shared)
        self.decoder = PegasusDecoder(config, self.shared)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        """Return the token embedding table shared by encoder and decoder."""
        return self.shared

    def set_input_embeddings(self, value):
        # Re-point encoder and decoder at the new table so weight tying is preserved.
        self.shared = value
        self.encoder.embed_tokens = self.shared
        self.decoder.embed_tokens = self.shared

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
        config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embeddings. If position embeddings are learned, increasing the size will add
                newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
                position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
                add correct vectors at the end following the position encoding algorithm, whereas reducing the size
                will remove vectors from the end.
        """
        self.config.max_position_embeddings = new_num_position_embeddings
        # Delegate to the sub-modules, which rebuild their sinusoidal tables.
        self.encoder.resize_position_embeddings(new_num_position_embeddings)
        self.decoder.resize_position_embeddings(new_num_position_embeddings)

    def get_position_embeddings(self) -> Tuple[nn.Embedding]:
        """
        Returns the position embeddings matrix as an (encoder, decoder) pair.
        """
        return (self.encoder.get_position_embeddings(), self.decoder.get_position_embeddings())

    @add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.Tensor] = None,
        decoder_attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
        past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        decoder_inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, Seq2SeqModelOutput]:
        r"""
        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, PegasusModel

        >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
        >>> model = PegasusModel.from_pretrained("google/pegasus-large")

        >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
        >>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt")
        >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)

        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 4, 1024]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Run the encoder only when the caller did not supply cached encoder outputs
        # (generation passes them in to avoid re-encoding at every step).
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    "The PEGASUS Model with a language modeling head. Can be used for summarization.", PEGASUS_START_DOCSTRING
)
class PegasusForConditionalGeneration(PegasusPreTrainedModel):
    base_model_prefix = "model"
    _keys_to_ignore_on_load_missing = [
        r"final_logits_bias",
        r"encoder.version",
        r"decoder.version",
        r"lm_head.weight",
        r"embed_positions.weight",
        "encoder.embed_tokens.weight",
        "decoder.embed_tokens.weight",
    ]

    def __init__(self, config: PegasusConfig):
        super().__init__(config)
        self.model = PegasusModel(config)
        # Per-token additive bias on the LM logits; registered as a buffer (not a
        # parameter) so it is saved/loaded with the model but never trained.
        self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
        self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_encoder(self):
        return self.model.get_encoder()

    def get_decoder(self):
        return self.model.get_decoder()

    def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
        # Keep `final_logits_bias` in lock-step with the resized vocabulary.
        new_embeddings = super().resize_token_embeddings(new_num_tokens)
        self._resize_final_logits_bias(new_num_tokens)
        return new_embeddings

    def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
        # Truncate when shrinking; zero-pad when growing (new tokens start unbiased).
        old_num_tokens = self.final_logits_bias.shape[-1]
        if new_num_tokens <= old_num_tokens:
            new_bias = self.final_logits_bias[:, :new_num_tokens]
        else:
            extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
            new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
        self.register_buffer("final_logits_bias", new_bias)

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
        config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embeddings. If position embeddings are learned, increasing the size will add
                newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
                position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
                add correct vectors at the end following the position encoding algorithm, whereas reducing the size
                will remove vectors from the end.
        """
        self.config.max_position_embeddings = new_num_position_embeddings
        self.model.encoder.resize_position_embeddings(new_num_position_embeddings)
        self.model.decoder.resize_position_embeddings(new_num_position_embeddings)

    def get_position_embeddings(self) -> Tuple[nn.Embedding]:
        """
        Returns the position embeddings matrix as an (encoder, decoder) pair.
        """
        return (self.model.encoder.get_position_embeddings(), self.model.decoder.get_position_embeddings())

    @add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    @add_end_docstrings(PEGASUS_GENERATION_EXAMPLE)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.Tensor] = None,
        decoder_attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
        past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        decoder_inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, Seq2SeqLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is not None:
            if use_cache:
                logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
            use_cache = False
            # Teacher forcing: if the caller gave labels but no decoder inputs,
            # derive the inputs by shifting the labels one position to the right.
            if decoder_input_ids is None and decoder_inputs_embeds is None:
                decoder_input_ids = shift_tokens_right(
                    labels, self.config.pad_token_id, self.config.decoder_start_token_id
                )

        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return Seq2SeqLMOutput(
            loss=masked_lm_loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )

    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past_key_values=None,
        attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs,
    ):
        # cut decoder_input_ids if past is used
        if past_key_values is not None:
            decoder_input_ids = decoder_input_ids[:, -1:]

        return {
            "input_ids": None,  # encoder_outputs is defined. input_ids not needed
            "encoder_outputs": encoder_outputs,
            "past_key_values": past_key_values,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,  # change this to avoid caching (presumably for debugging)
        }

    def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
        """Shift `labels` right to obtain the decoder inputs used for teacher forcing."""
        return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        # Re-align the cache with the surviving beams after beam-search reordering.
        reordered_past = ()
        for layer_past in past_key_values:
            # cached cross_attention states don't have to be reordered -> they are always the same
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
            )
        return reordered_past
# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->Pegasus
class PegasusDecoderWrapper(PegasusPreTrainedModel):
    """
    This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
    used in combination with the [`EncoderDecoderModel`] framework.
    """

    def __init__(self, config):
        super().__init__(config)
        # Nest the decoder under the `decoder.` attribute so checkpoint keys line up.
        self.decoder = PegasusDecoder(config)

    def forward(self, *args, **kwargs):
        # Pure pass-through to the wrapped decoder.
        return self.decoder(*args, **kwargs)
class PegasusForCausalLM(PegasusPreTrainedModel):
    """Pegasus decoder used as a standalone (decoder-only) language model."""

    _keys_to_ignore_on_load_missing = ["lm_head.weight"]

    def __init__(self, config):
        # Work on a copy so the caller's config object is not mutated by the
        # decoder-only overrides below.
        config = copy.deepcopy(config)
        config.is_decoder = True
        config.is_encoder_decoder = False
        super().__init__(config)
        self.model = PegasusDecoderWrapper(config)

        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.decoder.embed_tokens

    def set_input_embeddings(self, value):
        self.model.decoder.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model.decoder = decoder

    def get_decoder(self):
        return self.model.decoder

    def get_position_embeddings(self) -> nn.Embedding:
        """
        Returns the position embeddings matrix
        """
        return self.model.decoder.get_position_embeddings()

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        """
        Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
        config.max_position_embeddings`.

        Arguments:
            new_num_position_embeddings (`int`):
                The number of new position embeddings. If position embeddings are learned, increasing the size will add
                newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
                position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
                add correct vectors at the end following the position encoding algorithm, whereas reducing the size
                will remove vectors from the end.
        """
        self.config.max_position_embeddings = new_num_position_embeddings
        self.model.decoder.resize_position_embeddings(new_num_position_embeddings)

    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    # Copied from transformers.models.bart.modeling_bart.BartForCausalLM.forward with Bart->Pegasus, facebook/bart-base->google/pegasus-large
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                if the model is configured as a decoder.
            encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
                in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
                tensors are only required when the model is used as a decoder in a Sequence to Sequence model.

                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, PegasusForCausalLM

        >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
        >>> model = PegasusForCausalLM.from_pretrained("google/pegasus-large", add_cross_attention=False)
        >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> logits = outputs.logits
        >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
        >>> list(logits.shape) == expected_shape
        True
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model.decoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        logits = self.lm_head(outputs[0])

        loss = None
        if labels is not None:
            # NOTE(review): labels are used as-is; any right-shift for next-token
            # prediction is expected to be done by the caller — confirm against usage.
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
    ):
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_ids.shape)

        # With a cache present, only the newest token needs to be fed to the model.
        if past_key_values:
            input_ids = input_ids[:, -1:]
        # first step, decoder_cached_states are empty
        return {
            "input_ids": input_ids,  # encoder_outputs is defined. input_ids not needed
            "attention_mask": attention_mask,
            "past_key_values": past_key_values,
            "use_cache": use_cache,
        }

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        # Re-align every layer's cached states with the surviving beams.
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
|
2881099/csredis | 1,107 | test/CSRedisCore.Tests/CSRedisClientSortedSetTests.cs | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Xunit;
namespace CSRedisCore.Tests {
	public class CSRedisClientSortedSetTests : TestBase {
		// NOTE(review): every method below is an empty stub and carries no [Fact]
		// attribute, so xUnit neither discovers nor runs any of them; this class
		// currently provides no sorted-set coverage. Presumably placeholders for
		// the ZADD/ZRANGE/ZSCORE command family -- confirm intent before adding
		// [Fact], since empty bodies would register as vacuously passing tests.
		public void ZAdd() { }
		public void ZCard() { }
		public void ZCount() { }
		public void ZIncrBy() { }
		public void ZInterStoreMax() { }
		public void ZInterStoreMin() { }
		public void ZInterStoreSum() { }
		public void ZRange() { }
		public void ZRangeWithScores() { }
		public void ZRangeByScore() { }
		public void ZRangeByScoreWithScores() { }
		public void ZRank() { }
		public void ZRem() { }
		public void ZRemRangeByRank() { }
		public void ZRemRangeByScore() { }
		public void ZRevRange() { }
		public void ZRevRangeWithScores() { }
		public void ZRevRangeByScore() { }
		public void ZRevRangeByScoreWithScores() { }
		public void ZRevRank() { }
		public void ZScore() { }
		public void ZUnionStoreMax() { }
		public void ZUnionStoreMin() { }
		public void ZUnionStoreSum() { }
		public void ZScan() { }
		public void ZRangeByLex() { }
		public void ZRemRangeByLex() { }
		public void ZLexCount() { }
	}
}
|
2881099/csredis | 8,068 | test/CSRedisCore.Tests/CSRedisClientStreamTests.cs | using CSRedis;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Xunit;
namespace CSRedisCore.Tests {
    public class CSRedisClientStreamTests : TestBase {
        /*
         *
         * Stream commands are only provided by redis-server 5.0+;
         * run these tests against a sufficiently new server.
         *
         * */
        // Regression test: XPENDING count must track XREADGROUP deliveries.
        [Fact]
        public void Issues457()
        {
            var redis = rds;
            var key = "key_Issues457";
            var group = "group_Issues457";
            var consumer = "consumer_Issues457";
            var maxLen = 9999;
            // Delete, recreate and seed the stream under test.
            redis.Del(key);
            redis.XGroupCreate(key, group, "0", true);
            redis.XAdd(key, maxLen, "*", ("__data", "my data1"));
            redis.XAdd(key, maxLen, "*", ("__data", "my data2"));
            // Check the length of the pending list.
            // NOTE: reading throws an error when the pending list does not exist!
            var pending0 = redis.XPending(key, group);
            // Before anything is delivered, pending should equal 0.
            Assert.True(pending0.count == 0);
            // Read the not-yet-delivered messages: one message per call, two calls.
            var new1 = redis.XReadGroup(group, consumer, 1, 1, (key, ">"));
            var new2 = redis.XReadGroup(group, consumer, 1, 1, (key, ">"));
            Assert.NotNull(new1[0].data);
            Assert.NotEmpty(new1[0].data);
            Assert.NotNull(new2[0].data);
            Assert.NotEmpty(new2[0].data);
            // Check the length of the pending list.
            var pending = redis.XPending(key, group);
            // Before acknowledgement, pending should equal 2.
            Assert.True(pending.count == 2);
            // Acknowledge consumption of both entries.
            var id1 = new1[0].data[0].id;
            var id2 = new2[0].data[0].id;
            redis.XAck(key, group, id1);
            redis.XAck(key, group, id2);
            // Check the length of the pending list.
            // NOTE: reading throws an error when the pending list does not exist!
            var pending2 = redis.XPending(key, group);
            // After acknowledgement, pending should equal 0.
            //Assert.True(pending2.count == 0);
        }
        [Fact]
        public void XAck()
        {
        }
        // Exercises the XADD overloads: auto id, explicit "*", maxlen trimming
        // (positive = exact, negative = approximate) and explicit entry ids.
        [Fact]
        public void XAdd()
        {
            rds.XAdd("testXAdd01", ("f1", "v1"), ("f2", "v2"));
            rds.XAdd("testXAdd02", "*", ("f1", "v1"), ("f2", "v2"));
            rds.XAdd("testXAdd03", 128, "*", ("f1", "v1"), ("f2", "v2"));
            rds.XAdd("testXAdd04", -128, "*", ("f1", "v1"), ("f2", "v2"));
            rds.Del("testXAdd01", "testXAdd02", "testXAdd03", "testXAdd04");
            rds.XAdd("testXAdd01", "42-0", ("f1", "v1"), ("f2", "v2"));
            rds.XAdd("testXAdd02", 128, "43-0", ("f1", "v1"), ("f2", "v2"));
            rds.XAdd("testXAdd03", -128, "44-0", ("f1", "v1"), ("f2", "v2"));
            rds.Del("testXAdd01", "testXAdd02", "testXAdd03", "testXAdd04");
        }
        // Smoke-tests the XCLAIM overloads (single id, id array with idle/retry,
        // force flag); does not assert on results.
        [Fact]
        public void XClaim()
        {
            var id = rds.XAdd("testXClaim01", ("f1", "v1"), ("f2", "v2"));
            //rds.XGroupCreate("testXClaimKey01", "group01", id, true);
            rds.XClaim("testXClaimKey01", "group01", "consumer01", 5000, id);
            rds.XClaim("testXClaimKey01", "group01", "consumer01", 5000, new string[] { id }, 3000, 3, false);
            rds.XClaim("testXClaimKey01", "group01", "consumer01", 5000, new string[] { id }, 3000, 3, true);
            var d11 = rds.XClaim("mystream", "group55", "Alice", 1000, "1573547631296-0");
            var d22 = rds.XClaim("mystream", "group55", "Alice", 1000, new[] { "1573547631296-0" }, 1000, 3, true);
            var d33 = rds.XClaim("mystream", "group55", "Alice", 1000, new[] { "1573547631296-0" }, 1000, 3, false);
        }
        // Same shapes as XClaim but via the JUSTID variant (ids only, no payload).
        [Fact]
        public void XClaimJustId()
        {
            var id = rds.XAdd("testXClaimJustId01", ("f1", "v1"), ("f2", "v2"));
            //rds.XGroupCreate("testXClaimJustIdKey01", "group01", id, true);
            rds.XClaimJustId("testXClaimJustIdKey01", "group01", "consumer01", 5000, id);
            rds.XClaimJustId("testXClaimJustIdKey01", "group01", "consumer01", 5000, new string[] { id }, 3000, 3, false);
            rds.XClaimJustId("testXClaimJustIdKey01", "group01", "consumer01", 5000, new string[] { id }, 3000, 3, true);
            var d11 = rds.XClaimJustId("mystream", "group55", "Alice", 1000, "1573547631296-0");
            var d22 = rds.XClaimJustId("mystream", "group55", "Alice", 1000, new[] { "1573547631296-0" }, 1000, 3, true);
            var d33 = rds.XClaimJustId("mystream", "group55", "Alice", 1000, new[] { "1573547631296-0" }, 1000, 3, false);
        }
        [Fact]
        public void XDel()
        {
            var id = rds.XAdd("testXDel01", ("f1", "v1"), ("f2", "v2"));
            // NOTE(review): deletes from "testtestXDelKey01" while the entry was
            // added to "testXDel01" -- presumably a key-name typo; confirm intent.
            rds.XDel("testtestXDelKey01", id);
        }
        [Fact]
        public void XGroupCreate()
        {
            var id = rds.XAdd("testXGroupCreate01", ("f1", "v1"), ("f2", "v2"));
            //rds.XGroupCreate("testXGroupCreateKey01", "group01", id, true);
            //rds.XGroupCreate("testXGroupCreateKey01", "group02", "$", true);
        }
        [Fact]
        public void XGroupSetId()
        {
            //rds.XGroupCreate("testXGroupSetIdKey01", "group04", "$", true);
            var id = rds.XAdd("testXGroupSetId01", ("f1", "v1"), ("f2", "v2"));
            rds.XGroupSetId("testXGroupSetIdKey01", "group04", id);
        }
        [Fact]
        public void XGroupDestroy()
        {
            rds.XGroupCreate("testXGroupDestroyKey01", "group04", "$", true);
            rds.XGroupDestroy("testXGroupDestroyKey01", "group04");
        }
        [Fact]
        public void XGroupDelConsumer()
        {
            //rds.XGroupCreate("testXGroupDelConsumerKey01", "group04", "$", true);
            rds.XGroupDelConsumer("testXGroupDelConsumerKey01", "group04", "consumer01");
        }
        [Fact]
        public void XLen()
        {
            rds.XLen("textsss");
        }
        // XRANGE over a bounded stream (maxlen 5) with varying COUNT limits.
        [Fact]
        public void XRange()
        {
            rds.XRange("textXRangeKey01", "-", "+", 1);
            for (var i = 0; i < 10; i++)
            {
                //if (i >= 5)
                //	Thread.Sleep(TimeSpan.FromSeconds(1));
                rds.XAdd("mystream", 5, "*", ($"k{i}", $"v{i}"));
            }
            var ttt1 = rds.XRange("mystream", "-", "+", 1);
            var ttt2 = rds.XRange("mystream", "-", "+", 2);
        }
        // Reverse-order variant of XRange.
        [Fact]
        public void XRevRange()
        {
            rds.XRevRange("textXRangeKey01", "-", "+", 1);
            for (var i = 0; i < 10; i++)
            {
                //if (i >= 5)
                //	Thread.Sleep(TimeSpan.FromSeconds(1));
                rds.XAdd("mystream", 5, "*", ($"k{i}", $"v{i}"));
            }
            var ttt1 = rds.XRevRange("mystream", "-", "+", 1);
            var ttt2 = rds.XRevRange("mystream", "-", "+", 2);
        }
        // Multi-stream XREAD with count and block-timeout arguments.
        [Fact]
        public void XRead()
        {
            var id1 = rds.XAdd("testXRead01", ("f1", "v1"), ("f2", "v2"));
            var id2 = rds.XAdd("testXRead02", ("f1", "v1"), ("f2", "v2"));
            rds.XRead(10, 1000, ("testKey01", id1), ("testKey02", id2));
            rds.XAdd("mt2", ("aaa", "111"), ("bbb", "222"));
            var ttt1 = rds.XRead(2, 1000, ("mt2", "0-0"), ("mystream", "0-0"));
        }
        // Multi-stream XREADGROUP with the ">" (new messages only) cursor.
        [Fact]
        public void XReadGroup()
        {
            var id1 = rds.XAdd("testXReadGroupKey01", ("f1", "v1"), ("f2", "v2"));
            var id2 = rds.XAdd("testXReadGroupKey02", ("f1", "v1"), ("f2", "v2"));
            //rds.XGroupCreate("testXReadGroupKey01", "testXReadGroup01", id1, true);
            //rds.XGroupCreate("testXReadGroupKey02", "testXReadGroup01", id2, true);
            rds.XReadGroup("testXReadGroup01", "consumer01", 10, 1000, ("testXReadGroupKey01", ">"), ("testXReadGroupKey02", ">"));
        }
        [Fact]
        public void XTrim()
        {
            rds.XTrim("testXTrimKey01", 5);
        }
        // XINFO STREAM / GROUPS / CONSUMERS introspection calls.
        [Fact]
        public void XInfo()
        {
            var d11 = rds.XInfoStream("mystream");
            var d22 = rds.XInfoGroups("mystream");
            var d33 = rds.XInfoConsumers("mystream", "group55");
        }
    }
}
|
2877025939/tabelVew-CollectionView | 7,227 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/Masonry/Masonry/MASConstraint.h | //
// MASConstraint.h
// Masonry
//
// Created by Jonas Budelmann on 22/07/13.
// Copyright (c) 2013 cloudling. All rights reserved.
//
#import "MASUtilities.h"
/**
 *  Enables Constraints to be created with chainable syntax
 *  Constraint can represent single NSLayoutConstraint (MASViewConstraint)
 *  or a group of NSLayoutConstraints (MASCompositeConstraint)
 */
@interface MASConstraint : NSObject
// Chaining Support
/**
 *  Modifies the NSLayoutConstraint constant,
 *  only affects MASConstraints in which the first item's NSLayoutAttribute is one of the following
 *  NSLayoutAttributeTop, NSLayoutAttributeLeft, NSLayoutAttributeBottom, NSLayoutAttributeRight
 */
- (MASConstraint * (^)(MASEdgeInsets insets))insets;
/**
 *  Modifies the NSLayoutConstraint constant,
 *  only affects MASConstraints in which the first item's NSLayoutAttribute is one of the following
 *  NSLayoutAttributeWidth, NSLayoutAttributeHeight
 */
- (MASConstraint * (^)(CGSize offset))sizeOffset;
/**
 *  Modifies the NSLayoutConstraint constant,
 *  only affects MASConstraints in which the first item's NSLayoutAttribute is one of the following
 *  NSLayoutAttributeCenterX, NSLayoutAttributeCenterY
 */
- (MASConstraint * (^)(CGPoint offset))centerOffset;
/**
 *  Modifies the NSLayoutConstraint constant
 */
- (MASConstraint * (^)(CGFloat offset))offset;
/**
 *  Modifies the NSLayoutConstraint constant based on a value type
 */
- (MASConstraint * (^)(NSValue *value))valueOffset;
/**
 *  Sets the NSLayoutConstraint multiplier property
 */
- (MASConstraint * (^)(CGFloat multiplier))multipliedBy;
/**
 *  Sets the NSLayoutConstraint multiplier to 1.0/dividedBy
 */
- (MASConstraint * (^)(CGFloat divider))dividedBy;
/**
 *  Sets the NSLayoutConstraint priority to a float or MASLayoutPriority
 */
- (MASConstraint * (^)(MASLayoutPriority priority))priority;
/**
 *  Sets the NSLayoutConstraint priority to MASLayoutPriorityLow
 *
 *  NOTE(review): `(^)()` is the pre-`void` empty-parameter block syntax, which
 *  modern clang flags under -Wstrict-prototypes; presumably should be
 *  `(^)(void)` -- confirm against the implementation file before changing.
 */
- (MASConstraint * (^)())priorityLow;
/**
 *  Sets the NSLayoutConstraint priority to MASLayoutPriorityMedium
 */
- (MASConstraint * (^)())priorityMedium;
/**
 *  Sets the NSLayoutConstraint priority to MASLayoutPriorityHigh
 */
- (MASConstraint * (^)())priorityHigh;
/**
 *  Sets the constraint relation to NSLayoutRelationEqual
 *  returns a block which accepts one of the following:
 *  MASViewAttribute, UIView, NSValue, NSArray
 *  see readme for more details.
 */
- (MASConstraint * (^)(id attr))equalTo;
/**
 *  Sets the constraint relation to NSLayoutRelationGreaterThanOrEqual
 *  returns a block which accepts one of the following:
 *  MASViewAttribute, UIView, NSValue, NSArray
 *  see readme for more details.
 */
- (MASConstraint * (^)(id attr))greaterThanOrEqualTo;
/**
 *  Sets the constraint relation to NSLayoutRelationLessThanOrEqual
 *  returns a block which accepts one of the following:
 *  MASViewAttribute, UIView, NSValue, NSArray
 *  see readme for more details.
 */
- (MASConstraint * (^)(id attr))lessThanOrEqualTo;
/**
 *  Optional semantic property which has no effect but improves the readability of constraint
 */
- (MASConstraint *)with;
/**
 *  Optional semantic property which has no effect but improves the readability of constraint
 */
- (MASConstraint *)and;
/**
 *  Creates a new MASCompositeConstraint with the called attribute and receiver
 */
- (MASConstraint *)left;
- (MASConstraint *)top;
- (MASConstraint *)right;
- (MASConstraint *)bottom;
- (MASConstraint *)leading;
- (MASConstraint *)trailing;
- (MASConstraint *)width;
- (MASConstraint *)height;
- (MASConstraint *)centerX;
- (MASConstraint *)centerY;
- (MASConstraint *)baseline;
// First/last baseline attributes require iOS 8 / tvOS 9 / macOS 10.11.
#if (__IPHONE_OS_VERSION_MIN_REQUIRED >= 80000) || (__TV_OS_VERSION_MIN_REQUIRED >= 9000) || (__MAC_OS_X_VERSION_MIN_REQUIRED >= 101100)
- (MASConstraint *)firstBaseline;
- (MASConstraint *)lastBaseline;
#endif
// Layout-margin attributes exist only on UIKit platforms.
#if TARGET_OS_IPHONE || TARGET_OS_TV
- (MASConstraint *)leftMargin;
- (MASConstraint *)rightMargin;
- (MASConstraint *)topMargin;
- (MASConstraint *)bottomMargin;
- (MASConstraint *)leadingMargin;
- (MASConstraint *)trailingMargin;
- (MASConstraint *)centerXWithinMargins;
- (MASConstraint *)centerYWithinMargins;
#endif
/**
 *  Sets the constraint debug name
 */
- (MASConstraint * (^)(id key))key;
// NSLayoutConstraint constant Setters
// for use outside of mas_updateConstraints/mas_makeConstraints blocks
/**
 *  Modifies the NSLayoutConstraint constant,
 *  only affects MASConstraints in which the first item's NSLayoutAttribute is one of the following
 *  NSLayoutAttributeTop, NSLayoutAttributeLeft, NSLayoutAttributeBottom, NSLayoutAttributeRight
 */
- (void)setInsets:(MASEdgeInsets)insets;
/**
 *  Modifies the NSLayoutConstraint constant,
 *  only affects MASConstraints in which the first item's NSLayoutAttribute is one of the following
 *  NSLayoutAttributeWidth, NSLayoutAttributeHeight
 */
- (void)setSizeOffset:(CGSize)sizeOffset;
/**
 *  Modifies the NSLayoutConstraint constant,
 *  only affects MASConstraints in which the first item's NSLayoutAttribute is one of the following
 *  NSLayoutAttributeCenterX, NSLayoutAttributeCenterY
 */
- (void)setCenterOffset:(CGPoint)centerOffset;
/**
 *  Modifies the NSLayoutConstraint constant
 */
- (void)setOffset:(CGFloat)offset;
// NSLayoutConstraint Installation support
#if TARGET_OS_MAC && !(TARGET_OS_IPHONE || TARGET_OS_TV)
/**
 *  Whether or not to go through the animator proxy when modifying the constraint
 */
@property (nonatomic, copy, readonly) MASConstraint *animator;
#endif
/**
 *  Activates an NSLayoutConstraint if it's supported by an OS.
 *  Invokes install otherwise.
 */
- (void)activate;
/**
 *  Deactivates previously installed/activated NSLayoutConstraint.
 */
- (void)deactivate;
/**
 *  Creates a NSLayoutConstraint and adds it to the appropriate view.
 */
- (void)install;
/**
 *  Removes previously installed NSLayoutConstraint
 */
- (void)uninstall;
@end
/**
 *  Convenience auto-boxing macros for MASConstraint methods.
 *  MASBoxValue wraps scalars/structs (CGFloat, CGSize, UIEdgeInsets, ...)
 *  into NSValue/NSNumber so they can be passed to the id-typed relation blocks.
 *
 *  Defining MAS_SHORTHAND_GLOBALS will turn on auto-boxing for default syntax.
 *  A potential drawback of this is that the unprefixed macros will appear in global scope.
 */
#define mas_equalTo(...)                 equalTo(MASBoxValue((__VA_ARGS__)))
#define mas_greaterThanOrEqualTo(...)    greaterThanOrEqualTo(MASBoxValue((__VA_ARGS__)))
#define mas_lessThanOrEqualTo(...)       lessThanOrEqualTo(MASBoxValue((__VA_ARGS__)))
#define mas_offset(...)                  valueOffset(MASBoxValue((__VA_ARGS__)))
// Opt-in global shorthand: maps the unprefixed names onto the mas_ macros.
#ifdef MAS_SHORTHAND_GLOBALS
#define equalTo(...)                     mas_equalTo(__VA_ARGS__)
#define greaterThanOrEqualTo(...)        mas_greaterThanOrEqualTo(__VA_ARGS__)
#define lessThanOrEqualTo(...)           mas_lessThanOrEqualTo(__VA_ARGS__)
#define offset(...)                      mas_offset(__VA_ARGS__)
#endif
@interface MASConstraint (AutoboxingSupport)
/**
 *  Aliases to corresponding relation methods (for shorthand macros)
 *  Also needed to aid autocompletion
 */
- (MASConstraint * (^)(id attr))mas_equalTo;
- (MASConstraint * (^)(id attr))mas_greaterThanOrEqualTo;
- (MASConstraint * (^)(id attr))mas_lessThanOrEqualTo;
/**
 *  A dummy method to aid autocompletion
 */
- (MASConstraint * (^)(id offset))mas_offset;
@end
|
2877025939/tabelVew-CollectionView | 1,275 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/Masonry/Masonry/MASViewConstraint.h | //
// MASConstraint.h
// Masonry
//
// Created by Jonas Budelmann on 20/07/13.
// Copyright (c) 2013 cloudling. All rights reserved.
//
#import "MASViewAttribute.h"
#import "MASConstraint.h"
#import "MASLayoutConstraint.h"
#import "MASUtilities.h"
/**
 *  A single constraint.
 *  Contains the attributes necessary for creating an NSLayoutConstraint and adding it to the appropriate view
 */
@interface MASViewConstraint : MASConstraint <NSCopying>
/**
 *  First item/view and first attribute of the NSLayoutConstraint
 */
@property (nonatomic, strong, readonly) MASViewAttribute *firstViewAttribute;
/**
 *  Second item/view and second attribute of the NSLayoutConstraint
 */
@property (nonatomic, strong, readonly) MASViewAttribute *secondViewAttribute;
/**
 *  Initializes the MASViewConstraint with the first part of the equation.
 *
 *  @param firstViewAttribute  view.mas_left, view.mas_width etc.
 *
 *  @return a new view constraint
 *
 *  NOTE(review): declared as `id`; modern convention is `instancetype` --
 *  confirm the implementation's return type before changing the declaration.
 */
- (id)initWithFirstViewAttribute:(MASViewAttribute *)firstViewAttribute;
/**
 *  Returns all MASViewConstraints installed with this view as a first item.
 *
 *  @param view A view to retrieve constraints for.
 *
 *  @return An array of MASViewConstraints.
 */
+ (NSArray *)installedConstraintsForView:(MAS_VIEW *)view;
@end
|
27182812/ChatGLM-LLaMA-chinese-insturct | 7,693 | src/transformers/models/pegasus/configuration_pegasus.py | # coding=utf-8
# Copyright 2021, Google and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PEGASUS model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger obtained through the library's logging utilities.
logger = logging.get_logger(__name__)
# Maps canonical checkpoint names to the URL of their hosted config.json.
PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PegasusModel`]. It is used to instantiate an
    PEGASUS model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the PEGASUS
    [google/pegasus-large](https://huggingface.co/google/pegasus-large) architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 50265):
            Vocabulary size of the PEGASUS model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`PegasusModel`] or [`TFPegasusModel`].
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by diving by sqrt(d_model).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models)
        forced_eos_token_id (`int`, *optional*, defaults to 1):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
    Example:
    ```python
    >>> from transformers import PegasusConfig, PegasusModel
    >>> # Initializing a PEGASUS google/pegasus-large style configuration
    >>> configuration = PegasusConfig()
    >>> # Initializing a model (with random weights) from the google/pegasus-large style configuration
    >>> model = PegasusModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        # Vocabulary and maximum sequence length.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # Transformer topology: shared width plus per-stack depth / FFN / heads.
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        # Mirrors the encoder depth under the generic attribute name.
        self.num_hidden_layers = encoder_layers
        # Regularisation knobs.
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        # Remaining architecture / runtime options.
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
    @property
    def num_attention_heads(self) -> int:
        # Alias so generic model code can query the head count uniformly.
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        # Alias for the shared model width `d_model`.
        return self.d_model
|
2881099/csredis | 3,257 | test/CSRedisCore.Tests/CSRedisClientGeoTests.cs | using CSRedis;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Xunit;
namespace CSRedisCore.Tests {
	public class CSRedisClientGeoTests_ : TestBase {
		/*
		 *
		 * Geo commands are only provided by redis-server 3.2+;
		 * run these tests against a sufficiently new server.
		 *
		 * */
		[Fact]
		public void GeoAdd() {
			Assert.Equal(3, rds.GeoAdd("TestGeoAdd", (10, 20, "m1"), (11, 21, "m2"), (12, 22, "m3")));
		}
		// GEODIST returns null when either member is missing.
		[Fact]
		public void GeoDist() {
			Assert.Equal(3, rds.GeoAdd("TestGeoDist", (10, 20, "m1"), (11, 21, "m2"), (12, 22, "m3")));
			Assert.NotNull(rds.GeoDist("TestGeoDist", "m1", "m2"));
			Assert.NotNull(rds.GeoDist("TestGeoDist", "m1", "m3"));
			Assert.NotNull(rds.GeoDist("TestGeoDist", "m2", "m3"));
			Assert.Null(rds.GeoDist("TestGeoDist", "m1", "m31"));
			Assert.Null(rds.GeoDist("TestGeoDist", "m11", "m31"));
		}
		[Fact]
		public void GeoHash() {
			Assert.Equal(3, rds.GeoAdd("TestGeoHash", (10, 20, "m1"), (11, 21, "m2"), (12, 22, "m3")));
			// NOTE(review): the first assertion uses Select(...).Count(), which counts
			// all elements (here 2) regardless of the predicate's result; it only
			// passes because the input array has two entries. Presumably Where(...)
			// was intended, as in the second assertion -- confirm before relying on it.
			Assert.Equal(2, rds.GeoHash("TestGeoHash", new[] { "m1", "m2" }).Select(a => string.IsNullOrEmpty(a) == false).Count());
			Assert.Equal(2, rds.GeoHash("TestGeoHash", new[] { "m1", "m2", "m22" }).Where(a => string.IsNullOrEmpty(a) == false).Count());
		}
		// GEOPOS yields one slot per requested member; unknown members come back null.
		[Fact]
		public void GeoPos() {
			Assert.Equal(3, rds.GeoAdd("TestGeoPos", (10, 20, "m1"), (11, 21, "m2"), (12, 22, "m3")));
			Assert.Equal(4, rds.GeoPos("TestGeoPos", new[] { "m1", "m2", "m22", "m3" }).Length);
			//Assert.Equal((10, 20), rds.GeoPos("TestGeoPos", new[] { "m1", "m2", "m22", "m3" })[0]);
			//Assert.Equal((11, 21), rds.GeoPos("TestGeoPos", new[] { "m1", "m2", "m22", "m3" })[1]);
			Assert.Null(rds.GeoPos("TestGeoPos", new[] { "m1", "m2", "m22", "m3" })[2]);
			//Assert.Equal((12, 22), rds.GeoPos("TestGeoPos", new[] { "m1", "m2", "m22", "m3" })[3]);
		}
		// Smoke-tests every GEORADIUS / GEORADIUSBYMEMBER overload (with and
		// without dist/coord projections); does not assert on the results.
		[Fact]
		public void GeoRadius() {
			Assert.Equal(3, rds.GeoAdd("TestGeoRadius", (10, 20, "m1"), (11, 21, "m2"), (12, 22, "m3")));
			var geopos = rds.GeoPos("TestGeoRadius", new[] { "m1", "Catania", "m2", "Palermo", "Catania2" });
			var georadius1 = rds.GeoRadius("TestGeoRadius", 15, 37, 200, GeoUnit.km, null, null);
			var georadius2 = rds.GeoRadius<byte[]>("TestGeoRadius", 15, 37, 200, GeoUnit.km, null, null);
			var georadius5 = rds.GeoRadiusWithDist("TestGeoRadius", 15, 37, 200, GeoUnit.km, null, null);
			var georadius6 = rds.GeoRadiusWithDist<byte[]>("TestGeoRadius", 15, 37, 200, GeoUnit.km, null);
			var georadius7 = rds.GeoRadiusWithDistAndCoord("TestGeoRadius", 15, 37, 200, GeoUnit.km, null);
			var georadius8 = rds.GeoRadiusWithDistAndCoord<byte[]>("TestGeoRadius", 15, 37, 200, GeoUnit.km);
			var georadius11 = rds.GeoRadiusByMember("TestGeoRadius", "m1", 200, GeoUnit.km, null, null);
			var georadius12 = rds.GeoRadiusByMember<byte[]>("TestGeoRadius", "m1", 200, GeoUnit.km, null, null);
			var georadius15 = rds.GeoRadiusByMemberWithDist("TestGeoRadius", "m1", 200, GeoUnit.km, null, null);
			var georadius16 = rds.GeoRadiusByMemberWithDist<byte[]>("TestGeoRadius", "m1", 200, GeoUnit.km, null);
			var georadius17 = rds.GeoRadiusByMemberWithDistAndCoord("TestGeoRadius", "m1", 200, GeoUnit.km, null);
			var georadius18 = rds.GeoRadiusByMemberWithDistAndCoord<byte[]>("TestGeoRadius", "m1", 200, GeoUnit.km);
		}
	}
}
|
2881099/csredis | 12,173 | test/CSRedisCore.Tests/CSRedisClientStringTests.cs | using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Xunit;
namespace CSRedisCore.Tests {
public class CSRedisClientStringTests : TestBase {
		// APPEND semantics for null, string and byte payloads; appending null
		// leaves the value unchanged, and Get(key, Stream) mirrors string Get.
		[Fact]
		public void Append() {
			var key = "TestAppend_null";
			rds.Set(key, base.String);
			rds.Append(key, base.Null);
			Assert.Equal(rds.Get(key), base.String);
			key = "TestAppend_string";
			rds.Set(key, base.String);
			rds.Append(key, base.String);
			Assert.Equal(rds.Get(key), base.String + base.String);
			var ms = new MemoryStream();
			rds.Get(key, ms);
			Assert.Equal(Encoding.UTF8.GetString(ms.ToArray()), base.String + base.String);
			ms.Close();
			key = "TestAppend_bytes";
			rds.Set(key, base.Bytes);
			rds.Append(key, base.Bytes);
			Assert.Equal(Convert.ToBase64String(rds.Get<byte[]>(key)), Convert.ToBase64String(base.Bytes.Concat(base.Bytes).ToArray()));
		}
		// Async counterpart of Append covering the same null/string/bytes cases.
		[Fact]
		async public Task AppendAsync()
		{
			var key = "TestAppendAsync_null";
			await rds.SetAsync(key, base.String);
			await rds.AppendAsync(key, base.Null);
			Assert.Equal(await rds.GetAsync(key), base.String);
			key = "TestAppendAsync_string";
			await rds.SetAsync(key, base.String);
			await rds.AppendAsync(key, base.String);
			Assert.Equal(await rds.GetAsync(key), base.String + base.String);
			key = "TestAppendAsync_bytes";
			await rds.SetAsync(key, base.Bytes);
			await rds.AppendAsync(key, base.Bytes);
			Assert.Equal(Convert.ToBase64String(await rds.GetAsync<byte[]>(key)), Convert.ToBase64String(base.Bytes.Concat(base.Bytes).ToArray()));
		}
		// BITCOUNT with byte ranges; three bits set at offsets 80/90/100.
		// NOTE(review): key is not deleted first, so a previous run's bits could
		// leak in -- presumably relies on a clean test database; confirm.
		[Fact]
		public void BitCount() {
			var key = "TestBitCount";
			rds.SetBit(key, 100, true);
			rds.SetBit(key, 90, true);
			rds.SetBit(key, 80, true);
			Assert.Equal(3, rds.BitCount(key, 0, 101));
			Assert.Equal(3, rds.BitCount(key, 0, 100));
			Assert.Equal(3, rds.BitCount(key, 0, 99));
			Assert.Equal(3, rds.BitCount(key, 0, 60));
		}
		// Empty placeholder: BITOP is not yet covered.
		[Fact]
		public void BitOp() { }
		// Empty placeholder: BITPOS is not yet covered.
		[Fact]
		public void BitPos() { }
		// GET round-trips for null/string/bytes/serialized-class values, plus a
		// pipelined batch. NOTE(review): the async body runs via Task.Run(...).Wait()
		// (sync-over-async inside a sync [Fact]); assertion failures surface as
		// AggregateException -- consider an async Task test method instead.
		[Fact]
		public void Get() {
			var testss = rds.StartPipe(a =>
			{
				a.Get<int?>("1");
				a.Get<int?>("2");
				a.Get("3");
				a.Get<long>("4");
			});
			Task.Run(async() => {
				var key = "TestGet_null";
				await rds.SetAsync(key, base.Null);
				Assert.Equal((await rds.GetAsync(key))?.ToString() ?? "", base.Null?.ToString() ?? "");
				key = "TestGet_string";
				await rds.SetAsync(key, base.String);
				Assert.Equal(await rds.GetAsync(key), base.String);
				key = "TestGet_bytes";
				await rds.SetAsync(key, base.Bytes);
				Assert.Equal(await rds.GetAsync<byte[]>(key), base.Bytes);
				key = "TestGet_class";
				await rds.SetAsync(key, base.Class);
				Assert.Equal((await rds.GetAsync<TestClass>(key))?.ToString(), base.Class.ToString());
				key = "TestGet_classArray";
				await rds.SetAsync(key, new[] { base.Class, base.Class });
				Assert.Equal(2, rds.Get<TestClass[]>(key)?.Length);
				Assert.Equal((await rds.GetAsync<TestClass[]>(key))?.First().ToString(), base.Class.ToString());
				Assert.Equal((await rds.GetAsync<TestClass[]>(key))?.Last().ToString(), base.Class.ToString());
			}).Wait();
		}
		// GETBIT: set bits read back true, an untouched offset reads false.
		[Fact]
		public void GetBit() {
			var key = "TestGetBit";
			rds.SetBit(key, 100, true);
			rds.SetBit(key, 90, true);
			rds.SetBit(key, 80, true);
			Assert.True(rds.GetBit(key, 100));
			Assert.True(rds.GetBit(key, 90));
			Assert.True(rds.GetBit(key, 80));
			Assert.False(rds.GetBit(key, 79));
		}
		// GETRANGE with inclusive offsets and the (0, -1) whole-value form,
		// for both string and raw byte payloads.
		[Fact]
		public void GetRange() {
			var key = "TestGetRange_null";
			rds.Set(key, base.Null);
			Assert.Equal("", rds.GetRange(key, 10, 20));
			key = "TestGetRange_string";
			rds.Set(key, "abcdefg");
			Assert.Equal("cde", rds.GetRange(key, 2, 4));
			Assert.Equal("abcdefg", rds.GetRange(key, 0, -1));
			key = "TestGetRange_bytes";
			rds.Set(key, base.Bytes);
			Assert.Equal(base.Bytes.AsSpan(2, 3).ToArray(), rds.GetRange<byte[]>(key, 2, 4));
			Assert.Equal(base.Bytes, rds.GetRange<byte[]>(key, 0, -1));
		}
		// GETSET returns the previous value while atomically writing the new one.
		[Fact]
		public void GetSet() {
			var key = "TestGetSet_null";
			rds.Set(key, base.Null);
			Assert.Equal("", rds.GetSet(key, base.Null));
			key = "TestGetSet_string";
			rds.Set(key, base.String);
			Assert.Equal(base.String, rds.GetSet(key, "newvalue"));
			Assert.Equal("newvalue", rds.Get(key));
			key = "TestGetSet_bytes";
			rds.Set(key, base.Bytes);
			Assert.Equal(base.Bytes, rds.GetSet<byte[]>(key, "newvalue"));
			Assert.Equal("newvalue", rds.Get(key));
		}
		// INCRBY on a missing key starts from 0; INCRBYFLOAT handles decimals.
		// NOTE(review): keys are not cleaned up first, so repeated runs against a
		// dirty database would shift the expected counters -- confirm test isolation.
		[Fact]
		public void IncrBy() {
			var key = "TestIncrBy_null";
			//rds.Set(key, base.Null);
			Assert.Equal(1, rds.IncrBy(key, 1));
			//key = "TestIncrBy_string";
			//rds.Set(key, base.String);
			//Assert.Throws<CSRedis.RedisException>(() => rds.IncrBy(key, 1));
			//key = "TestIncrBy_bytes";
			//rds.Set(key, base.Bytes);
			//Assert.Throws<CSRedis.RedisException>(() => rds.IncrBy(key, 1));
			key = "TestIncrBy";
			Assert.Equal(1, rds.IncrBy(key, 1));
			Assert.Equal(11, rds.IncrBy(key, 10));
			Assert.Equal(21.5m, rds.IncrByFloat(key, 10.5m));
		}
		// MGET as strings, as raw byte arrays, and deserialized into TestClass;
		// a null value round-trips as "" / empty byte array.
		[Fact]
		public void MGet() {
			rds.Set("TestMGet_null1", base.Null);
			rds.Set("TestMGet_string1", base.String);
			rds.Set("TestMGet_bytes1", base.Bytes);
			rds.Set("TestMGet_class1", base.Class);
			rds.Set("TestMGet_null2", base.Null);
			rds.Set("TestMGet_string2", base.String);
			rds.Set("TestMGet_bytes2", base.Bytes);
			rds.Set("TestMGet_class2", base.Class);
			rds.Set("TestMGet_null3", base.Null);
			rds.Set("TestMGet_string3", base.String);
			rds.Set("TestMGet_bytes3", base.Bytes);
			rds.Set("TestMGet_class3", base.Class);
			Assert.Equal(4, rds.MGet("TestMGet_null1", "TestMGet_string1", "TestMGet_bytes1", "TestMGet_class1").Length);
			Assert.Equal("", rds.MGet("TestMGet_null1", "TestMGet_string1", "TestMGet_bytes1", "TestMGet_class1")[0]);
			Assert.Equal(base.String, rds.MGet("TestMGet_null1", "TestMGet_string1", "TestMGet_bytes1", "TestMGet_class1")[1]);
			Assert.Equal(Encoding.UTF8.GetString(base.Bytes), rds.MGet("TestMGet_null1", "TestMGet_string1", "TestMGet_bytes1", "TestMGet_class1")[2]);
			Assert.Equal(base.Class.ToString(), rds.MGet("TestMGet_null1", "TestMGet_string1", "TestMGet_bytes1", "TestMGet_class1")[3]);
			Assert.Equal(4, rds.MGet<byte[]>("TestMGet_null1", "TestMGet_string1", "TestMGet_bytes1", "TestMGet_class1").Length);
			Assert.Equal(new byte[0], rds.MGet<byte[]>("TestMGet_null1", "TestMGet_string1", "TestMGet_bytes1", "TestMGet_class1")[0]);
			Assert.Equal(Encoding.UTF8.GetBytes(base.String), rds.MGet<byte[]>("TestMGet_null1", "TestMGet_string1", "TestMGet_bytes1", "TestMGet_class1")[1]);
			Assert.Equal(base.Bytes, rds.MGet<byte[]>("TestMGet_null1", "TestMGet_string1", "TestMGet_bytes1", "TestMGet_class1")[2]);
			Assert.Equal(Encoding.UTF8.GetBytes(base.Class.ToString()), rds.MGet<byte[]>("TestMGet_null1", "TestMGet_string1", "TestMGet_bytes1", "TestMGet_class1")[3]);
			Assert.Equal(3, rds.MGet<TestClass>("TestMGet_class1", "TestMGet_class2", "TestMGet_class3").Length);
			Assert.Equal(base.Class.ToString(), rds.MGet<TestClass>("TestMGet_class1", "TestMGet_class2", "TestMGet_class3")[0]?.ToString());
			Assert.Equal(base.Class.ToString(), rds.MGet<TestClass>("TestMGet_class1", "TestMGet_class2", "TestMGet_class3")[1]?.ToString());
			Assert.Equal(base.Class.ToString(), rds.MGet<TestClass>("TestMGet_class1", "TestMGet_class2", "TestMGet_class3")[2]?.ToString());
		}
		// MSET writes heterogeneous values in one call; each reads back intact.
		[Fact]
		public void MSet() {
			Assert.True(rds.MSet("TestMSet_null1", base.Null, "TestMSet_string1", base.String, "TestMSet_bytes1", base.Bytes, "TestMSet_class1", base.Class));
			Assert.Equal("", rds.Get("TestMSet_null1"));
			Assert.Equal(base.String, rds.Get("TestMSet_string1"));
			Assert.Equal(base.Bytes, rds.Get<byte[]>("TestMSet_bytes1"));
			Assert.Equal(base.Class.ToString(), rds.Get<TestClass>("TestMSet_class1").ToString());
		}
[Fact]
public void MSetNx() {
    // MSETNX writes only when the key is absent: the first call per key
    // succeeds, the repeat call is rejected, and the original value survives.
    Assert.True(rds.MSetNx("TestMSetNx_null", base.Null));
    Assert.False(rds.MSetNx("TestMSetNx_null", base.Null));
    Assert.Equal("", rds.Get("TestMSetNx_null"));
    Assert.True(rds.MSetNx("TestMSetNx_string", base.String));
    Assert.False(rds.MSetNx("TestMSetNx_string", base.String));
    Assert.Equal(base.String, rds.Get("TestMSetNx_string"));
    Assert.True(rds.MSetNx("TestMSetNx_bytes", base.Bytes));
    Assert.False(rds.MSetNx("TestMSetNx_bytes", base.Bytes));
    Assert.Equal(base.Bytes, rds.Get<byte[]>("TestMSetNx_bytes"));
    Assert.True(rds.MSetNx("TestMSetNx_class", base.Class));
    Assert.False(rds.MSetNx("TestMSetNx_class", base.Class));
    Assert.Equal(base.Class.ToString(), rds.Get<TestClass>("TestMSetNx_class").ToString());
    // Multi-key MSETNX is all-or-nothing: because "abctest" already exists,
    // the whole batch is rejected and none of the other keys are written...
    rds.Set("abctest", 1);
    Assert.False(rds.MSetNx("abctest", 2, "TestMSetNx_null1", base.Null, "TestMSetNx_string1", base.String, "TestMSetNx_bytes1", base.Bytes, "TestMSetNx_class1", base.Class));
    // ...so retrying the batch without the conflicting key must succeed.
    Assert.True(rds.MSetNx("TestMSetNx_null1", base.Null, "TestMSetNx_string1", base.String, "TestMSetNx_bytes1", base.Bytes, "TestMSetNx_class1", base.Class));
    // "abctest" keeps its pre-batch value; the batch keys read back normally.
    Assert.Equal(1, rds.Get<int>("abctest"));
    Assert.Equal("", rds.Get("TestMSetNx_null1"));
    Assert.Equal(base.String, rds.Get("TestMSetNx_string1"));
    Assert.Equal(base.Bytes, rds.Get<byte[]>("TestMSetNx_bytes1"));
    Assert.Equal(base.Class.ToString(), rds.Get<TestClass>("TestMSetNx_class1").ToString());
}
[Fact]
public void Set() {
    // SET must succeed for every supported value kind.
    Assert.True(rds.Set("TestSet_null", base.Null));
    Assert.True(rds.Set("TestSet_string", base.String));
    Assert.True(rds.Set("TestSet_bytes", base.Bytes));
    Assert.True(rds.Set("TestSet_class", base.Class));

    // Each key then reads back with the expected serialization: null becomes
    // the empty string, bytes round-trip verbatim, objects round-trip through
    // the serializer (compared via ToString).
    Assert.Equal("", rds.Get("TestSet_null"));
    Assert.Equal(base.String, rds.Get("TestSet_string"));
    Assert.Equal(base.Bytes, rds.Get<byte[]>("TestSet_bytes"));
    Assert.Equal(base.Class.ToString(), rds.Get<TestClass>("TestSet_class").ToString());
}
[Fact]
public void SetBit() {
    var key = "TestSetBit";
    // Raise three scattered bits (order is irrelevant; the writes are independent).
    rds.SetBit(key, 80, true);
    rds.SetBit(key, 90, true);
    rds.SetBit(key, 100, true);
    // Every raised offset must read back as set...
    Assert.True(rds.GetBit(key, 80));
    Assert.True(rds.GetBit(key, 90));
    Assert.True(rds.GetBit(key, 100));
    // ...while a neighbouring untouched offset stays clear.
    Assert.False(rds.GetBit(key, 79));
}
[Fact]
public void SetNx() {
    // SETNX writes only when the key is absent: the first call per key
    // succeeds, the repeat is rejected, and the first value remains readable.
    Assert.True(rds.SetNx("TestSetNx_null", base.Null));
    Assert.False(rds.SetNx("TestSetNx_null", base.Null));
    Assert.Equal("", rds.Get("TestSetNx_null"));
    Assert.True(rds.SetNx("TestSetNx_string", base.String));
    Assert.False(rds.SetNx("TestSetNx_string", base.String));
    Assert.Equal(base.String, rds.Get("TestSetNx_string"));
    Assert.True(rds.SetNx("TestSetNx_bytes", base.Bytes));
    Assert.False(rds.SetNx("TestSetNx_bytes", base.Bytes));
    Assert.Equal(base.Bytes, rds.Get<byte[]>("TestSetNx_bytes"));
    Assert.True(rds.SetNx("TestSetNx_class", base.Class));
    Assert.False(rds.SetNx("TestSetNx_class", base.Class));
    Assert.Equal(base.Class.ToString(), rds.Get<TestClass>("TestSetNx_class").ToString());
}
[Fact]
public void SetRange() {
var key = "TestSetRange_null";
rds.Set(key, base.Null);
rds.SetRange(key, 10, base.String);
Assert.Equal(base.String, rds.GetRange(key, 10, -1));
key = "TestSetRange_string";
rds.Set(key, "abcdefg");
rds.SetRange(key, 2, "yyy");
Assert.Equal("yyy", rds.GetRange(key, 2, 4));
key = "TestSetRange_bytes";
rds.Set(key, base.Bytes);
rds.SetRange(key, 2, base.Bytes);
Assert.Equal(base.Bytes, rds.GetRange<byte[]>(key, 2, base.Bytes.Length + 2));
}
[Fact]
public void StrLen() {
var key = "TestStrLen_null";
rds.Set(key, base.Null);
Assert.Equal(0, rds.StrLen(key));
key = "TestStrLen_string";
rds.Set(key, "abcdefg");
Assert.Equal(7, rds.StrLen(key));
key = "TestStrLen_string";
rds.Set(key, base.String);
Assert.Equal(15, rds.StrLen(key));
key = "TestStrLen_bytes";
rds.Set(key, base.Bytes);
Assert.Equal(base.Bytes.Length, rds.StrLen(key));
}
}
}
|
2881099/csredis | 8,129 | test/CSRedisCore.Tests/CSRedisClientHashTests.cs | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Xunit;
namespace CSRedisCore.Tests {
public class CSRedisClientHashTests : TestBase {

    [Fact]
    public void HDel() {
        // HDEL reports how many of the named fields actually existed.
        Assert.True(rds.HMSet("TestHDel", "string1", base.String, "bytes1", base.Bytes, "class1", base.Class));
        Assert.Equal(3, rds.HDel("TestHDel", "string1", "bytes1", "class1"));
    }

    [Fact]
    public void HExists() {
        // A field only exists between its HSET and its HDEL.
        Assert.False(rds.HExists("TestHExists", "null1"));
        Assert.True(rds.HSet("TestHExists", "null1", 1));
        Assert.True(rds.HExists("TestHExists", "null1"));
        Assert.Equal(1, rds.HDel("TestHExists", "null1"));
        Assert.False(rds.HExists("TestHExists", "null1"));
    }

    [Fact]
    public void HGet() {
        // Every supported value kind must round-trip through HMSET/HGET,
        // including an array payload deserialized back into TestClass[].
        Assert.True(rds.HMSet("TestHGet", "null1", base.Null, "string1", base.String, "bytes1", base.Bytes, "class1", base.Class, "class1array", new[] { base.Class, base.Class }));
        Assert.Equal(rds.HGet("TestHGet", "null1")?.ToString() ?? "", base.Null?.ToString() ?? "");
        Assert.Equal(rds.HGet("TestHGet", "string1"), base.String);
        Assert.Equal(rds.HGet<byte[]>("TestHGet", "bytes1"), base.Bytes);
        Assert.Equal(rds.HGet<TestClass>("TestHGet", "class1")?.ToString(), base.Class.ToString());
        Assert.Equal(2, rds.HGet<TestClass[]>("TestHGet", "class1array")?.Length);
        Assert.Equal(rds.HGet<TestClass[]>("TestHGet", "class1array")?.First().ToString(), base.Class.ToString());
        Assert.Equal(rds.HGet<TestClass[]>("TestHGet", "class1array")?.Last().ToString(), base.Class.ToString());
    }

    [Fact]
    public void HGetAll()
    {
        Assert.True(rds.HMSet("TestHGetAll", "string1", base.String, "bytes1", base.Bytes, "class1", base.Class, "class1array", new[] { base.Class, base.Class }));
        Assert.Equal(4, rds.HGetAll("TestHGetAll").Count);
        Assert.Equal(base.String, rds.HGetAll("TestHGetAll")["string1"]);
        Assert.Equal(Encoding.UTF8.GetString(base.Bytes), rds.HGetAll("TestHGetAll")["bytes1"]);
        Assert.Equal(base.Class.ToString(), rds.HGetAll("TestHGetAll")["class1"]);
        // Regression check for the async path: HGETALL against a string key
        // raises a server-side error, and the connection must remain usable
        // for many subsequent async HGETALL calls afterwards.
        Task.Run(async () =>
        {
            var test = await rds.HGetAllAsync("TestHGetAll");
            rds.Set("TestHGetAll2", "1");
            try
            {
                var test2 = await rds.HGetAllAsync("TestHGetAll2");
            }
            catch
            {
                // expected failure (wrong key type) — deliberately swallowed,
                // the point of this test is the recovery loop below
            }
            for (var a = 0; a < 1000; a++)
                test = await rds.HGetAllAsync("TestHGetAll");
        }).Wait();
    }

    [Fact]
    public void HIncrBy() {
        Assert.True(rds.HMSet("TestHIncrBy", "null1", base.Null, "string1", base.String, "bytes1", base.Bytes, "class1", base.Class, "class1array", new[] { base.Class, base.Class }));
        // An empty field increments from 0; non-numeric fields must throw.
        Assert.Equal(1, rds.HIncrBy("TestHIncrBy", "null1", 1));
        Assert.Throws<CSRedis.RedisException>(() => rds.HIncrBy("TestHIncrBy", "string1", 1));
        Assert.Throws<CSRedis.RedisException>(() => rds.HIncrBy("TestHIncrBy", "bytes1", 1));
        Assert.Equal(2, rds.HIncrBy("TestHIncrBy", "null1", 1));
        Assert.Equal(12, rds.HIncrBy("TestHIncrBy", "null1", 10));
    }

    [Fact]
    public void HIncrByFloat() {
        // This test accumulates state in the field, so clear the key up-front
        // to keep reruns deterministic.
        rds.Del("TestHIncrByFloat");
        Assert.True(rds.HMSet("TestHIncrByFloat", "null1", base.Null, "string1", base.String, "bytes1", base.Bytes, "class1", base.Class, "class1array", new[] { base.Class, base.Class }));
        // 0.5 + 3.3 + 10.2 => 0.5, 3.8, 14.0; non-numeric fields must throw.
        Assert.Equal(0.5m, rds.HIncrByFloat("TestHIncrByFloat", "null1", 0.5m));
        Assert.Throws<CSRedis.RedisException>(() => rds.HIncrByFloat("TestHIncrByFloat", "string1", 1.5m));
        Assert.Throws<CSRedis.RedisException>(() => rds.HIncrByFloat("TestHIncrByFloat", "bytes1", 5));
        Assert.Equal(3.8m, rds.HIncrByFloat("TestHIncrByFloat", "null1", 3.3m));
        Assert.Equal(14.0m, rds.HIncrByFloat("TestHIncrByFloat", "null1", 10.2m));
    }

    [Fact]
    public void HKeys() {
        // HKEYS returns exactly the field names that were written.
        Assert.True(rds.HMSet("TestHKeys", "string1", base.String, "bytes1", base.Bytes, "class1", base.Class, "class1array", new[] { base.Class, base.Class }));
        Assert.Equal(4, rds.HKeys("TestHKeys").Length);
        Assert.Contains("string1", rds.HKeys("TestHKeys"));
        Assert.Contains("bytes1", rds.HKeys("TestHKeys"));
        Assert.Contains("class1", rds.HKeys("TestHKeys"));
        Assert.Contains("class1array", rds.HKeys("TestHKeys"));
    }

    [Fact]
    public void HLen() {
        // HLEN counts the fields in the hash.
        Assert.True(rds.HMSet("TestHLen", "string1", base.String, "bytes1", base.Bytes, "class1", base.Class, "class1array", new[] { base.Class, base.Class }));
        Assert.Equal(4, rds.HLen("TestHLen"));
    }

    [Fact]
    public void HMGet() {
        // HMGET fetches several fields at once, both raw and deserialized.
        Assert.True(rds.HMSet("TestHMGet", "string1", base.String, "bytes1", base.Bytes, "class1", base.Class, "class1array", new[] { base.Class, base.Class }));
        Assert.True(rds.HMSet("TestHMGet", "string2", base.String, "bytes2", base.Bytes, "class2", base.Class, "class2array", new[] { base.Class, base.Class }));
        Assert.Equal(2, rds.HMGet("TestHMGet", "string1", "string2").Length);
        Assert.Contains(base.String, rds.HMGet("TestHMGet", "string1", "string2"));
        Assert.Equal(2, rds.HMGet<TestClass>("TestHMGet", "class1", "class2").Length);
        Assert.Contains(base.Class.ToString(), rds.HMGet<TestClass>("TestHMGet", "class1", "class2")?.Select(a => a.ToString()));
    }

    [Fact]
    public void HMSet() {
        // HMSET stores multiple field/value pairs in one call.
        Assert.True(rds.HMSet("TestHMSet", "string1", base.String, "bytes1", base.Bytes, "class1", base.Class, "class1array", new[] { base.Class, base.Class }));
        Assert.Equal(4, rds.HMGet("TestHMSet", "string1", "bytes1", "class1", "class1array").Length);
        Assert.Contains(base.String, rds.HMGet("TestHMSet", "string1", "bytes1", "class1", "class1array"));
        Assert.Contains(Encoding.UTF8.GetString(base.Bytes), rds.HMGet("TestHMSet", "string1", "bytes1", "class1", "class1array"));
        Assert.Contains(base.Class.ToString(), rds.HMGet("TestHMSet", "string1", "bytes1", "class1", "class1array"));
    }

    [Fact]
    public void HSet() {
        // HSET returns true when the field is newly created; the value must
        // then round-trip through HGET for every supported kind.
        Assert.True(rds.HSet("TestHSet", "string1", base.String));
        Assert.Equal(base.String, rds.HGet("TestHSet", "string1"));
        Assert.True(rds.HSet("TestHSet", "bytes1", base.Bytes));
        Assert.Equal(base.Bytes, rds.HGet<byte[]>("TestHSet", "bytes1"));
        Assert.True(rds.HSet("TestHSet", "class1", base.Class));
        Assert.Equal(base.Class.ToString(), rds.HGet<TestClass>("TestHSet", "class1").ToString());
    }

    [Fact]
    public void HSetNx() {
        // Clear leftover state so the first HSETNX per field is a real insert.
        rds.Del("TestHSetNx");
        // HSETNX only writes when the field is absent: first call true, repeat
        // call false, and the originally stored value survives the rejected
        // write. (This test previously exercised HSet, which merely *returns*
        // false for an existing field but still overwrites its value — that
        // never covered the not-exists semantics the test name promises.)
        Assert.True(rds.HSetNx("TestHSetNx", "string1", base.String));
        Assert.Equal(base.String, rds.HGet("TestHSetNx", "string1"));
        Assert.False(rds.HSetNx("TestHSetNx", "string1", base.String));
        Assert.True(rds.HSetNx("TestHSetNx", "bytes1", base.Bytes));
        Assert.Equal(base.Bytes, rds.HGet<byte[]>("TestHSetNx", "bytes1"));
        Assert.False(rds.HSetNx("TestHSetNx", "bytes1", base.Bytes));
        Assert.True(rds.HSetNx("TestHSetNx", "class1", base.Class));
        Assert.Equal(base.Class.ToString(), rds.HGet<TestClass>("TestHSetNx", "class1").ToString());
        Assert.False(rds.HSetNx("TestHSetNx", "class1", base.Class));
    }

    [Fact]
    public void HVals() {
        // HVALS returns every stored value; typed overloads deserialize them.
        Assert.True(rds.HMSet("TestHVals1", "string1", base.String, "bytes1", base.Bytes, "class1", base.Class, "class1array1", new[] { base.Class, base.Class }));
        Assert.True(rds.HMSet("TestHVals1", "string2", base.String, "bytes2", base.Bytes, "class2", base.Class, "class2array2", new[] { base.Class, base.Class }));
        Assert.Equal(8, rds.HVals("TestHVals1").Length);
        Assert.True(rds.HMSet("TestHVals2", "string1", base.String, "string2", base.String));
        Assert.Equal(2, rds.HVals("TestHVals2").Length);
        Assert.Contains(base.String, rds.HVals("TestHVals2"));
        Assert.True(rds.HMSet("TestHVals3", "bytes1", base.Bytes, "bytes2", base.Bytes));
        Assert.Equal(2, rds.HVals<byte[]>("TestHVals3").Length);
        Assert.Contains(base.Bytes, rds.HVals<byte[]>("TestHVals3"));
        Assert.True(rds.HMSet("TestHVals4", "class1", base.Class, "class2", base.Class));
        Assert.Equal(2, rds.HVals<TestClass>("TestHVals4").Length);
        Assert.Contains(base.Class.ToString(), rds.HVals<TestClass>("TestHVals4").Select(a => a.ToString()));
        Assert.True(rds.HMSet("TestHVals5", "class2array1", new[] { base.Class, base.Class }, "class2array2", new[] { base.Class, base.Class }));
        Assert.Equal(2, rds.HVals<TestClass[]>("TestHVals5").Length);
    }

    [Fact]
    public void HScan() { } // TODO: HSCAN coverage not yet implemented

}
}
|
2877025939/tabelVew-CollectionView | 4,091 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/Masonry/Masonry/View+MASAdditions.h | //
// UIView+MASAdditions.h
// Masonry
//
// Created by Jonas Budelmann on 20/07/13.
// Copyright (c) 2013 cloudling. All rights reserved.
//
#import "MASUtilities.h"
#import "MASConstraintMaker.h"
#import "MASViewAttribute.h"
/**
 *  Provides the constraint-maker block entry points
 *  and convenience accessors that wrap this view and an NSLayoutAttribute
 *  into MASViewAttribute pairs.
 */
@interface MAS_VIEW (MASAdditions)

/**
 *  The following properties each return a new MASViewAttribute
 *  pairing the receiver with the corresponding NSLayoutAttribute.
 */
@property (nonatomic, strong, readonly) MASViewAttribute *mas_left;
@property (nonatomic, strong, readonly) MASViewAttribute *mas_top;
@property (nonatomic, strong, readonly) MASViewAttribute *mas_right;
@property (nonatomic, strong, readonly) MASViewAttribute *mas_bottom;
@property (nonatomic, strong, readonly) MASViewAttribute *mas_leading;
@property (nonatomic, strong, readonly) MASViewAttribute *mas_trailing;
@property (nonatomic, strong, readonly) MASViewAttribute *mas_width;
@property (nonatomic, strong, readonly) MASViewAttribute *mas_height;
@property (nonatomic, strong, readonly) MASViewAttribute *mas_centerX;
@property (nonatomic, strong, readonly) MASViewAttribute *mas_centerY;
@property (nonatomic, strong, readonly) MASViewAttribute *mas_baseline;
// Escape hatch: build a MASViewAttribute for an arbitrary NSLayoutAttribute.
@property (nonatomic, strong, readonly) MASViewAttribute *(^mas_attribute)(NSLayoutAttribute attr);

// First/last baseline attributes require iOS 8 / tvOS 9 / macOS 10.11 SDKs.
#if (__IPHONE_OS_VERSION_MIN_REQUIRED >= 80000) || (__TV_OS_VERSION_MIN_REQUIRED >= 9000) || (__MAC_OS_X_VERSION_MIN_REQUIRED >= 101100)

@property (nonatomic, strong, readonly) MASViewAttribute *mas_firstBaseline;
@property (nonatomic, strong, readonly) MASViewAttribute *mas_lastBaseline;

#endif

// Layout-margin attributes exist only on iOS / tvOS.
#if TARGET_OS_IPHONE || TARGET_OS_TV

@property (nonatomic, strong, readonly) MASViewAttribute *mas_leftMargin;
@property (nonatomic, strong, readonly) MASViewAttribute *mas_rightMargin;
@property (nonatomic, strong, readonly) MASViewAttribute *mas_topMargin;
@property (nonatomic, strong, readonly) MASViewAttribute *mas_bottomMargin;
@property (nonatomic, strong, readonly) MASViewAttribute *mas_leadingMargin;
@property (nonatomic, strong, readonly) MASViewAttribute *mas_trailingMargin;
@property (nonatomic, strong, readonly) MASViewAttribute *mas_centerXWithinMargins;
@property (nonatomic, strong, readonly) MASViewAttribute *mas_centerYWithinMargins;

#endif

/**
 *  A key to associate with this view (used in debug descriptions).
 */
@property (nonatomic, strong) id mas_key;

/**
 *  Finds the closest common superview between this view and another view.
 *
 *  @param view other view
 *
 *  @return returns nil if a common superview could not be found
 */
- (instancetype)mas_closestCommonSuperview:(MAS_VIEW *)view;

/**
 *  Creates a MASConstraintMaker with the callee view.
 *  Any constraints defined are added to the view or the appropriate superview once the block has finished executing.
 *
 *  @param block scope within which you can build up the constraints which you wish to apply to the view.
 *
 *  @return Array of created MASConstraints
 */
- (NSArray *)mas_makeConstraints:(void(^)(MASConstraintMaker *make))block;

/**
 *  Creates a MASConstraintMaker with the callee view.
 *  Any constraints defined are added to the view or the appropriate superview once the block has finished executing.
 *  If an existing constraint exists then it will be updated instead.
 *
 *  @param block scope within which you can build up the constraints which you wish to apply to the view.
 *
 *  @return Array of created/updated MASConstraints
 */
- (NSArray *)mas_updateConstraints:(void(^)(MASConstraintMaker *make))block;

/**
 *  Creates a MASConstraintMaker with the callee view.
 *  Any constraints defined are added to the view or the appropriate superview once the block has finished executing.
 *  All constraints previously installed for the view will be removed.
 *
 *  @param block scope within which you can build up the constraints which you wish to apply to the view.
 *
 *  @return Array of created/updated MASConstraints
 */
- (NSArray *)mas_remakeConstraints:(void(^)(MASConstraintMaker *make))block;

@end
|
2877025939/tabelVew-CollectionView | 5,718 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/Masonry/Masonry/NSLayoutConstraint+MASDebugAdditions.m | //
// NSLayoutConstraint+MASDebugAdditions.m
// Masonry
//
// Created by Jonas Budelmann on 3/08/13.
// Copyright (c) 2013 Jonas Budelmann. All rights reserved.
//
#import "NSLayoutConstraint+MASDebugAdditions.h"
#import "MASConstraint.h"
#import "MASLayoutConstraint.h"
@implementation NSLayoutConstraint (MASDebugAdditions)

#pragma mark - description maps

// Maps NSLayoutRelation values to their operator spelling ("==", ">=", "<=").
// Built lazily exactly once and cached for the process lifetime.
+ (NSDictionary *)layoutRelationDescriptionsByValue {
    static dispatch_once_t once;
    static NSDictionary *descriptionMap;
    dispatch_once(&once, ^{
        descriptionMap = @{
            @(NSLayoutRelationEqual) : @"==",
            @(NSLayoutRelationGreaterThanOrEqual) : @">=",
            @(NSLayoutRelationLessThanOrEqual) : @"<=",
        };
    });
    return descriptionMap;
}

// Maps NSLayoutAttribute values to human-readable names. Margin attributes
// are iOS/tvOS-only; first/last baseline require newer deployment targets,
// hence the conditional compilation.
// NOTE(review): on SDKs where NSLayoutAttributeBaseline and
// NSLayoutAttributeLastBaseline share a raw value, this literal contains
// duplicate keys — confirm which label is expected to win.
+ (NSDictionary *)layoutAttributeDescriptionsByValue {
    static dispatch_once_t once;
    static NSDictionary *descriptionMap;
    dispatch_once(&once, ^{
        descriptionMap = @{
            @(NSLayoutAttributeTop) : @"top",
            @(NSLayoutAttributeLeft) : @"left",
            @(NSLayoutAttributeBottom) : @"bottom",
            @(NSLayoutAttributeRight) : @"right",
            @(NSLayoutAttributeLeading) : @"leading",
            @(NSLayoutAttributeTrailing) : @"trailing",
            @(NSLayoutAttributeWidth) : @"width",
            @(NSLayoutAttributeHeight) : @"height",
            @(NSLayoutAttributeCenterX) : @"centerX",
            @(NSLayoutAttributeCenterY) : @"centerY",
            @(NSLayoutAttributeBaseline) : @"baseline",

#if (__IPHONE_OS_VERSION_MIN_REQUIRED >= 80000) || (__TV_OS_VERSION_MIN_REQUIRED >= 9000) || (__MAC_OS_X_VERSION_MIN_REQUIRED >= 101100)
            @(NSLayoutAttributeFirstBaseline) : @"firstBaseline",
            @(NSLayoutAttributeLastBaseline) : @"lastBaseline",
#endif

#if TARGET_OS_IPHONE || TARGET_OS_TV
            @(NSLayoutAttributeLeftMargin) : @"leftMargin",
            @(NSLayoutAttributeRightMargin) : @"rightMargin",
            @(NSLayoutAttributeTopMargin) : @"topMargin",
            @(NSLayoutAttributeBottomMargin) : @"bottomMargin",
            @(NSLayoutAttributeLeadingMargin) : @"leadingMargin",
            @(NSLayoutAttributeTrailingMargin) : @"trailingMargin",
            @(NSLayoutAttributeCenterXWithinMargins) : @"centerXWithinMargins",
            @(NSLayoutAttributeCenterYWithinMargins) : @"centerYWithinMargins",
#endif

        };
    });
    return descriptionMap;
}

// Maps the well-known layout priority constants to short names; the two
// platforms expose different constant sets, hence the separate literals.
+ (NSDictionary *)layoutPriorityDescriptionsByValue {
    static dispatch_once_t once;
    static NSDictionary *descriptionMap;
    dispatch_once(&once, ^{
#if TARGET_OS_IPHONE || TARGET_OS_TV
        descriptionMap = @{
            @(MASLayoutPriorityDefaultHigh) : @"high",
            @(MASLayoutPriorityDefaultLow) : @"low",
            @(MASLayoutPriorityDefaultMedium) : @"medium",
            @(MASLayoutPriorityRequired) : @"required",
            @(MASLayoutPriorityFittingSizeLevel) : @"fitting size",
        };
#elif TARGET_OS_MAC
        descriptionMap = @{
            @(MASLayoutPriorityDefaultHigh) : @"high",
            @(MASLayoutPriorityDragThatCanResizeWindow) : @"drag can resize window",
            @(MASLayoutPriorityDefaultMedium) : @"medium",
            @(MASLayoutPriorityWindowSizeStayPut) : @"window size stay put",
            @(MASLayoutPriorityDragThatCannotResizeWindow) : @"drag cannot resize window",
            @(MASLayoutPriorityDefaultLow) : @"low",
            @(MASLayoutPriorityFittingSizeCompression) : @"fitting size",
            @(MASLayoutPriorityRequired) : @"required",
        };
#endif
    });
    return descriptionMap;
}

#pragma mark - description override

// Label for one side of a constraint: prefers the user-assigned mas_key
// ("Class:key") over the raw pointer ("Class:0x...").
+ (NSString *)descriptionForObject:(id)obj {
    if ([obj respondsToSelector:@selector(mas_key)] && [obj mas_key]) {
        return [NSString stringWithFormat:@"%@:%@", [obj class], [obj mas_key]];
    }
    return [NSString stringWithFormat:@"%@:%p", [obj class], obj];
}

// Renders the constraint as a readable equation, e.g.
// "<MASLayoutConstraint:0x... UIView:0x....left == UIView:0x....left + 10 ^high>".
// Optional parts (second item, multiplier != 1, non-zero constant,
// non-required priority) are appended only when present.
- (NSString *)description {
    NSMutableString *description = [[NSMutableString alloc] initWithString:@"<"];

    [description appendString:[self.class descriptionForObject:self]];

    [description appendFormat:@" %@", [self.class descriptionForObject:self.firstItem]];
    if (self.firstAttribute != NSLayoutAttributeNotAnAttribute) {
        [description appendFormat:@".%@", self.class.layoutAttributeDescriptionsByValue[@(self.firstAttribute)]];
    }

    [description appendFormat:@" %@", self.class.layoutRelationDescriptionsByValue[@(self.relation)]];

    if (self.secondItem) {
        [description appendFormat:@" %@", [self.class descriptionForObject:self.secondItem]];
    }
    if (self.secondAttribute != NSLayoutAttributeNotAnAttribute) {
        [description appendFormat:@".%@", self.class.layoutAttributeDescriptionsByValue[@(self.secondAttribute)]];
    }

    if (self.multiplier != 1) {
        [description appendFormat:@" * %g", self.multiplier];
    }

    // A unary constraint (no second attribute) always shows its constant;
    // a binary one shows it only when non-zero, with an explicit sign.
    if (self.secondAttribute == NSLayoutAttributeNotAnAttribute) {
        [description appendFormat:@" %g", self.constant];
    } else {
        if (self.constant) {
            [description appendFormat:@" %@ %g", (self.constant < 0 ? @"-" : @"+"), ABS(self.constant)];
        }
    }

    if (self.priority != MASLayoutPriorityRequired) {
        // Fall back to the numeric priority when it is not a named constant.
        [description appendFormat:@" ^%@", self.class.layoutPriorityDescriptionsByValue[@(self.priority)] ?: [NSNumber numberWithDouble:self.priority]];
    }

    [description appendString:@">"];
    return description;
}

@end
|
2881099/csredis | 9,857 | test/CSRedisCore.Tests/CSRedisClientKeyTests.cs | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Xunit;
namespace CSRedisCore.Tests {
public class CSRedisClientKeyTests : TestBase {

    [Fact]
    public void Del() {
        // DEL reports how many of the named keys actually existed.
        Assert.True(rds.MSet("TestDel_null1", base.Null, "TestDel_string1", base.String, "TestDel_bytes1", base.Bytes, "TestDel_class1", base.Class));
        Assert.Equal(4, rds.Del("TestDel_null1", "TestDel_string1", "TestDel_bytes1", "TestDel_class1"));
    }

    [Fact]
    public void Dump() {
        // DUMP + RESTORE must round-trip every supported value kind.
        Assert.True(rds.MSet("TestDump_null1", base.Null, "TestDump_string1", base.String, "TestDump_bytes1", base.Bytes, "TestDump_class1", base.Class));
        Assert.True(rds.Restore("TestDump_null2", rds.Dump("TestDump_null1")));
        Assert.Equal(rds.Get("TestDump_null2"), rds.Get("TestDump_null1"));
        Assert.True(rds.Restore("TestDump_string2", rds.Dump("TestDump_string1")));
        Assert.Equal(rds.Get("TestDump_string2"), rds.Get("TestDump_string1"));
        Assert.True(rds.Restore("TestDump_bytes2", rds.Dump("TestDump_bytes1")));
        Assert.Equal(rds.Get<byte[]>("TestDump_bytes2"), rds.Get<byte[]>("TestDump_bytes1"));
        Assert.True(rds.Restore("TestDump_class2", rds.Dump("TestDump_class1")));
        Assert.Equal(rds.Get<TestClass>("TestDump_class2").ToString(), rds.Get<TestClass>("TestDump_class1").ToString());
    }

    [Fact]
    public void Exists() {
        // A key exists only between its SET and its DEL.
        Assert.False(rds.Exists("TestExists_null1"));
        Assert.True(rds.Set("TestExists_null1", 1));
        Assert.True(rds.Exists("TestExists_null1"));
        Assert.Equal(1, rds.Del("TestExists_null1"));
        Assert.False(rds.Exists("TestExists_null1"));
    }

    [Fact]
    public void Expire() {
        Assert.True(rds.MSet("TestExpire_null1", base.Null, "TestExpire_string1", base.String, "TestExpire_bytes1", base.Bytes, "TestExpire_class1", base.Class));
        Assert.True(rds.Expire("TestExpire_null1", 10));
        // NOTE(review): asserting the exact TTL is timing-sensitive (a slow run
        // may observe 9); ExpireAt below uses Assert.InRange for this reason.
        Assert.Equal(10, rds.Ttl("TestExpire_null1"));
        Assert.True(rds.Expire("TestExpire_string1", TimeSpan.FromHours(1)));
        Assert.Equal(60 * 60, rds.Ttl("TestExpire_string1"));
    }

    [Fact]
    public void ExpireAt() {
        // EXPIREAT takes an absolute UTC deadline; TTL is checked with a
        // tolerance window because it depends on clock skew and latency.
        Assert.True(rds.MSet("TestExpireAt_null1", base.Null, "TestExpireAt_string1", base.String, "TestExpireAt_bytes1", base.Bytes, "TestExpireAt_class1", base.Class));
        Assert.True(rds.ExpireAt("TestExpireAt_null1", DateTime.UtcNow.AddSeconds(10)));
        Assert.InRange(rds.Ttl("TestExpireAt_null1"), 9, 20);
        Assert.True(rds.ExpireAt("TestExpireAt_string1", DateTime.UtcNow.AddHours(1)));
        Assert.InRange(rds.Ttl("TestExpireAt_string1"), 60 * 60 - 10, 60 * 60 + 10);
    }

    [Fact]
    public void Keys() {
        // NOTE(review): assumes no other "TestKeys_*" keys linger in the db.
        Assert.True(rds.MSet("TestKeys_null1", base.Null, "TestKeys_string1", base.String, "TestKeys_bytes1", base.Bytes, "TestKeys_class1", base.Class));
        Assert.Equal(4, rds.Keys("TestKeys_*").Length);
    }

    [Fact]
    public void Move() {
        // MOVE transfers a key to another db and succeeds only when the key
        // is absent in the target db.
        Assert.True(rds.MSet("TestMove_null1", base.Null, "TestMove_string1", base.String, "TestMove_bytes1", base.Bytes, "TestMove_class1", base.Class));
        Assert.True(rds.Move("TestMove_string1", 1));
        Assert.False(rds.Exists("TestMove_string1"));
        using (var conn = rds.Nodes.First().Value.Get()) {
            conn.Value.Select(1);
            Assert.Equal(base.String, conn.Value.Get("TestMove_string1"));
            // restore the pooled connection's db before handing it back
            conn.Value.Select(2);
        }
        // Second MOVE fails because the key already exists in db 1, and the
        // source value stays put.
        Assert.True(rds.Set("TestMove_string1", base.String));
        Assert.False(rds.Move("TestMove_string1", 1));
        Assert.Equal(base.String, rds.Get("TestMove_string1"));
        using (var conn = rds.Nodes.First().Value.Get()) {
            conn.Value.Select(1);
            Assert.Equal(base.String, conn.Value.Get("TestMove_string1"));
            conn.Value.Select(2);
        }
    }

    [Fact]
    public void ObjectEncoding() {
        // Smoke test only: verifies OBJECT ENCODING executes without throwing.
        Assert.True(rds.MSet("TestObjectEncoding_null1", base.Null, "TestObjectEncoding_string1", base.String, "TestObjectEncoding_bytes1", base.Bytes, "TestObjectEncoding_class1", base.Class));
        rds.ObjectEncoding("TestObjectEncoding_string1");
    }

    [Fact]
    public void ObjectRefCount() {
        Assert.True(rds.MSet("TestObjectRefCount_null1", base.Null, "TestObjectRefCount_string1", base.String, "TestObjectRefCount_bytes1", base.Bytes, "TestObjectRefCount_class1", base.Class));
        rds.Exists("TestObjectRefCount_string1");
        rds.Get("TestObjectRefCount_string1");
        // Missing keys yield null; a plain string value reports refcount 1.
        Assert.Null(rds.ObjectRefCount("TestObjectRefCount_bytes11"));
        Assert.Equal(1, rds.ObjectRefCount("TestObjectRefCount_string1"));
    }

    [Fact]
    public void ObjectIdleTime() { } // TODO: not yet implemented

    [Fact]
    public void Persist() {
        // PERSIST clears an existing TTL (returns true) and returns false for
        // keys that do not exist; TTL of -1 means "no expiry".
        Assert.True(rds.MSet("TestPersist_null1", base.Null, "TestPersist_string1", base.String, "TestPersist_bytes1", base.Bytes, "TestPersist_class1", base.Class));
        Assert.True(rds.Expire("TestPersist_null1", 10));
        Assert.Equal(10, rds.Ttl("TestPersist_null1"));
        Assert.True(rds.Expire("TestPersist_string1", TimeSpan.FromHours(1)));
        Assert.Equal(60 * 60, rds.Ttl("TestPersist_string1"));
        Assert.True(rds.Persist("TestPersist_null1"));
        Assert.False(rds.Persist("TestPersist_null11"));
        Assert.True(rds.Persist("TestPersist_string1"));
        Assert.False(rds.Persist("TestPersist_string11"));
        Assert.Equal(-1, rds.Ttl("TestPersist_null1"));
        Assert.Equal(-1, rds.Ttl("TestPersist_string1"));
    }

    [Fact]
    public void PExpire() {
        // PEXPIRE takes milliseconds; TTL is checked with a tolerance window.
        Assert.True(rds.MSet("TestPExpire_null1", base.Null, "TestPExpire_string1", base.String, "TestPExpire_bytes1", base.Bytes, "TestPExpire_class1", base.Class));
        Assert.True(rds.PExpire("TestPExpire_null1", 10000));
        Assert.InRange(rds.PTtl ("TestPExpire_null1"), 9000, 10000);
        Assert.True(rds.PExpire("TestPExpire_string1", TimeSpan.FromHours(1)));
        Assert.InRange(rds.PTtl("TestPExpire_string1"), 1000 * 60 * 60 - 1000, 1000 * 60 * 60);
    }

    [Fact]
    public void PExpireAt() {
        Assert.True(rds.MSet("TestPExpireAt_null1", base.Null, "TestPExpireAt_string1", base.String, "TestPExpireAt_bytes1", base.Bytes, "TestPExpireAt_class1", base.Class));
        Assert.True(rds.ExpireAt("TestPExpireAt_null1", DateTime.UtcNow.AddSeconds(10)));
        Assert.InRange(rds.PTtl("TestPExpireAt_null1"), 9000, 20000);
        Assert.True(rds.ExpireAt("TestPExpireAt_string1", DateTime.UtcNow.AddHours(1)));
        Assert.InRange(rds.PTtl("TestPExpireAt_string1"), 1000 * 60 * 60 - 10000, 1000 * 60 * 60 + 10000);
    }

    [Fact]
    public void PTtl() {
        // PTTL is negative for keys without an expiry or that do not exist.
        Assert.True(rds.MSet("TestPTtl_null1", base.Null, "TestPTtl_string1", base.String, "TestPTtl_bytes1", base.Bytes, "TestPTtl_class1", base.Class));
        Assert.True(rds.PExpire("TestPTtl_null1", 1000));
        Assert.InRange(rds.PTtl("TestPTtl_null1"), 500, 1000);
        Assert.InRange(rds.PTtl("TestPTtl_null11"), long.MinValue, -1);
    }

    [Fact]
    public void RandomKey() {
        // With at least one key in the db, RANDOMKEY must return something.
        Assert.True(rds.MSet("TestRandomKey_null1", base.Null, "TestRandomKey_string1", base.String, "TestRandomKey_bytes1", base.Bytes, "TestRandomKey_class1", base.Class));
        Assert.NotNull(rds.RandomKey());
    }

    [Fact]
    public void Rename() {
        // RENAME moves the value to the new key (overwriting it) and removes
        // the old key.
        Assert.True(rds.MSet("TestRename_null1", base.Null, "TestRename_string1", base.String, "TestRename_bytes1", base.Bytes, "TestRename_class1", base.Class));
        Assert.Equal(base.String, rds.Get("TestRename_string1"));
        Assert.True(rds.Rename("TestRename_string1", "TestRename_string11"));
        Assert.False(rds.Exists("TestRename_string1"));
        Assert.Equal(base.String, rds.Get("TestRename_string11"));
        Assert.True(rds.Rename("TestRename_class1", "TestRename_string11"));
        Assert.False(rds.Exists("TestRename_class1"));
        Assert.Equal(base.Class.ToString(), rds.Get<TestClass>("TestRename_string11").ToString());
    }

    [Fact]
    public void RenameNx() {
        // FIXME(review): this test calls Rename, not RenameNx, so RENAMENX is
        // never exercised — looks like a copy-paste of the Rename test.
        // It also targets "TestRename_string11" (the Rename test's key), which
        // couples the two tests through shared db state. Switching straight to
        // RenameNx would fail when that key already exists, so the intended
        // key layout needs to be decided before fixing — confirm with authors.
        Assert.True(rds.MSet("TestRenameNx_null1", base.Null, "TestRenameNx_string1", base.String, "TestRenameNx_bytes1", base.Bytes, "TestRenameNx_class1", base.Class));
        Assert.Equal(base.String, rds.Get("TestRenameNx_string1"));
        Assert.True(rds.Rename("TestRenameNx_string1", "TestRenameNx_string11"));
        Assert.False(rds.Exists("TestRenameNx_string1"));
        Assert.Equal(base.String, rds.Get("TestRenameNx_string11"));
        Assert.True(rds.Rename("TestRenameNx_class1", "TestRename_string11"));
        Assert.False(rds.Exists("TestRenameNx_class1"));
        Assert.Equal(base.Class.ToString(), rds.Get<TestClass>("TestRename_string11").ToString());
    }

    [Fact]
    public void Restore() {
        // Mirror of Dump(): RESTORE of a DUMPed payload reproduces the value.
        Assert.True(rds.MSet("TestRestore_null1", base.Null, "TestRestore_string1", base.String, "TestRestore_bytes1", base.Bytes, "TestRestore_class1", base.Class));
        Assert.True(rds.Restore("TestRestore_null2", rds.Dump("TestRestore_null1")));
        Assert.Equal(rds.Get("TestRestore_null2"), rds.Get("TestRestore_null1"));
        Assert.True(rds.Restore("TestRestore_string2", rds.Dump("TestRestore_string1")));
        Assert.Equal(rds.Get("TestRestore_string2"), rds.Get("TestRestore_string1"));
        Assert.True(rds.Restore("TestRestore_bytes2", rds.Dump("TestRestore_bytes1")));
        Assert.Equal(rds.Get<byte[]>("TestRestore_bytes2"), rds.Get<byte[]>("TestRestore_bytes1"));
        Assert.True(rds.Restore("TestRestore_class2", rds.Dump("TestRestore_class1")));
        Assert.Equal(rds.Get<TestClass>("TestRestore_class2").ToString(), rds.Get<TestClass>("TestRestore_class1").ToString());
    }

    [Fact]
    public void Sort() { } // TODO: not yet implemented

    [Fact]
    public void SortAndStore() { } // TODO: not yet implemented

    [Fact]
    public void Ttl() {
        // TTL is negative for keys without an expiry or that do not exist.
        Assert.True(rds.MSet("TestTtl_null1", base.Null, "TestTtl_string1", base.String, "TestTtl_bytes1", base.Bytes, "TestTtl_class1", base.Class));
        Assert.True(rds.Expire("TestTtl_null1", 10));
        Assert.InRange(rds.Ttl("TestTtl_null1"), 5, 10);
        Assert.InRange(rds.Ttl("TestTtl_null11"), long.MinValue, -1);
    }

    [Fact]
    public void Type() {
        // TYPE reports None for missing keys and String for string values.
        Assert.True(rds.MSet("TestType_null1", base.Null, "TestType_string1", base.String, "TestType_bytes1", base.Bytes, "TestType_class1", base.Class));
        Assert.Equal(CSRedis.KeyType.None, rds.Type("TestType_string111111111123"));
        Assert.Equal(CSRedis.KeyType.String, rds.Type("TestType_string1"));
    }

    [Fact]
    public void Scan() { } // TODO: not yet implemented

}
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 67,974 | src/transformers/models/deberta_v2/modeling_deberta_v2.py | # coding=utf-8
# Copyright 2020 Microsoft and the Hugging Face Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch DeBERTa-v2 model."""
from collections.abc import Sequence
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutput,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import softmax_backward_data
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_deberta_v2 import DebertaV2Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "DebertaV2Config"
_CHECKPOINT_FOR_DOC = "microsoft/deberta-v2-xlarge"
_QA_TARGET_START_INDEX = 2
_QA_TARGET_END_INDEX = 9
DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/deberta-v2-xlarge",
"microsoft/deberta-v2-xxlarge",
"microsoft/deberta-v2-xlarge-mnli",
"microsoft/deberta-v2-xxlarge-mnli",
]
# Copied from transformers.models.deberta.modeling_deberta.ContextPooler
class ContextPooler(nn.Module):
    """Pools a hidden-state sequence down to a single vector.

    The hidden state of the first token is passed through dropout, a dense
    projection of size ``pooler_hidden_size`` and the configured activation.
    """

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
        self.dropout = StableDropout(config.pooler_dropout)
        self.config = config

    def forward(self, hidden_states):
        # "Pooling" keeps only the first token's representation.
        first_token_state = self.dropout(hidden_states[:, 0])
        projected = self.dense(first_token_state)
        return ACT2FN[self.config.pooler_hidden_act](projected)

    @property
    def output_dim(self):
        # NOTE(review): reports hidden_size, while the dense layer outputs
        # pooler_hidden_size — kept as-is to match upstream behavior.
        return self.config.hidden_size
# Copied from transformers.models.deberta.modeling_deberta.XSoftmax with deberta->deberta_v2
class XSoftmax(torch.autograd.Function):
    """
    Masked Softmax which is optimized for saving memory
    Args:
        input (`torch.tensor`): The input tensor that will apply softmax.
        mask (`torch.IntTensor`):
            The mask matrix where 0 indicate that element will be ignored in the softmax calculation.
        dim (int): The dimension that will apply softmax
    Example:
    ```python
    >>> import torch
    >>> from transformers.models.deberta_v2.modeling_deberta_v2 import XSoftmax
    >>> # Make a tensor
    >>> x = torch.randn([4, 20, 100])
    >>> # Create a mask
    >>> mask = (x > 0).int()
    >>> # Specify the dimension to apply softmax
    >>> dim = -1
    >>> y = XSoftmax.apply(x, mask, dim)
    ```"""

    @staticmethod
    def forward(self, input, mask, dim):
        self.dim = dim
        # rmask is True at positions to exclude (mask == 0).
        rmask = ~(mask.to(torch.bool))
        # Fill excluded positions with the most negative finite value so they
        # get ~0 probability, then force them to exactly 0 after the softmax.
        output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min))
        output = torch.softmax(output, self.dim)
        output.masked_fill_(rmask, 0)
        # The softmax output itself is needed again in backward().
        self.save_for_backward(output)
        return output

    @staticmethod
    def backward(self, grad_output):
        (output,) = self.saved_tensors
        inputGrad = softmax_backward_data(self, grad_output, output, self.dim, output)
        # Only `input` receives a gradient; `mask` and `dim` do not.
        return inputGrad, None, None

    @staticmethod
    def symbolic(g, self, mask, dim):
        # ONNX export path: re-express the masked softmax with ONNX graph ops.
        import torch.onnx.symbolic_helper as sym_help
        from torch.onnx.symbolic_opset9 import masked_fill, softmax

        mask_cast_value = g.op("Cast", mask, to_i=sym_help.cast_pytorch_to_onnx["Long"])
        # r_mask = 1 - mask, i.e. the positions to ignore, as a Byte tensor.
        r_mask = g.op(
            "Cast",
            g.op("Sub", g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value),
            to_i=sym_help.cast_pytorch_to_onnx["Byte"],
        )
        output = masked_fill(
            g, self, r_mask, g.op("Constant", value_t=torch.tensor(torch.finfo(self.type().dtype()).min))
        )
        output = softmax(g, output, dim)
        return masked_fill(g, output, r_mask, g.op("Constant", value_t=torch.tensor(0, dtype=torch.bool)))
# Copied from transformers.models.deberta.modeling_deberta.DropoutContext
class DropoutContext(object):
    """Mutable state shared between XDropout invocations.

    Holds the dropout probability, an optionally cached Bernoulli mask, a
    scale factor applied to the probability, and a flag controlling whether
    the cached mask is reused.
    """

    def __init__(self):
        # Defaults: dropout disabled, no cached mask, unit scale, reuse on.
        self.dropout = 0
        self.mask = None
        self.scale = 1
        self.reuse_mask = True
# Copied from transformers.models.deberta.modeling_deberta.get_mask
def get_mask(input, local_context):
    """Resolve the (mask, dropout) pair for an XDropout call.

    `local_context` is either a bare dropout probability or a DropoutContext
    carrying a probability, a scale factor and an optionally cached mask. A
    fresh Bernoulli drop mask (True = drop) is sampled only when needed, and
    is written back into the context the first time one is produced.
    """
    if isinstance(local_context, DropoutContext):
        dropout = local_context.dropout * local_context.scale
        mask = local_context.mask if local_context.reuse_mask else None
    else:
        # A plain probability value was passed instead of a context object.
        dropout = local_context
        mask = None
    if mask is None and dropout > 0:
        mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).to(torch.bool)
    if isinstance(local_context, DropoutContext) and local_context.mask is None:
        local_context.mask = mask
    return mask, dropout
# Copied from transformers.models.deberta.modeling_deberta.XDropout
class XDropout(torch.autograd.Function):
    """Optimized dropout function to save computation and memory by using mask operation instead of multiplication."""

    @staticmethod
    def forward(ctx, input, local_ctx):
        mask, dropout = get_mask(input, local_ctx)
        # Inverted-dropout scaling keeps the expected activation unchanged.
        ctx.scale = 1.0 / (1 - dropout)
        if dropout > 0:
            ctx.save_for_backward(mask)
            return input.masked_fill(mask, 0) * ctx.scale
        else:
            return input

    @staticmethod
    def backward(ctx, grad_output):
        # scale > 1 iff dropout was active in forward(); mirror the same
        # mask-and-rescale on the incoming gradient.
        if ctx.scale > 1:
            (mask,) = ctx.saved_tensors
            return grad_output.masked_fill(mask, 0) * ctx.scale, None
        else:
            return grad_output, None

    @staticmethod
    def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value:
        # ONNX export path: lower to the standard opset-12 Dropout op.
        from torch.onnx import symbolic_opset12

        dropout_p = local_ctx
        if isinstance(local_ctx, DropoutContext):
            dropout_p = local_ctx.dropout
        # StableDropout only calls this function when training.
        train = True
        # TODO: We should check if the opset_version being used to export
        # is > 12 here, but there's no good way to do that. As-is, if the
        # opset_version < 12, export will fail with a CheckerError.
        # Once https://github.com/pytorch/pytorch/issues/78391 is fixed, do something like:
        # if opset_version < 12:
        #     return torch.onnx.symbolic_opset9.dropout(g, input, dropout_p, train)
        return symbolic_opset12.dropout(g, input, dropout_p, train)
# Copied from transformers.models.deberta.modeling_deberta.StableDropout
class StableDropout(nn.Module):
    """
    Optimized dropout module for stabilizing the training
    Args:
        drop_prob (float): the dropout probabilities
    """

    def __init__(self, drop_prob):
        super().__init__()
        self.drop_prob = drop_prob
        self.count = 0
        self.context_stack = None

    def forward(self, x):
        """
        Call the module
        Args:
            x (`torch.tensor`): The input tensor to apply dropout
        """
        # Dropout is a no-op at eval time or when the probability is zero.
        if not self.training or self.drop_prob <= 0:
            return x
        return XDropout.apply(x, self.get_context())

    def clear_context(self):
        # Forget all cached contexts and restart from the beginning.
        self.count = 0
        self.context_stack = None

    def init_context(self, reuse_mask=True, scale=1):
        if self.context_stack is None:
            self.context_stack = []
        self.count = 0
        for c in self.context_stack:
            c.reuse_mask = reuse_mask
            c.scale = scale

    def get_context(self):
        if self.context_stack is None:
            # No explicit context configured: hand back the raw probability.
            return self.drop_prob
        # Grow the stack lazily; hand out the next context in call order.
        if self.count >= len(self.context_stack):
            self.context_stack.append(DropoutContext())
        ctx = self.context_stack[self.count]
        ctx.dropout = self.drop_prob
        self.count += 1
        return ctx
# Copied from transformers.models.deberta.modeling_deberta.DebertaSelfOutput with DebertaLayerNorm->LayerNorm
class DebertaV2SelfOutput(nn.Module):
    """Attention output block: dense projection, dropout, residual LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        # Project and regularize, then normalize the residual sum.
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
# Copied from transformers.models.deberta.modeling_deberta.DebertaAttention with Deberta->DebertaV2
class DebertaV2Attention(nn.Module):
    """Disentangled self-attention followed by the attention output projection."""

    def __init__(self, config):
        super().__init__()
        self.self = DisentangledSelfAttention(config)
        self.output = DebertaV2SelfOutput(config)
        self.config = config

    def forward(
        self,
        hidden_states,
        attention_mask,
        output_attentions=False,
        query_states=None,
        relative_pos=None,
        rel_embeddings=None,
    ):
        self_output = self.self(
            hidden_states,
            attention_mask,
            output_attentions,
            query_states=query_states,
            relative_pos=relative_pos,
            rel_embeddings=rel_embeddings,
        )
        att_matrix = None
        if output_attentions:
            # With attentions requested, the module returns a (states, probs) pair.
            self_output, att_matrix = self_output
        # The residual uses query_states when supplied, else the layer input.
        residual = query_states if query_states is not None else hidden_states
        attention_output = self.output(self_output, residual)
        if output_attentions:
            return (attention_output, att_matrix)
        return attention_output
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->DebertaV2
class DebertaV2Intermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size + activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # The activation may be given by name (looked up in ACT2FN) or as a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.intermediate_act_fn(self.dense(hidden_states))
# Copied from transformers.models.deberta.modeling_deberta.DebertaOutput with DebertaLayerNorm->LayerNorm
class DebertaV2Output(nn.Module):
    """Feed-forward contraction: intermediate_size -> hidden_size, dropout, residual LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)
        self.config = config

    def forward(self, hidden_states, input_tensor):
        # Project back down, regularize, then normalize the residual sum.
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
# Copied from transformers.models.deberta.modeling_deberta.DebertaLayer with Deberta->DebertaV2
class DebertaV2Layer(nn.Module):
    """One transformer layer: disentangled attention plus feed-forward block."""

    def __init__(self, config):
        super().__init__()
        self.attention = DebertaV2Attention(config)
        self.intermediate = DebertaV2Intermediate(config)
        self.output = DebertaV2Output(config)

    def forward(
        self,
        hidden_states,
        attention_mask,
        query_states=None,
        relative_pos=None,
        rel_embeddings=None,
        output_attentions=False,
    ):
        attention_output = self.attention(
            hidden_states,
            attention_mask,
            output_attentions=output_attentions,
            query_states=query_states,
            relative_pos=relative_pos,
            rel_embeddings=rel_embeddings,
        )
        att_matrix = None
        if output_attentions:
            attention_output, att_matrix = attention_output
        # FFN with the residual connection handled inside self.output.
        layer_output = self.output(self.intermediate(attention_output), attention_output)
        if output_attentions:
            return (layer_output, att_matrix)
        return layer_output
class ConvLayer(nn.Module):
    """Convolution branch merged in after the first encoder layer.

    Runs a 1D convolution over the token dimension, zeroes padded positions,
    applies dropout + activation, and LayerNorms the sum with the residual
    stream. Positions where ``input_mask == 0`` are zeroed in the output.
    """

    def __init__(self, config):
        super().__init__()
        kernel_size = getattr(config, "conv_kernel_size", 3)
        groups = getattr(config, "conv_groups", 1)
        self.conv_act = getattr(config, "conv_act", "tanh")
        # (kernel_size - 1) // 2 gives "same" padding for odd kernel sizes.
        self.conv = nn.Conv1d(
            config.hidden_size, config.hidden_size, kernel_size, padding=(kernel_size - 1) // 2, groups=groups
        )
        self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)
        self.config = config

    def forward(self, hidden_states, residual_states, input_mask):
        # Conv1d expects (batch, channels, length); transpose in and back out.
        out = self.conv(hidden_states.permute(0, 2, 1).contiguous()).permute(0, 2, 1).contiguous()
        if input_mask is not None:
            # Bug fix: the original computed (1 - input_mask) *before* the
            # `input_mask is None` check below, so passing None crashed and
            # the None branch was unreachable. Guard the masking instead.
            rmask = (1 - input_mask).bool()
            out.masked_fill_(rmask.unsqueeze(-1).expand(out.size()), 0)
        out = ACT2FN[self.conv_act](self.dropout(out))
        layer_norm_input = residual_states + out
        output = self.LayerNorm(layer_norm_input).to(layer_norm_input)
        if input_mask is None:
            output_states = output
        else:
            if input_mask.dim() != layer_norm_input.dim():
                # Collapse a broadcast 4D attention mask down to (batch, seq),
                # then add a trailing dim so it broadcasts over hidden_size.
                if input_mask.dim() == 4:
                    input_mask = input_mask.squeeze(1).squeeze(1)
                input_mask = input_mask.unsqueeze(2)
            input_mask = input_mask.to(output.dtype)
            output_states = output * input_mask
        return output_states
class DebertaV2Encoder(nn.Module):
    """Modified BertEncoder with relative position bias support"""

    def __init__(self, config):
        super().__init__()
        self.layer = nn.ModuleList([DebertaV2Layer(config) for _ in range(config.num_hidden_layers)])
        self.relative_attention = getattr(config, "relative_attention", False)
        if self.relative_attention:
            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings
            self.position_buckets = getattr(config, "position_buckets", -1)
            pos_ebd_size = self.max_relative_positions * 2
            if self.position_buckets > 0:
                # With bucketing, relative distances fold into 2 * position_buckets bins.
                pos_ebd_size = self.position_buckets * 2
            self.rel_embeddings = nn.Embedding(pos_ebd_size, config.hidden_size)
        # norm_rel_ebd is a "|"-separated option list, e.g. "layer_norm".
        self.norm_rel_ebd = [x.strip() for x in getattr(config, "norm_rel_ebd", "none").lower().split("|")]
        if "layer_norm" in self.norm_rel_ebd:
            self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True)
        self.conv = ConvLayer(config) if getattr(config, "conv_kernel_size", 0) > 0 else None
        self.gradient_checkpointing = False

    def get_rel_embedding(self):
        # Relative-position embedding table, optionally layer-normed.
        rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
        if rel_embeddings is not None and ("layer_norm" in self.norm_rel_ebd):
            rel_embeddings = self.LayerNorm(rel_embeddings)
        return rel_embeddings

    def get_attention_mask(self, attention_mask):
        if attention_mask.dim() <= 2:
            # Expand a (batch, seq) padding mask to a (batch, 1, seq, seq)
            # pairwise mask: position (i, j) is valid iff both i and j are.
            extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
            attention_mask = attention_mask.byte()
        elif attention_mask.dim() == 3:
            attention_mask = attention_mask.unsqueeze(1)
        return attention_mask

    def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
        # Build relative positions lazily when relative attention is on and
        # the caller did not supply them.
        if self.relative_attention and relative_pos is None:
            q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)
            relative_pos = build_relative_position(
                q,
                hidden_states.size(-2),
                bucket_size=self.position_buckets,
                max_position=self.max_relative_positions,
                device=hidden_states.device,
            )
        return relative_pos

    def forward(
        self,
        hidden_states,
        attention_mask,
        output_hidden_states=True,
        output_attentions=False,
        query_states=None,
        relative_pos=None,
        return_dict=True,
    ):
        if attention_mask.dim() <= 2:
            input_mask = attention_mask
        else:
            # Recover a (batch, seq) padding mask from an extended mask.
            input_mask = (attention_mask.sum(-2) > 0).byte()
        attention_mask = self.get_attention_mask(attention_mask)
        relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        if isinstance(hidden_states, Sequence):
            # A sequence of per-layer states may be supplied (EMD decoding);
            # the first entry seeds the key/value stream.
            next_kv = hidden_states[0]
        else:
            next_kv = hidden_states
        rel_embeddings = self.get_rel_embedding()
        output_states = next_kv
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (output_states,)
            if self.gradient_checkpointing and self.training:
                # Checkpointing path: positional args only, recomputed in backward.

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)

                    return custom_forward

                output_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    next_kv,
                    attention_mask,
                    query_states,
                    relative_pos,
                    rel_embeddings,
                )
            else:
                output_states = layer_module(
                    next_kv,
                    attention_mask,
                    query_states=query_states,
                    relative_pos=relative_pos,
                    rel_embeddings=rel_embeddings,
                    output_attentions=output_attentions,
                )
            if output_attentions:
                output_states, att_m = output_states
            if i == 0 and self.conv is not None:
                # The convolution branch is merged in after the first layer only.
                output_states = self.conv(hidden_states, output_states, input_mask)
            if query_states is not None:
                query_states = output_states
                if isinstance(hidden_states, Sequence):
                    next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
            else:
                next_kv = output_states
            if output_attentions:
                all_attentions = all_attentions + (att_m,)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (output_states,)
        if not return_dict:
            return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions
        )
def make_log_bucket_position(relative_pos, bucket_size, max_position):
    """Map signed relative positions into log-spaced buckets.

    Offsets within +/- bucket_size/2 keep their exact value; larger offsets
    are compressed logarithmically up to max_position, preserving sign.
    """
    sign = torch.sign(relative_pos)
    mid = bucket_size // 2
    # Inside the linear window, clamp to mid - 1 so the log below stays
    # finite; those entries are overwritten with the exact offset anyway.
    abs_pos = torch.where(
        (relative_pos < mid) & (relative_pos > -mid),
        torch.tensor(mid - 1).type_as(relative_pos),
        torch.abs(relative_pos),
    )
    log_scale = torch.log(torch.tensor((max_position - 1) / mid))
    log_pos = torch.ceil(torch.log(abs_pos / mid) / log_scale * (mid - 1)) + mid
    # Exact offsets in the linear range, signed log buckets outside it.
    bucket_pos = torch.where(abs_pos <= mid, relative_pos.type_as(log_pos), log_pos * sign)
    return bucket_pos
def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1, device=None):
    """
    Build relative position according to the query and key
    We assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key
    \\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q -
    P_k\\)
    Args:
        query_size (int): the length of query
        key_size (int): the length of key
        bucket_size (int): the size of position bucket
        max_position (int): the maximum allowed absolute position
        device (`torch.device`): the device on which tensors will be created.
    Return:
        `torch.LongTensor`: A tensor with shape [1, query_size, key_size]
    """
    query_ids = torch.arange(0, query_size, device=device)
    key_ids = torch.arange(0, key_size, device=device)
    # rel[i, j] = i - j: signed distance from query position i to key position j.
    rel_pos_ids = query_ids[:, None] - key_ids[None, :]
    if bucket_size > 0 and max_position > 0:
        # Fold large distances into log-spaced buckets.
        rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)
    rel_pos_ids = rel_pos_ids.to(torch.long)[:query_size, :].unsqueeze(0)
    return rel_pos_ids
@torch.jit.script
# Copied from transformers.models.deberta.modeling_deberta.c2p_dynamic_expand
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
    # Broadcast the content->position indices so their shape matches
    # query_layer's first three dims with relative_pos's width as the last
    # dim, ready to be used as a torch.gather index.
    return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])
@torch.jit.script
# Copied from transformers.models.deberta.modeling_deberta.p2c_dynamic_expand
def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
    # Broadcast the position index tensor to a key_len x key_len gather index.
    # NOTE(review): the parameter is named c2p_pos but, judging by the p2c
    # function name, it appears to carry position->content indices — confirm
    # against the upstream deberta implementation.
    return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])
@torch.jit.script
# Copied from transformers.models.deberta.modeling_deberta.pos_dynamic_expand
def pos_dynamic_expand(pos_index, p2c_att, key_layer):
    # Broadcast pos_index over p2c_att's leading two dims with key length as
    # the final dim, for use as a torch.gather index.
    return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))
class DisentangledSelfAttention(nn.Module):
    """
    Disentangled self-attention module
    Parameters:
        config (`DebertaV2Config`):
            A model config class instance with the configuration to build a new model. The schema is similar to
            *BertConfig*, for more details, please refer [`DebertaV2Config`]
    """

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        _attention_head_size = config.hidden_size // config.num_attention_heads
        self.attention_head_size = getattr(config, "attention_head_size", _attention_head_size)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
        self.key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
        self.value_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
        # share_att_key: reuse query/key projections for positional embeddings
        # instead of the dedicated pos_key_proj / pos_query_proj below.
        self.share_att_key = getattr(config, "share_att_key", False)
        self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
        self.relative_attention = getattr(config, "relative_attention", False)
        if self.relative_attention:
            self.position_buckets = getattr(config, "position_buckets", -1)
            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings
            # Attention span for the positional bias: bucket count if
            # bucketing is enabled, otherwise the max relative distance.
            self.pos_ebd_size = self.max_relative_positions
            if self.position_buckets > 0:
                self.pos_ebd_size = self.position_buckets
            self.pos_dropout = StableDropout(config.hidden_dropout_prob)
            if not self.share_att_key:
                if "c2p" in self.pos_att_type:
                    self.pos_key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
                if "p2c" in self.pos_att_type:
                    self.pos_query_proj = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = StableDropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x, attention_heads):
        # (batch, seq, all_head) -> (batch * heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (attention_heads, -1)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3).contiguous().view(-1, x.size(1), x.size(-1))

    def forward(
        self,
        hidden_states,
        attention_mask,
        output_attentions=False,
        query_states=None,
        relative_pos=None,
        rel_embeddings=None,
    ):
        """
        Call the module
        Args:
            hidden_states (`torch.FloatTensor`):
                Input states to the module usually the output from previous layer, it will be the Q,K and V in
                *Attention(Q,K,V)*
            attention_mask (`torch.ByteTensor`):
                An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
                sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*
                th token.
            output_attentions (`bool`, optional):
                Whether return the attention matrix.
            query_states (`torch.FloatTensor`, optional):
                The *Q* state in *Attention(Q,K,V)*.
            relative_pos (`torch.LongTensor`):
                The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
                values ranging in [*-max_relative_positions*, *max_relative_positions*].
            rel_embeddings (`torch.FloatTensor`):
                The embedding of relative distances. It's a tensor of shape [\\(2 \\times
                \\text{max_relative_positions}\\), *hidden_size*].
        """
        if query_states is None:
            query_states = hidden_states
        query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads)
        key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads)
        value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)
        rel_att = None
        # Take the dot product between "query" and "key" to get the raw attention scores.
        # scale_factor counts the attention components (content-to-content plus
        # each enabled disentangled term) so the 1/sqrt(d * factor) scaling matches.
        scale_factor = 1
        if "c2p" in self.pos_att_type:
            scale_factor += 1
        if "p2c" in self.pos_att_type:
            scale_factor += 1
        scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor)
        attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2)) / scale.to(dtype=query_layer.dtype)
        if self.relative_attention:
            rel_embeddings = self.pos_dropout(rel_embeddings)
            rel_att = self.disentangled_attention_bias(
                query_layer, key_layer, relative_pos, rel_embeddings, scale_factor
            )
        if rel_att is not None:
            attention_scores = attention_scores + rel_att
        # NOTE(review): redundant self-assignment below, kept as-is.
        attention_scores = attention_scores
        attention_scores = attention_scores.view(
            -1, self.num_attention_heads, attention_scores.size(-2), attention_scores.size(-1)
        )
        # bsz x height x length x dimension
        attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.bmm(
            attention_probs.view(-1, attention_probs.size(-2), attention_probs.size(-1)), value_layer
        )
        # (batch * heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = (
            context_layer.view(-1, self.num_attention_heads, context_layer.size(-2), context_layer.size(-1))
            .permute(0, 2, 1, 3)
            .contiguous()
        )
        new_context_layer_shape = context_layer.size()[:-2] + (-1,)
        context_layer = context_layer.view(new_context_layer_shape)
        if output_attentions:
            return (context_layer, attention_probs)
        else:
            return context_layer

    def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
        # Compute the content->position and position->content bias terms that
        # are added to the content->content attention scores.
        if relative_pos is None:
            q = query_layer.size(-2)
            relative_pos = build_relative_position(
                q,
                key_layer.size(-2),
                bucket_size=self.position_buckets,
                max_position=self.max_relative_positions,
                device=query_layer.device,
            )
        if relative_pos.dim() == 2:
            relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)
        elif relative_pos.dim() == 3:
            relative_pos = relative_pos.unsqueeze(1)
        # bsz x height x query x key
        elif relative_pos.dim() != 4:
            raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}")
        att_span = self.pos_ebd_size
        relative_pos = relative_pos.long().to(query_layer.device)
        rel_embeddings = rel_embeddings[0 : att_span * 2, :].unsqueeze(0)
        if self.share_att_key:
            # Reuse the content query/key projections for the position embeddings.
            pos_query_layer = self.transpose_for_scores(
                self.query_proj(rel_embeddings), self.num_attention_heads
            ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)
            pos_key_layer = self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads).repeat(
                query_layer.size(0) // self.num_attention_heads, 1, 1
            )
        else:
            if "c2p" in self.pos_att_type:
                pos_key_layer = self.transpose_for_scores(
                    self.pos_key_proj(rel_embeddings), self.num_attention_heads
                ).repeat(
                    query_layer.size(0) // self.num_attention_heads, 1, 1
                )  # .split(self.all_head_size, dim=-1)
            if "p2c" in self.pos_att_type:
                pos_query_layer = self.transpose_for_scores(
                    self.pos_query_proj(rel_embeddings), self.num_attention_heads
                ).repeat(
                    query_layer.size(0) // self.num_attention_heads, 1, 1
                )  # .split(self.all_head_size, dim=-1)
        score = 0
        # content->position
        if "c2p" in self.pos_att_type:
            scale = torch.sqrt(torch.tensor(pos_key_layer.size(-1), dtype=torch.float) * scale_factor)
            c2p_att = torch.bmm(query_layer, pos_key_layer.transpose(-1, -2))
            # Shift relative positions into [0, 2 * att_span) for gathering.
            c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)
            c2p_att = torch.gather(
                c2p_att,
                dim=-1,
                index=c2p_pos.squeeze(0).expand([query_layer.size(0), query_layer.size(1), relative_pos.size(-1)]),
            )
            score += c2p_att / scale.to(dtype=c2p_att.dtype)
        # position->content
        if "p2c" in self.pos_att_type:
            scale = torch.sqrt(torch.tensor(pos_query_layer.size(-1), dtype=torch.float) * scale_factor)
            if key_layer.size(-2) != query_layer.size(-2):
                # Query/key lengths differ: rebuild key-to-key relative positions.
                r_pos = build_relative_position(
                    key_layer.size(-2),
                    key_layer.size(-2),
                    bucket_size=self.position_buckets,
                    max_position=self.max_relative_positions,
                    device=query_layer.device,
                )
                r_pos = r_pos.unsqueeze(0)
            else:
                r_pos = relative_pos
            # Negated positions: the p2c term indexes by key-to-query distance.
            p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)
            p2c_att = torch.bmm(key_layer, pos_query_layer.transpose(-1, -2))
            p2c_att = torch.gather(
                p2c_att,
                dim=-1,
                index=p2c_pos.squeeze(0).expand([query_layer.size(0), key_layer.size(-2), key_layer.size(-2)]),
            ).transpose(-1, -2)
            score += p2c_att / scale.to(dtype=p2c_att.dtype)
        return score
# Copied from transformers.models.deberta.modeling_deberta.DebertaEmbeddings with DebertaLayerNorm->LayerNorm
class DebertaV2Embeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        pad_token_id = getattr(config, "pad_token_id", 0)
        # The embedding width may differ from hidden_size; a bias-free
        # projection (embed_proj) bridges the two when they differ.
        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
        self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)
        self.position_biased_input = getattr(config, "position_biased_input", True)
        if not self.position_biased_input:
            self.position_embeddings = None
        else:
            self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)
        if config.type_vocab_size > 0:
            self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)
        if self.embedding_size != config.hidden_size:
            self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)
        self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)
        self.config = config
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):
        # Exactly one of input_ids / inputs_embeds is expected to be provided.
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1]
        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        if self.position_embeddings is not None:
            position_embeddings = self.position_embeddings(position_ids.long())
        else:
            position_embeddings = torch.zeros_like(inputs_embeds)
        embeddings = inputs_embeds
        # Position/segment embeddings are added only when configured.
        if self.position_biased_input:
            embeddings += position_embeddings
        if self.config.type_vocab_size > 0:
            token_type_embeddings = self.token_type_embeddings(token_type_ids)
            embeddings += token_type_embeddings
        if self.embedding_size != self.config.hidden_size:
            embeddings = self.embed_proj(embeddings)
        embeddings = self.LayerNorm(embeddings)
        if mask is not None:
            # Zero out padding positions; collapse a 4D broadcast mask first.
            if mask.dim() != embeddings.dim():
                if mask.dim() == 4:
                    mask = mask.squeeze(1).squeeze(1)
                mask = mask.unsqueeze(2)
            mask = mask.to(embeddings.dtype)
            embeddings = embeddings * mask
        embeddings = self.dropout(embeddings)
        return embeddings
# Copied from transformers.models.deberta.modeling_deberta.DebertaPreTrainedModel with Deberta->DebertaV2
class DebertaV2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = DebertaV2Config
    base_model_prefix = "deberta"
    _keys_to_ignore_on_load_missing = ["position_ids"]
    _keys_to_ignore_on_load_unexpected = ["position_embeddings"]
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # Keep the padding token's embedding at exactly zero.
                module.weight.data[module.padding_idx].zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        # Gradient checkpointing is toggled on the encoder module only.
        if isinstance(module, DebertaV2Encoder):
            module.gradient_checkpointing = value
DEBERTA_START_DOCSTRING = r"""
The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's build
on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two
improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`DebertaV2Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
DEBERTA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
    DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_deberta.DebertaModel with Deberta->DebertaV2
class DebertaV2Model(DebertaV2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.embeddings = DebertaV2Embeddings(config)
        self.encoder = DebertaV2Encoder(config)
        # Number of extra passes through the (weight-shared) last encoder layer
        # ("enhanced mask decoder" steps). Hard-coded to 0 here, so the z-steps
        # loop in `forward` is skipped unless a caller changes this attribute.
        self.z_steps = 0
        self.config = config
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        # Exposed for weight tying and `resize_token_embeddings`.
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, new_embeddings):
        self.embeddings.word_embeddings = new_embeddings
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        raise NotImplementedError("The prune function is not implemented in DeBERTa model.")
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        # Fall back to the config for any output flag the caller left unset.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Exactly one of `input_ids` / `inputs_embeds` must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # Defaults: attend to every position; all tokens belong to segment 0.
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        embedding_output = self.embeddings(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            mask=attention_mask,
            inputs_embeds=inputs_embeds,
        )
        # `output_hidden_states=True` is forced here (independent of the user's
        # flag) because the per-layer states are needed below; the user's flag
        # only controls what is returned to the caller.
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask,
            output_hidden_states=True,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )
        # Index 1 is the per-layer hidden states in both the plain-tuple and
        # ModelOutput forms (ModelOutput supports integer indexing).
        encoded_layers = encoder_outputs[1]
        # Enhanced-mask-decoder loop: re-run the weight-shared last encoder
        # layer `z_steps - 1` extra times (`layers[1:]`), feeding the previous
        # pass's output back in as the query states. Skipped while z_steps == 0.
        if self.z_steps > 1:
            hidden_states = encoded_layers[-2]
            layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]
            query_states = encoded_layers[-1]
            rel_embeddings = self.encoder.get_rel_embedding()
            attention_mask = self.encoder.get_attention_mask(attention_mask)
            rel_pos = self.encoder.get_rel_pos(embedding_output)
            for layer in layers[1:]:
                query_states = layer(
                    hidden_states,
                    attention_mask,
                    output_attentions=False,
                    query_states=query_states,
                    relative_pos=rel_pos,
                    rel_embeddings=rel_embeddings,
                )
                encoded_layers.append(query_states)
        sequence_output = encoded_layers[-1]
        if not return_dict:
            # Drop the always-computed hidden states from the tuple unless the
            # caller explicitly asked for them (slice from 2 skips them).
            return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]
        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
class DebertaV2ForMaskedLM(DebertaV2PreTrainedModel):
    # The pooler is not used by this head; silently drop it from checkpoints.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    # Decoder weight/bias may be absent in checkpoints because they are tied to
    # the input embeddings at load time.
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias", "cls.predictions.decoder.weight"]
    def __init__(self, config):
        super().__init__(config)
        self.deberta = DebertaV2Model(config)
        self.cls = DebertaV2OnlyMLMHead(config)
        # Initialize weights and apply final processing
        self.post_init()
    def get_output_embeddings(self):
        # Exposed so the base class can tie the decoder to the input embeddings.
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
        mask="[MASK]",
    )
    # Copied from transformers.models.deberta.modeling_deberta.DebertaForMaskedLM.forward with Deberta->DebertaV2
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.deberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        # Vocabulary logits for every position: (batch, seq_len, vocab_size).
        prediction_scores = self.cls(sequence_output)
        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            # Tuple form: (loss?, logits, *model extras).
            output = (prediction_scores,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
# copied from transformers.models.bert.BertPredictionHeadTransform with bert -> deberta
class DebertaV2PredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
# copied from transformers.models.bert.BertLMPredictionHead with bert -> deberta
class DebertaV2LMPredictionHead(nn.Module):
    """Masked-LM head: a transform followed by a vocabulary-size decoder projection.

    The decoder weight matrix mirrors the input embeddings (tied elsewhere); the
    per-token output bias is the only parameter owned exclusively by this head.
    """

    def __init__(self, config):
        super().__init__()
        self.transform = DebertaV2PredictionHeadTransform(config)
        # The decoder is created bias-free; the separate bias parameter below is
        # attached to it afterwards.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Link the two so the bias is correctly resized with `resize_token_embeddings`.
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        """Return vocabulary logits of shape `(..., vocab_size)`."""
        return self.decoder(self.transform(hidden_states))
# copied from transformers.models.bert.BertOnlyMLMHead with bert -> deberta
class DebertaV2OnlyMLMHead(nn.Module):
    """Thin wrapper exposing the MLM prediction head under `self.predictions`."""

    def __init__(self, config):
        super().__init__()
        self.predictions = DebertaV2LMPredictionHead(config)

    def forward(self, sequence_output):
        """Map encoder output to vocabulary prediction scores."""
        return self.predictions(sequence_output)
@add_start_docstrings(
    """
    DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
class DebertaV2ForSequenceClassification(DebertaV2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # Default to binary classification when the config omits `num_labels`.
        num_labels = getattr(config, "num_labels", 2)
        self.num_labels = num_labels
        self.deberta = DebertaV2Model(config)
        self.pooler = ContextPooler(config)
        output_dim = self.pooler.output_dim
        self.classifier = nn.Linear(output_dim, num_labels)
        # Classifier dropout: `cls_dropout` if configured, else the model-wide
        # hidden dropout probability.
        drop_out = getattr(config, "cls_dropout", None)
        drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
        self.dropout = StableDropout(drop_out)
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        return self.deberta.get_input_embeddings()
    def set_input_embeddings(self, new_embeddings):
        self.deberta.set_input_embeddings(new_embeddings)
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    # Copied from transformers.models.deberta.modeling_deberta.DebertaForSequenceClassification.forward with Deberta->DebertaV2
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.deberta(
            input_ids,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        encoder_layer = outputs[0]
        pooled_output = self.pooler(encoder_layer)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Legacy DeBERTa loss selection, used when `problem_type` is unset.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    # regression task
                    loss_fn = nn.MSELoss()
                    logits = logits.view(-1).to(labels.dtype)
                    loss = loss_fn(logits, labels.view(-1))
                elif labels.dim() == 1 or labels.size(-1) == 1:
                    # Hard-label classification. Rows with a negative label are
                    # treated as padding and excluded from the loss.
                    label_index = (labels >= 0).nonzero()
                    labels = labels.long()
                    if label_index.size(0) > 0:
                        # Keep only the logit rows whose label is valid.
                        labeled_logits = torch.gather(
                            logits, 0, label_index.expand(label_index.size(0), logits.size(1))
                        )
                        labels = torch.gather(labels, 0, label_index.view(-1))
                        loss_fct = CrossEntropyLoss()
                        loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))
                    else:
                        # Every label was negative: zero loss on the logits' device/dtype.
                        loss = torch.tensor(0).to(logits)
                else:
                    # Soft labels: `labels` is a per-class distribution; use the
                    # cross-entropy between it and the predicted log-probabilities.
                    log_softmax = nn.LogSoftmax(-1)
                    loss = -((log_softmax(logits) * labels).sum(-1)).mean()
            elif self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
@add_start_docstrings(
    """
    DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_deberta.DebertaForTokenClassification with Deberta->DebertaV2
class DebertaV2ForTokenClassification(DebertaV2PreTrainedModel):
    # The pooler is not used for token-level classification; drop it on load.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.deberta = DebertaV2Model(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Per-token classifier over the encoder's hidden states.
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.deberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        # Logits per token: (batch, seq_len, num_labels).
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            # CrossEntropyLoss ignores positions labeled -100 by default.
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return TokenClassifierOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
@add_start_docstrings(
    """
    DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    DEBERTA_START_DOCSTRING,
)
class DebertaV2ForQuestionAnswering(DebertaV2PreTrainedModel):
    # The pooler is not used by the QA head; drop it from loaded checkpoints.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.deberta = DebertaV2Model(config)
        # Projects each token's hidden state to (start_logit, end_logit).
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
        qa_target_start_index=_QA_TARGET_START_INDEX,
        qa_target_end_index=_QA_TARGET_END_INDEX,
    )
    # Copied from transformers.models.deberta.modeling_deberta.DebertaForQuestionAnswering.forward with Deberta->DebertaV2
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.deberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        # Split the 2-channel projection into per-token start and end logits.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            # (clamping to seq_len and ignoring that index excludes them from the loss)
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # Average the two span-boundary losses.
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            output = (start_logits, end_logits) + outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output
        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    DeBERTa Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
class DebertaV2ForMultipleChoice(DebertaV2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # Default to 2 when the config omits `num_labels`.
        num_labels = getattr(config, "num_labels", 2)
        self.num_labels = num_labels
        self.deberta = DebertaV2Model(config)
        self.pooler = ContextPooler(config)
        output_dim = self.pooler.output_dim
        # One score per choice; choices compete against each other after the
        # reshape to (batch_size, num_choices) in `forward`.
        self.classifier = nn.Linear(output_dim, 1)
        # Classifier dropout: `cls_dropout` if configured, else the model-wide
        # hidden dropout probability.
        drop_out = getattr(config, "cls_dropout", None)
        drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
        self.dropout = StableDropout(drop_out)
        # Initialize weights and apply final processing.
        # (Replaces the deprecated `self.init_weights()` call for consistency
        # with every other head class in this file; `post_init` runs
        # `init_weights` plus backward-compatibility fixups.)
        self.post_init()
    def get_input_embeddings(self):
        return self.deberta.get_input_embeddings()
    def set_input_embeddings(self, new_embeddings):
        self.deberta.set_input_embeddings(new_embeddings)
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MultipleChoiceModelOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        # Flatten (batch, num_choices, seq_len) -> (batch * num_choices, seq_len)
        # so each choice runs through the encoder as an independent sequence.
        flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        flat_inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.deberta(
            flat_input_ids,
            position_ids=flat_position_ids,
            token_type_ids=flat_token_type_ids,
            attention_mask=flat_attention_mask,
            inputs_embeds=flat_inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        encoder_layer = outputs[0]
        pooled_output = self.pooler(encoder_layer)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Regroup the per-sequence scores back into (batch_size, num_choices).
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
|
2877025939/tabelVew-CollectionView | 3,776 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/Masonry/Masonry/View+MASShorthandAdditions.h | //
// UIView+MASShorthandAdditions.h
// Masonry
//
// Created by Jonas Budelmann on 22/07/13.
// Copyright (c) 2013 Jonas Budelmann. All rights reserved.
//
#import "View+MASAdditions.h"
#ifdef MAS_SHORTHAND
/**
* Shorthand view additions without the 'mas_' prefixes,
* only enabled if MAS_SHORTHAND is defined
*/
@interface MAS_VIEW (MASShorthandAdditions)
// Unprefixed aliases for the `mas_`-prefixed MASViewAttribute properties
// declared in View+MASAdditions.h; each getter simply forwards to its
// `mas_` counterpart (see MAS_ATTR_FORWARD below).
@property (nonatomic, strong, readonly) MASViewAttribute *left;
@property (nonatomic, strong, readonly) MASViewAttribute *top;
@property (nonatomic, strong, readonly) MASViewAttribute *right;
@property (nonatomic, strong, readonly) MASViewAttribute *bottom;
@property (nonatomic, strong, readonly) MASViewAttribute *leading;
@property (nonatomic, strong, readonly) MASViewAttribute *trailing;
@property (nonatomic, strong, readonly) MASViewAttribute *width;
@property (nonatomic, strong, readonly) MASViewAttribute *height;
@property (nonatomic, strong, readonly) MASViewAttribute *centerX;
@property (nonatomic, strong, readonly) MASViewAttribute *centerY;
@property (nonatomic, strong, readonly) MASViewAttribute *baseline;
// Block that builds an attribute for an arbitrary NSLayoutAttribute value.
@property (nonatomic, strong, readonly) MASViewAttribute *(^attribute)(NSLayoutAttribute attr);
// First/last baseline attributes require iOS 8 / tvOS 9 / macOS 10.11.
#if (__IPHONE_OS_VERSION_MIN_REQUIRED >= 80000) || (__TV_OS_VERSION_MIN_REQUIRED >= 9000) || (__MAC_OS_X_VERSION_MIN_REQUIRED >= 101100)
@property (nonatomic, strong, readonly) MASViewAttribute *firstBaseline;
@property (nonatomic, strong, readonly) MASViewAttribute *lastBaseline;
#endif
// Layout-margin attributes exist only on UIKit platforms.
#if TARGET_OS_IPHONE || TARGET_OS_TV
@property (nonatomic, strong, readonly) MASViewAttribute *leftMargin;
@property (nonatomic, strong, readonly) MASViewAttribute *rightMargin;
@property (nonatomic, strong, readonly) MASViewAttribute *topMargin;
@property (nonatomic, strong, readonly) MASViewAttribute *bottomMargin;
@property (nonatomic, strong, readonly) MASViewAttribute *leadingMargin;
@property (nonatomic, strong, readonly) MASViewAttribute *trailingMargin;
@property (nonatomic, strong, readonly) MASViewAttribute *centerXWithinMargins;
@property (nonatomic, strong, readonly) MASViewAttribute *centerYWithinMargins;
#endif
// Shorthand equivalents of mas_makeConstraints: / mas_updateConstraints: /
// mas_remakeConstraints:; each returns the MASConstraints that were created.
- (NSArray *)makeConstraints:(void(^)(MASConstraintMaker *make))block;
- (NSArray *)updateConstraints:(void(^)(MASConstraintMaker *make))block;
- (NSArray *)remakeConstraints:(void(^)(MASConstraintMaker *make))block;
@end
// Generates a getter that forwards the unprefixed attribute name to its
// `mas_`-prefixed counterpart (e.g. `top` -> `mas_top`).
#define MAS_ATTR_FORWARD(attr) \
- (MASViewAttribute *)attr { \
    return [self mas_##attr]; \
}
@implementation MAS_VIEW (MASShorthandAdditions)
MAS_ATTR_FORWARD(top);
MAS_ATTR_FORWARD(left);
MAS_ATTR_FORWARD(bottom);
MAS_ATTR_FORWARD(right);
MAS_ATTR_FORWARD(leading);
MAS_ATTR_FORWARD(trailing);
MAS_ATTR_FORWARD(width);
MAS_ATTR_FORWARD(height);
MAS_ATTR_FORWARD(centerX);
MAS_ATTR_FORWARD(centerY);
MAS_ATTR_FORWARD(baseline);
// Forwarders gated by the same availability conditions as their declarations.
#if (__IPHONE_OS_VERSION_MIN_REQUIRED >= 80000) || (__TV_OS_VERSION_MIN_REQUIRED >= 9000) || (__MAC_OS_X_VERSION_MIN_REQUIRED >= 101100)
MAS_ATTR_FORWARD(firstBaseline);
MAS_ATTR_FORWARD(lastBaseline);
#endif
#if TARGET_OS_IPHONE || TARGET_OS_TV
MAS_ATTR_FORWARD(leftMargin);
MAS_ATTR_FORWARD(rightMargin);
MAS_ATTR_FORWARD(topMargin);
MAS_ATTR_FORWARD(bottomMargin);
MAS_ATTR_FORWARD(leadingMargin);
MAS_ATTR_FORWARD(trailingMargin);
MAS_ATTR_FORWARD(centerXWithinMargins);
MAS_ATTR_FORWARD(centerYWithinMargins);
#endif
// The block property and constraint-making methods forward explicitly because
// the macro only handles plain MASViewAttribute getters.
- (MASViewAttribute *(^)(NSLayoutAttribute))attribute {
    return [self mas_attribute];
}
- (NSArray *)makeConstraints:(void(^)(MASConstraintMaker *))block {
    return [self mas_makeConstraints:block];
}
- (NSArray *)updateConstraints:(void(^)(MASConstraintMaker *))block {
    return [self mas_updateConstraints:block];
}
- (NSArray *)remakeConstraints:(void(^)(MASConstraintMaker *))block {
    return [self mas_remakeConstraints:block];
}
@end
#endif
|
2877025939/tabelVew-CollectionView | 1,302 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/Masonry/Masonry/ViewController+MASAdditions.m | //
// UIViewController+MASAdditions.m
// Masonry
//
// Created by Craig Siemens on 2015-06-23.
//
//
#import "ViewController+MASAdditions.h"
#ifdef MAS_VIEW_CONTROLLER
@implementation MAS_VIEW_CONTROLLER (MASAdditions)
// Anchors to the BOTTOM edge of the top layout guide — the boundary adjacent
// to the controller's content — so `make.top.equalTo(vc.mas_topLayoutGuide)`
// places a view just below the guide.
- (MASViewAttribute *)mas_topLayoutGuide {
    return [[MASViewAttribute alloc] initWithView:self.view item:self.topLayoutGuide layoutAttribute:NSLayoutAttributeBottom];
}
- (MASViewAttribute *)mas_topLayoutGuideTop {
    return [[MASViewAttribute alloc] initWithView:self.view item:self.topLayoutGuide layoutAttribute:NSLayoutAttributeTop];
}
- (MASViewAttribute *)mas_topLayoutGuideBottom {
    return [[MASViewAttribute alloc] initWithView:self.view item:self.topLayoutGuide layoutAttribute:NSLayoutAttributeBottom];
}
// Symmetrically, this anchors to the TOP edge of the bottom layout guide.
- (MASViewAttribute *)mas_bottomLayoutGuide {
    return [[MASViewAttribute alloc] initWithView:self.view item:self.bottomLayoutGuide layoutAttribute:NSLayoutAttributeTop];
}
- (MASViewAttribute *)mas_bottomLayoutGuideTop {
    return [[MASViewAttribute alloc] initWithView:self.view item:self.bottomLayoutGuide layoutAttribute:NSLayoutAttributeTop];
}
- (MASViewAttribute *)mas_bottomLayoutGuideBottom {
    return [[MASViewAttribute alloc] initWithView:self.view item:self.bottomLayoutGuide layoutAttribute:NSLayoutAttributeBottom];
}
@end
#endif
|
2877025939/tabelVew-CollectionView | 4,730 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/Masonry/Masonry/MASCompositeConstraint.m | //
// MASCompositeConstraint.m
// Masonry
//
// Created by Jonas Budelmann on 21/07/13.
// Copyright (c) 2013 cloudling. All rights reserved.
//
#import "MASCompositeConstraint.h"
#import "MASConstraint+Private.h"
@interface MASCompositeConstraint () <MASConstraintDelegate>
// Debug key assigned via the `key` block; used to identify this constraint.
@property (nonatomic, strong) id mas_key;
// Child MASConstraints this composite fans every operation out to.
@property (nonatomic, strong) NSMutableArray *childConstraints;
@end
@implementation MASCompositeConstraint
// Designated initializer: takes ownership of `children` and becomes each
// child's delegate so attribute chaining/replacement is routed through self.
- (id)initWithChildren:(NSArray *)children {
    self = [super init];
    if (!self) return nil;
    _childConstraints = [children mutableCopy];
    for (MASConstraint *constraint in _childConstraints) {
        constraint.delegate = self;
    }
    return self;
}
#pragma mark - MASConstraintDelegate
// Swaps a child in place when it resolves into a more specific constraint.
- (void)constraint:(MASConstraint *)constraint shouldBeReplacedWithConstraint:(MASConstraint *)replacementConstraint {
    NSUInteger index = [self.childConstraints indexOfObject:constraint];
    NSAssert(index != NSNotFound, @"Could not find constraint %@", constraint);
    [self.childConstraints replaceObjectAtIndex:index withObject:replacementConstraint];
}
// Asks our own delegate (typically the MASConstraintMaker) to create the new
// constraint, then adopts it as another child.
- (MASConstraint *)constraint:(MASConstraint __unused *)constraint addConstraintWithLayoutAttribute:(NSLayoutAttribute)layoutAttribute {
    id<MASConstraintDelegate> strongDelegate = self.delegate;
    MASConstraint *newConstraint = [strongDelegate constraint:self addConstraintWithLayoutAttribute:layoutAttribute];
    newConstraint.delegate = self;
    [self.childConstraints addObject:newConstraint];
    return newConstraint;
}
#pragma mark - NSLayoutConstraint multiplier proxies
// Each proxy below applies the operation to every child and returns self so
// the fluent chaining syntax keeps working on the composite.
- (MASConstraint * (^)(CGFloat))multipliedBy {
    return ^id(CGFloat multiplier) {
        for (MASConstraint *constraint in self.childConstraints) {
            constraint.multipliedBy(multiplier);
        }
        return self;
    };
}
- (MASConstraint * (^)(CGFloat))dividedBy {
    return ^id(CGFloat divider) {
        for (MASConstraint *constraint in self.childConstraints) {
            constraint.dividedBy(divider);
        }
        return self;
    };
}
#pragma mark - MASLayoutPriority proxy
- (MASConstraint * (^)(MASLayoutPriority))priority {
    return ^id(MASLayoutPriority priority) {
        for (MASConstraint *constraint in self.childConstraints) {
            constraint.priority(priority);
        }
        return self;
    };
}
#pragma mark - NSLayoutRelation proxy
- (MASConstraint * (^)(id, NSLayoutRelation))equalToWithRelation {
    return ^id(id attr, NSLayoutRelation relation) {
        // Iterate a copy: resolving a relation can replace a child via the
        // delegate callback, which would mutate the array mid-enumeration.
        for (MASConstraint *constraint in self.childConstraints.copy) {
            constraint.equalToWithRelation(attr, relation);
        }
        return self;
    };
}
#pragma mark - attribute chaining
- (MASConstraint *)addConstraintWithLayoutAttribute:(NSLayoutAttribute)layoutAttribute {
    [self constraint:self addConstraintWithLayoutAttribute:layoutAttribute];
    return self;
}
#pragma mark - Animator proxy
#if TARGET_OS_MAC && !(TARGET_OS_IPHONE || TARGET_OS_TV)
// macOS only: flips every child into its animator proxy.
- (MASConstraint *)animator {
    for (MASConstraint *constraint in self.childConstraints) {
        [constraint animator];
    }
    return self;
}
#endif
#pragma mark - debug helpers
// Stores the key on self and derives an indexed key ("key[0]", "key[1]", …)
// for each child so individual children are distinguishable in logs.
- (MASConstraint * (^)(id))key {
    return ^id(id key) {
        self.mas_key = key;
        int i = 0;
        for (MASConstraint *constraint in self.childConstraints) {
            constraint.key([NSString stringWithFormat:@"%@[%d]", key, i++]);
        }
        return self;
    };
}
#pragma mark - NSLayoutConstraint constant setters
// Constant setters broadcast to all children (no chaining value to return).
- (void)setInsets:(MASEdgeInsets)insets {
    for (MASConstraint *constraint in self.childConstraints) {
        constraint.insets = insets;
    }
}
- (void)setOffset:(CGFloat)offset {
    for (MASConstraint *constraint in self.childConstraints) {
        constraint.offset = offset;
    }
}
- (void)setSizeOffset:(CGSize)sizeOffset {
    for (MASConstraint *constraint in self.childConstraints) {
        constraint.sizeOffset = sizeOffset;
    }
}
- (void)setCenterOffset:(CGPoint)centerOffset {
    for (MASConstraint *constraint in self.childConstraints) {
        constraint.centerOffset = centerOffset;
    }
}
#pragma mark - MASConstraint
- (void)activate {
    for (MASConstraint *constraint in self.childConstraints) {
        [constraint activate];
    }
}
- (void)deactivate {
    for (MASConstraint *constraint in self.childConstraints) {
        [constraint deactivate];
    }
}
// Propagates the updateExisting flag before installing each child.
- (void)install {
    for (MASConstraint *constraint in self.childConstraints) {
        constraint.updateExisting = self.updateExisting;
        [constraint install];
    }
}
- (void)uninstall {
    for (MASConstraint *constraint in self.childConstraints) {
        [constraint uninstall];
    }
}
@end
|
2877025939/tabelVew-CollectionView | 10,293 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/Masonry/Masonry/MASConstraintMaker.m | //
// MASConstraintBuilder.m
// Masonry
//
// Created by Jonas Budelmann on 20/07/13.
// Copyright (c) 2013 cloudling. All rights reserved.
//
#import "MASConstraintMaker.h"
#import "MASViewConstraint.h"
#import "MASCompositeConstraint.h"
#import "MASConstraint+Private.h"
#import "MASViewAttribute.h"
#import "View+MASAdditions.h"
@interface MASConstraintMaker () <MASConstraintDelegate>
@property (nonatomic, weak) MAS_VIEW *view;
@property (nonatomic, strong) NSMutableArray *constraints;
@end
@implementation MASConstraintMaker
// Designated initializer: remembers the view whose constraints are being
// built and starts with an empty queue of pending constraints.
- (id)initWithView:(MAS_VIEW *)view {
    self = [super init];
    if (self) {
        self.view = view;
        self.constraints = [NSMutableArray array];
    }
    return self;
}
// Installs every queued constraint and returns them. When removeExisting is
// set, all constraints previously installed on the view are torn down first.
// The pending queue is emptied afterwards so the maker can be reused.
- (NSArray *)install {
    if (self.removeExisting) {
        NSArray *previouslyInstalled = [MASViewConstraint installedConstraintsForView:self.view];
        for (MASConstraint *installed in previouslyInstalled) {
            [installed uninstall];
        }
    }
    NSArray *pending = self.constraints.copy;
    for (MASConstraint *pendingConstraint in pending) {
        pendingConstraint.updateExisting = self.updateExisting;
        [pendingConstraint install];
    }
    [self.constraints removeAllObjects];
    return pending;
}
#pragma mark - MASConstraintDelegate
// Delegate callback: swaps `constraint` for `replacementConstraint` in the
// pending queue, preserving its position. Used when a single view constraint
// is promoted to a composite during chaining (see below).
- (void)constraint:(MASConstraint *)constraint shouldBeReplacedWithConstraint:(MASConstraint *)replacementConstraint {
    NSUInteger index = [self.constraints indexOfObject:constraint];
    NSAssert(index != NSNotFound, @"Could not find constraint %@", constraint);
    [self.constraints replaceObjectAtIndex:index withObject:replacementConstraint];
}

// Delegate callback backing attribute chaining (e.g. make.left.top):
// - chaining off an existing MASViewConstraint wraps old + new in a
//   MASCompositeConstraint and swaps the composite into the queue;
// - with no existing constraint (nil), the new constraint is adopted and
//   queued directly;
// - otherwise (e.g. called by a composite on itself) the new constraint is
//   returned without queueing — the caller owns it.
- (MASConstraint *)constraint:(MASConstraint *)constraint addConstraintWithLayoutAttribute:(NSLayoutAttribute)layoutAttribute {
    MASViewAttribute *viewAttribute = [[MASViewAttribute alloc] initWithView:self.view layoutAttribute:layoutAttribute];
    MASViewConstraint *newConstraint = [[MASViewConstraint alloc] initWithFirstViewAttribute:viewAttribute];
    if ([constraint isKindOfClass:MASViewConstraint.class]) {
        //replace with composite constraint
        NSArray *children = @[constraint, newConstraint];
        MASCompositeConstraint *compositeConstraint = [[MASCompositeConstraint alloc] initWithChildren:children];
        compositeConstraint.delegate = self;
        [self constraint:constraint shouldBeReplacedWithConstraint:compositeConstraint];
        return compositeConstraint;
    }
    if (!constraint) {
        newConstraint.delegate = self;
        [self.constraints addObject:newConstraint];
    }
    return newConstraint;
}
// Creates one MASViewConstraint per attribute bit set in `attrs` and wraps
// them all in a single queued MASCompositeConstraint, so e.g. make.edges
// manipulates four constraints through one handle.
- (MASConstraint *)addConstraintWithAttributes:(MASAttribute)attrs {
    // Union of every known attribute bit; used only to validate the input.
    __unused MASAttribute anyAttribute = (MASAttributeLeft | MASAttributeRight | MASAttributeTop | MASAttributeBottom | MASAttributeLeading
                                          | MASAttributeTrailing | MASAttributeWidth | MASAttributeHeight | MASAttributeCenterX
                                          | MASAttributeCenterY | MASAttributeBaseline
#if (__IPHONE_OS_VERSION_MIN_REQUIRED >= 80000) || (__TV_OS_VERSION_MIN_REQUIRED >= 9000) || (__MAC_OS_X_VERSION_MIN_REQUIRED >= 101100)
                                          | MASAttributeFirstBaseline | MASAttributeLastBaseline
#endif
#if TARGET_OS_IPHONE || TARGET_OS_TV
                                          | MASAttributeLeftMargin | MASAttributeRightMargin | MASAttributeTopMargin | MASAttributeBottomMargin
                                          | MASAttributeLeadingMargin | MASAttributeTrailingMargin | MASAttributeCenterXWithinMargins
                                          | MASAttributeCenterYWithinMargins
#endif
                                          );

    NSAssert((attrs & anyAttribute) != 0, @"You didn't pass any attribute to make.attributes(...)");

    // Translate each set bit into the view's corresponding MASViewAttribute.
    NSMutableArray *attributes = [NSMutableArray array];

    if (attrs & MASAttributeLeft) [attributes addObject:self.view.mas_left];
    if (attrs & MASAttributeRight) [attributes addObject:self.view.mas_right];
    if (attrs & MASAttributeTop) [attributes addObject:self.view.mas_top];
    if (attrs & MASAttributeBottom) [attributes addObject:self.view.mas_bottom];
    if (attrs & MASAttributeLeading) [attributes addObject:self.view.mas_leading];
    if (attrs & MASAttributeTrailing) [attributes addObject:self.view.mas_trailing];
    if (attrs & MASAttributeWidth) [attributes addObject:self.view.mas_width];
    if (attrs & MASAttributeHeight) [attributes addObject:self.view.mas_height];
    if (attrs & MASAttributeCenterX) [attributes addObject:self.view.mas_centerX];
    if (attrs & MASAttributeCenterY) [attributes addObject:self.view.mas_centerY];
    if (attrs & MASAttributeBaseline) [attributes addObject:self.view.mas_baseline];

#if (__IPHONE_OS_VERSION_MIN_REQUIRED >= 80000) || (__TV_OS_VERSION_MIN_REQUIRED >= 9000) || (__MAC_OS_X_VERSION_MIN_REQUIRED >= 101100)
    if (attrs & MASAttributeFirstBaseline) [attributes addObject:self.view.mas_firstBaseline];
    if (attrs & MASAttributeLastBaseline) [attributes addObject:self.view.mas_lastBaseline];
#endif

#if TARGET_OS_IPHONE || TARGET_OS_TV
    if (attrs & MASAttributeLeftMargin) [attributes addObject:self.view.mas_leftMargin];
    if (attrs & MASAttributeRightMargin) [attributes addObject:self.view.mas_rightMargin];
    if (attrs & MASAttributeTopMargin) [attributes addObject:self.view.mas_topMargin];
    if (attrs & MASAttributeBottomMargin) [attributes addObject:self.view.mas_bottomMargin];
    if (attrs & MASAttributeLeadingMargin) [attributes addObject:self.view.mas_leadingMargin];
    if (attrs & MASAttributeTrailingMargin) [attributes addObject:self.view.mas_trailingMargin];
    if (attrs & MASAttributeCenterXWithinMargins) [attributes addObject:self.view.mas_centerXWithinMargins];
    if (attrs & MASAttributeCenterYWithinMargins) [attributes addObject:self.view.mas_centerYWithinMargins];
#endif

    // One child constraint per resolved attribute, grouped under a composite.
    NSMutableArray *children = [NSMutableArray arrayWithCapacity:attributes.count];

    for (MASViewAttribute *a in attributes) {
        [children addObject:[[MASViewConstraint alloc] initWithFirstViewAttribute:a]];
    }

    MASCompositeConstraint *constraint = [[MASCompositeConstraint alloc] initWithChildren:children];
    constraint.delegate = self;
    [self.constraints addObject:constraint];
    return constraint;
}
#pragma mark - standard Attributes
// Entry point for every single-attribute property below: queues a fresh
// constraint for the maker's view (nil = nothing to chain off yet).
- (MASConstraint *)addConstraintWithLayoutAttribute:(NSLayoutAttribute)layoutAttribute {
    return [self constraint:nil addConstraintWithLayoutAttribute:layoutAttribute];
}

// Single-attribute accessors; each queues and returns one new constraint.
- (MASConstraint *)left {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeLeft];
}

- (MASConstraint *)top {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeTop];
}

- (MASConstraint *)right {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeRight];
}

- (MASConstraint *)bottom {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeBottom];
}

- (MASConstraint *)leading {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeLeading];
}

- (MASConstraint *)trailing {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeTrailing];
}

- (MASConstraint *)width {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeWidth];
}

- (MASConstraint *)height {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeHeight];
}

- (MASConstraint *)centerX {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeCenterX];
}

- (MASConstraint *)centerY {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeCenterY];
}

- (MASConstraint *)baseline {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeBaseline];
}

// Block-style API: make.attributes(MASAttributeLeft | MASAttributeTop).
- (MASConstraint *(^)(MASAttribute))attributes {
    return ^(MASAttribute attrs){
        return [self addConstraintWithAttributes:attrs];
    };
}

#if (__IPHONE_OS_VERSION_MIN_REQUIRED >= 80000) || (__TV_OS_VERSION_MIN_REQUIRED >= 9000) || (__MAC_OS_X_VERSION_MIN_REQUIRED >= 101100)

- (MASConstraint *)firstBaseline {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeFirstBaseline];
}

- (MASConstraint *)lastBaseline {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeLastBaseline];
}

#endif

#if TARGET_OS_IPHONE || TARGET_OS_TV

// Margin attributes are iOS/tvOS only.
- (MASConstraint *)leftMargin {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeLeftMargin];
}

- (MASConstraint *)rightMargin {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeRightMargin];
}

- (MASConstraint *)topMargin {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeTopMargin];
}

- (MASConstraint *)bottomMargin {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeBottomMargin];
}

- (MASConstraint *)leadingMargin {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeLeadingMargin];
}

- (MASConstraint *)trailingMargin {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeTrailingMargin];
}

- (MASConstraint *)centerXWithinMargins {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeCenterXWithinMargins];
}

- (MASConstraint *)centerYWithinMargins {
    return [self addConstraintWithLayoutAttribute:NSLayoutAttributeCenterYWithinMargins];
}

#endif

#pragma mark - composite Attributes

// Convenience composites built on addConstraintWithAttributes:.
- (MASConstraint *)edges {
    return [self addConstraintWithAttributes:MASAttributeTop | MASAttributeLeft | MASAttributeRight | MASAttributeBottom];
}

- (MASConstraint *)size {
    return [self addConstraintWithAttributes:MASAttributeWidth | MASAttributeHeight];
}

- (MASConstraint *)center {
    return [self addConstraintWithAttributes:MASAttributeCenterX | MASAttributeCenterY];
}
#pragma mark - grouping
// Runs `group`, then collects every constraint it queued into one composite
// so the whole group can be manipulated through a single handle.
- (MASConstraint *(^)(dispatch_block_t group))group {
    return ^id(dispatch_block_t group) {
        NSInteger countBefore = self.constraints.count;
        group();
        NSRange addedRange = NSMakeRange(countBefore, self.constraints.count - countBefore);
        NSArray *children = [self.constraints subarrayWithRange:addedRange];
        MASCompositeConstraint *composite = [[MASCompositeConstraint alloc] initWithChildren:children];
        composite.delegate = self;
        return composite;
    };
}
@end
|
2881099/csredis | 17,441 | test/CSRedisCore.Tests/Resp3HelperTests.cs | using CSRedis.Internal;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Net.Sockets;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using Xunit;
namespace CSRedisCore.Tests {
public class Resp3HelperTests {
/// <summary>
/// Thin wrapper that owns a connected TCP <see cref="Socket"/> and exposes it
/// as a <see cref="NetworkStream"/> for the RESP3 helper to read/write.
/// </summary>
public class RedisSocket : IDisposable
{
    Socket _socket;

    // Created with ownsSocket: true, so disposing the stream closes the socket too.
    public NetworkStream Stream { get; }

    public RedisSocket(Socket socket)
    {
        _socket = socket;
        Stream = new NetworkStream(_socket, true);
    }

    public void Dispose()
    {
        // Shutdown throws if the peer already closed the connection; that must
        // not prevent the remaining resources from being released.
        try { _socket.Shutdown(SocketShutdown.Both); }
        catch (SocketException) { }
        catch (ObjectDisposedException) { }

        // Fix: the NetworkStream was never disposed. It owns the socket
        // (ownsSocket: true), so disposing it also closes the socket; the
        // explicit Close/Dispose below are then harmless no-ops kept for clarity.
        Stream.Dispose();
        _socket.Close();
        _socket.Dispose();
    }

    public static RedisSocket GetRedisSocket()
    {
        // NOTE(review): hard-coded test-server endpoint — adjust per environment.
        var endpoint = new IPEndPoint(IPAddress.Parse("192.168.164.10"), 6379);
        var _socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp);
        _socket.Connect(endpoint);
        return new RedisSocket(_socket);
    }
}
/// <summary>
/// Builds the argument array sent to Redis from a command, an optional
/// sub-command and optional trailing parameters.
/// </summary>
/// <param name="cmd">Redis command. When neither <paramref name="subcmd"/> nor
/// <paramref name="parms"/> are given it may contain spaces (e.g. "GET key")
/// and is split on whitespace.</param>
/// <param name="subcmd">Optional sub-command placed right after the command.</param>
/// <param name="parms">Optional trailing arguments, appended in order.</param>
static object[] PrepareCmd(string cmd, string subcmd = null, params object[] parms)
{
    // Fix: the message text was previously passed as the paramName argument of
    // ArgumentNullException, producing a garbled exception ("Parameter name: …").
    if (string.IsNullOrWhiteSpace(cmd)) throw new ArgumentNullException(nameof(cmd), "Redis command must not be null or empty.");
    object[] args;
    if (parms?.Any() != true)
    {
        if (!string.IsNullOrWhiteSpace(subcmd)) args = new object[] { cmd, subcmd };
        else args = cmd.Split(' ').Where(a => !string.IsNullOrWhiteSpace(a)).ToArray();
    }
    else
    {
        var hasSubcmd = !string.IsNullOrWhiteSpace(subcmd);
        args = new object[parms.Length + 1 + (hasSubcmd ? 1 : 0)];
        var argsIdx = 0;
        args[argsIdx++] = cmd;
        if (hasSubcmd) args[argsIdx++] = subcmd;
        foreach (var prm in parms) args[argsIdx++] = prm;
    }
    return args;
}
// Opens a fresh connection, writes the command in RESP form and reads one
// typed reply. One socket per command keeps tests isolated from each other
// (at the cost of a TCP handshake per call).
static Resp3Helper.ReadResult<T> ExecCmd<T>(string cmd, string subcmd = null, params object[] parms)
{
    var args = PrepareCmd(cmd, subcmd, parms);
    using (var rds = RedisSocket.GetRedisSocket())
    {
        Resp3Helper.Write(rds.Stream, args, true);
        var rt = Resp3Helper.Read<T>(rds.Stream);
        return rt;
    }
}
// Executes a command that turns the connection into a push stream
// (MONITOR / SYNC / PSYNC) and pumps every subsequent reply to `ondata` on a
// background thread until the returned handle is disposed. The socket is
// intentionally NOT wrapped in `using` — it must outlive this method.
static ExecCmdListenResult ExecCmdListen(Action<ExecCmdListenResult, string> ondata, string cmd, string subcmd = null, params object[] parms)
{
    var args = PrepareCmd(cmd, subcmd, parms);
    var rds = RedisSocket.GetRedisSocket();
    Resp3Helper.Write(rds.Stream, args, true);
    var rd = Resp3Helper.Read<string>(rds.Stream);
    var rt = new ExecCmdListenResult { rds = rds };
    new Thread(() =>
    {
        // Deliver the initial command reply first, then keep reading pushes.
        ondata?.Invoke(rt, rd.Value);
        while (rt._running)
        {
            try
            {
                ondata?.Invoke(rt, Resp3Helper.Read<string>(rds.Stream).Value);
            }
            catch(Exception ex)
            {
                // NOTE(review): exceptions are only logged; if the connection
                // dies this loop can spin on repeated failures until Dispose
                // clears _running — confirm that is intended for these tests.
                Console.WriteLine(ex.Message);
            }
        }
    }).Start();
    return rt;
}
// Handle returned by ExecCmdListen; disposing it stops the listener loop.
// NOTE(review): Dispose only clears the flag — the socket in `rds` is not
// closed here, so a blocked Read ends only via the next reply or an error.
public class ExecCmdListenResult : IDisposable
{
    internal RedisSocket rds;
    internal bool _running = true;
    public void Dispose() => _running = false;
}
/// <summary>
/// Thin typed wrappers over Redis server/ACL commands, one method per command.
/// Several command names were corrected (see "fix:" comments below).
/// </summary>
class RedisCommand
{
    class RedisServerException : Exception
    {
        public RedisServerException(string message) : base(message) { }
    }

    public Resp3Helper.ReadResult<string[]> AclCat(string categoryname = null) => string.IsNullOrWhiteSpace(categoryname) ? ExecCmd<string[]>("ACL", "CAT") : ExecCmd<string[]>("ACL", "CAT", categoryname);
    public Resp3Helper.ReadResult<int> AclDelUser(params string[] username) => username?.Any() == true ? ExecCmd<int>("ACL", "DELUSER", username) : throw new ArgumentException(nameof(username));
    public Resp3Helper.ReadResult<string> AclGenPass(int bits = 0) => bits <= 0 ? ExecCmd<string>("ACL", "GENPASS") : ExecCmd<string>("ACL", "GENPASS", bits);
    public Resp3Helper.ReadResult<string[]> AclList() => ExecCmd<string[]>("ACL", "LIST");
    public Resp3Helper.ReadResult<string> AclLoad() => ExecCmd<string>("ACL", "LOAD");
    public Resp3Helper.ReadResult<LogInfo[]> AclLog(long count = 0) => (count <= 0 ? ExecCmd<object[][]>("ACL", "LOG") : ExecCmd<object[][]>("ACL", "LOG", count)).NewValue(x => x.Select(a => a.MapToClass<LogInfo>()).ToArray());
    public class LogInfo { public long Count { get; } public string Reason { get; } public string Context { get; } public string Object { get; } public string Username { get; } public decimal AgeSeconds { get; } public string ClientInfo { get; } }
    public Resp3Helper.ReadResult<string> AclSave() => ExecCmd<string>("ACL", "SAVE");
    public Resp3Helper.ReadResult<string> AclSetUser(params string[] rule) => rule?.Any() == true ? ExecCmd<string>("ACL", "SETUSER", rule) : throw new ArgumentException(nameof(rule));
    public Resp3Helper.ReadResult<string[]> AclUsers() => ExecCmd<string[]>("ACL", "USERS");
    public Resp3Helper.ReadResult<string> AclWhoami() => ExecCmd<string>("ACL", "WHOAMI");
    public Resp3Helper.ReadResult<string> BgRewriteAof() => ExecCmd<string>("BGREWRITEAOF");
    public Resp3Helper.ReadResult<string> BgSave(string schedule = null) => ExecCmd<string>("BGSAVE", schedule);
    public Resp3Helper.ReadResult<object[]> Command() => ExecCmd<object[]>("COMMAND");
    public Resp3Helper.ReadResult<int> CommandCount() => ExecCmd<int>("COMMAND", "COUNT");
    public Resp3Helper.ReadResult<string[]> CommandGetKeys(params string[] command) => command?.Any() == true ? ExecCmd<string[]>("COMMAND", "GETKEYS", command) : throw new ArgumentException(nameof(command));
    public Resp3Helper.ReadResult<string[]> CommandInfo(params string[] command) => command?.Any() == true ? ExecCmd<string[]>("COMMAND", "INFO", command) : throw new ArgumentException(nameof(command));
    public Resp3Helper.ReadResult<Dictionary<string, string>> ConfigGet(string parameter) => ExecCmd<string[]>("CONFIG", "GET", parameter).NewValue(a => a.MapToHash<string>());
    public Resp3Helper.ReadResult<string> ConfigResetStat() => ExecCmd<string>("CONFIG", "RESETSTAT");
    public Resp3Helper.ReadResult<string> ConfigRewrite() => ExecCmd<string>("CONFIG", "REWRITE");
    public Resp3Helper.ReadResult<string> ConfigSet(string parameter, object value) => ExecCmd<string>("CONFIG", "SET", parameter, value);
    public Resp3Helper.ReadResult<long> DbSize() => ExecCmd<long>("DBSIZE");
    public Resp3Helper.ReadResult<string> DebugObject(string key) => ExecCmd<string>("DEBUG", "OBJECT", key);
    public Resp3Helper.ReadResult<string> DebugSegfault() => ExecCmd<string>("DEBUG", "SEGFAULT");
    public Resp3Helper.ReadResult<string> FlushAll(bool isasync = false) => ExecCmd<string>("FLUSHALL", isasync ? "ASYNC" : null);
    public Resp3Helper.ReadResult<string> FlushDb(bool isasync = false) => ExecCmd<string>("FLUSHDB", isasync ? "ASYNC" : null);
    public Resp3Helper.ReadResult<string> Info(string section = null) => ExecCmd<string>("INFO", section);
    public Resp3Helper.ReadResult<long> LastSave() => ExecCmd<long>("LASTSAVE");
    public Resp3Helper.ReadResult<string> LatencyDoctor() => ExecCmd<string>("LATENCY", "DOCTOR");
    public Resp3Helper.ReadResult<string> LatencyGraph(string @event) => ExecCmd<string>("LATENCY", "GRAPH", @event);
    public Resp3Helper.ReadResult<string[]> LatencyHelp() => ExecCmd<string[]>("LATENCY", "HELP");
    // fix: was ("HISTORY", "HELP", …) — the Redis command is LATENCY HISTORY <event>.
    public Resp3Helper.ReadResult<string[][]> LatencyHistory(string @event) => ExecCmd<string[][]>("LATENCY", "HISTORY", @event);
    // fix: was ("HISTORY", "LATEST") — the Redis command is LATENCY LATEST.
    public Resp3Helper.ReadResult<string[][]> LatencyLatest() => ExecCmd<string[][]>("LATENCY", "LATEST");
    // fix: was ("LASTSAVE", "RESET", …) — the Redis command is LATENCY RESET [event].
    public Resp3Helper.ReadResult<long> LatencyReset(string @event) => ExecCmd<long>("LATENCY", "RESET", @event);
    // fix: was sent as LATENCY — the Redis command is LOLWUT [VERSION n].
    public Resp3Helper.ReadResult<string> Lolwut(string version) => ExecCmd<string>("LOLWUT", string.IsNullOrWhiteSpace(version) ? null : $"VERSION {version}");
    public Resp3Helper.ReadResult<string> MemoryDoctor() => ExecCmd<string>("MEMORY", "DOCTOR");
    public Resp3Helper.ReadResult<string[]> MemoryHelp() => ExecCmd<string[]>("MEMORY", "HELP");
    public Resp3Helper.ReadResult<string> MemoryMallocStats() => ExecCmd<string>("MEMORY", "MALLOC-STATS");
    public Resp3Helper.ReadResult<string> MemoryPurge() => ExecCmd<string>("MEMORY", "PURGE");
    public Resp3Helper.ReadResult<Dictionary<string, string>> MemoryStats() => ExecCmd<string[]>("MEMORY", "STATS").NewValue(a => a.MapToHash<string>());
    // fix: command name had a trailing space ("MEMORY ").
    public Resp3Helper.ReadResult<long> MemoryUsage(string key, long count = 0) => count <= 0 ? ExecCmd<long>("MEMORY", "USAGE", key) : ExecCmd<long>("MEMORY", "USAGE", key, "SAMPLES", count);
    public Resp3Helper.ReadResult<string[][]> ModuleList() => ExecCmd<string[][]>("MODULE", "LIST");
    public Resp3Helper.ReadResult<string> ModuleLoad(string path, params string[] args) => args?.Any() == true ? ExecCmd<string>("MODULE", "LOAD", new[] { path }.Concat(args)) : ExecCmd<string>("MODULE", "LOAD", path);
    public Resp3Helper.ReadResult<string> ModuleUnload(string name) => ExecCmd<string>("MODULE", "UNLOAD", name);
    public ExecCmdListenResult Monitor(Action<ExecCmdListenResult, string> onData) => ExecCmdListen(onData, "MONITOR");
    public ExecCmdListenResult Psync(string replicationid, string offset, Action<ExecCmdListenResult, string> onData) => ExecCmdListen(onData, "PSYNC", replicationid, offset);
    public Resp3Helper.ReadResult<string> ReplicaOf(string host, int port) => ExecCmd<string>("REPLICAOF", host, port);
    public Resp3Helper.ReadResult<object> Role() => ExecCmd<object>("ROLE");
    public Resp3Helper.ReadResult<string> Save() => ExecCmd<string>("SAVE");
    public Resp3Helper.ReadResult<string> Shutdown(bool save) => ExecCmd<string>("SHUTDOWN", save ? "SAVE" : "NOSAVE");
    public Resp3Helper.ReadResult<string> SlaveOf(string host, int port) => ExecCmd<string>("SLAVEOF", host, port);
    public Resp3Helper.ReadResult<object> SlowLog(string subcommand, params string[] argument) => ExecCmd<object>("SLOWLOG", subcommand, argument);
    public Resp3Helper.ReadResult<string> SwapDb(int index1, int index2) => ExecCmd<string>("SWAPDB", null, index1, index2);
    public ExecCmdListenResult Sync(Action<ExecCmdListenResult, string> onData) => ExecCmdListen(onData, "SYNC");
    // fix: DateTime months/days are 1-based — new DateTime(1970, 0, 0) throws
    // ArgumentOutOfRangeException. The Unix epoch is 1970-01-01. TIME replies
    // with seconds plus microseconds (1 µs = 10 ticks).
    public Resp3Helper.ReadResult<DateTime> Time() => ExecCmd<long[]>("TIME").NewValue(a => new DateTime(1970, 1, 1).AddSeconds(a[0]).AddTicks(a[1] * 10));
}
RedisCommand rds { get; } = new RedisCommand();
#region server test
[Fact]
public void BgRewriteAof()
{
var rt = rds.BgRewriteAof();
if (!rt.IsError) rt.Value.AssertEqual("Background append only file rewriting started");
}
[Fact]
public void BgSave()
{
var rt = rds.BgSave();
if (!rt.IsError) rt.Value.AssertEqual("Background saving started");
}
[Fact]
public void Command()
{
string UFString(string text)
{
if (text.Length <= 1) return text.ToUpper();
else return text.Substring(0, 1).ToUpper() + text.Substring(1, text.Length - 1);
}
var rt = rds.Command();
var sb = string.Join("\r\n\r\n", (rt.Value).OrderBy(a1 => (a1 as List<object>)[0].ToString()).Select(a1 =>
{
var a = a1 as List<object>;
var plen = int.Parse(a[1].ToString());
var firstKey = int.Parse(a[3].ToString());
var lastKey = int.Parse(a[4].ToString());
var stepCount = int.Parse(a[5].ToString());
var parms = "";
if (plen > 1)
{
for (var x = 1; x < plen; x++)
{
if (x == firstKey) parms += "string key, ";
else parms += "string parm, ";
}
parms = parms.Remove(parms.Length - 2);
}
if (plen < 0)
{
for (var x = 1; x < -plen; x++)
{
if (x == firstKey) parms += "string key, ";
else parms += "string parm, ";
}
if (parms.Length > 0)
parms = parms.Remove(parms.Length - 2);
}
return $@"
//{string.Join(", ", a[2] as List<object>)}
//{string.Join(", ", a[6] as List<object>)}
public void {UFString(a[0].ToString())}({parms}) {{ }}";
}));
}
[Fact]
public void CommandCount()
{
var rt = rds.CommandCount();
if (!rt.IsError) (rt.Value > 100).AssertEqual(true);
}
[Fact]
public void CommandGetKeys()
{
var rt = rds.CommandGetKeys("MSET", "a", "b", "c", "d", "e", "f");
if (!rt.IsError)
{
rt.Value[0].AssertEqual("a");
rt.Value[1].AssertEqual("c");
rt.Value[2].AssertEqual("e");
}
}
[Fact]
public void ConfigGet()
{
var rt = rds.ConfigGet("*max-*-entries*");
if (!rt.IsError)
{
rt.Value.ContainsKey("hash-max-ziplist-entries").AssertEqual(true);
rt.Value.ContainsKey("set-max-intset-entries").AssertEqual(true);
rt.Value.ContainsKey("zset-max-ziplist-entries").AssertEqual(true);
}
}
[Fact]
public void ConfigResetStat()
{
var rt = rds.ConfigResetStat();
if (!rt.IsError) rt.Value.AssertEqual("OK");
}
[Fact]
public void ConfigRewrite()
{
var rt = rds.ConfigRewrite();
if (!rt.IsError) rt.Value.AssertEqual("OK");
}
[Fact]
public void ConfigSet()
{
var rt = rds.ConfigSet("hash-max-ziplist-entries", 512);
if (!rt.IsError) rt.Value.AssertEqual("OK");
}
[Fact]
public void DbSize()
{
var rt = rds.DbSize();
if (!rt.IsError) (rt.Value >= 0).AssertEqual(true);
}
[Fact]
public void DebugObject()
{
    // Fix: this test previously called ConfigSet (copy-paste) and compared the
    // whole reply to the "Value at:" prefix with AssertEqual, which can never
    // match. A DEBUG OBJECT reply looks like:
    // Value at:0x7f52b584aa80 refcount:2147483647 encoding:int serializedlength:2 lru:12199791 lru_seconds_idle:40537
    var rt = rds.DebugObject("key");
    if (!rt.IsError) rt.Value.StartsWith("Value at:").AssertEqual(true);
}
[Fact]
public void LastSave()
{
var rt = rds.LastSave();
if (!rt.IsError) (rt.Value >= 0).AssertEqual(true);
}
[Fact]
public void LatencyHelp()
{
var rt = rds.LatencyHelp();
if (!rt.IsError) (rt.Value.Length > 0).AssertEqual(true);
}
[Fact]
public void MemoryStats()
{
var rt = rds.MemoryStats();
if (!rt.IsError) rt.Value.ContainsKey("keys.count").AssertEqual(true);
}
[Fact]
public void MemoryUsage()
{
var rt = rds.MemoryUsage("key");
if (!rt.IsError) (rt.Value > 0).AssertEqual(true);
}
#endregion
#region acl test
[Fact]
public void AclCat()
{
var assertList = new[] { "keyspace", "read", "write", "set", "sortedset", "list", "hash", "string", "bitmap", "hyperloglog", "geo", "stream", "pubsub", "admin", "fast", "slow", "blocking", "dangerous", "connection", "transaction", "scripting" };
var rt = rds.AclCat();
if (!rt.IsError) assertList.Where(a => rt.Value.Contains(a)).Count().AssertEqual(assertList.Length);
assertList = new[] { "flushdb", "lastsave", "info", "latency", "slowlog", "replconf", "slaveof", "acl", "flushall", "role", "pfdebug", "cluster", "shutdown", "restore-asking", "sort", "sync", "pfselftest", "restore", "swapdb", "config", "keys", "psync", "migrate", "bgsave", "monitor", "bgrewriteaof", "module", "debug", "save", "client", "replicaof" };
rt = rds.AclCat("dangerous");
if (!rt.IsError) assertList.Where(a => rt.Value.Contains(a)).Count().AssertEqual(assertList.Length);
}
[Fact]
public void AclDelUser()
{
var rt = rds.AclDelUser("antirez");
if (!rt.IsError) rt.Value.AssertEqual(0);
}
[Fact]
public void AclGenPass()
{
var rt = rds.AclGenPass();
if (!rt.IsError) rt.Value.ToString().Length.AssertEqual(64);
rt = rds.AclGenPass(32);
if (!rt.IsError) rt.Value.ToString().Length.AssertEqual(8);
rt = rds.AclGenPass(5);
if (!rt.IsError) rt.Value.ToString().Length.AssertEqual(2);
}
[Fact]
public void AclList()
{
//1) "user default on nopass ~* +@all"
//2) "user karin on +@all -@admin -@dangerous"
//1) "user antirez on #9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08 ~objects:* +@all -@admin -@dangerous"
//2) "user default on nopass ~* +@all"
var rt = rds.AclList();
if (!rt.IsError) rt.Value[0].StartsWith("user ").AssertEqual(true);
}
[Fact]
public void AclLoad()
{
var rt = rds.AclLoad();
if (!rt.IsError) rt.Value.AssertEqual("OK");
//rt.Value.ToString().StartsWith("ERR This Redis instance is not configured to use an ACL file.");
}
[Fact]
public void AclLog()
{
    //127.0.0.1:6379> acl log 1
    //1) 1# "count" => (integer) 1
    //   2# "reason" => "auth"
    //   3# "context" => "toplevel"
    //   4# "object" => "AUTH"
    //   5# "username" => "someuser"
    //   6# "age-seconds" => (double) 8.3040000000000003
    //   7# "client-info" => "id=8 addr=127.0.0.1:40298 fd=8 name= age=6802 idle=0 flags=N db=0 sub=0 psub=0 multi=-1 qbuf=48 qbuf-free=32720 obl=0 oll=0 omem=0 events=r cmd=auth user=default"
    // Trigger a failed AUTH so the ACL log contains at least one entry.
    ExecCmd<string>("AUTH someuser wrongpassword");
    var rt = rds.AclLog();
    // Fix: AclLog returns a LogInfo[] mapped from the reply, never the string
    // "OK" — assert on the array instead.
    if (!rt.IsError) (rt.Value.Length > 0).AssertEqual(true);
}
[Fact]
public void AclSave()
{
var rt = rds.AclSave();
if (!rt.IsError) rt.Value.AssertEqual("OK");
//rt.Value.ToString().StartsWith("ERR This Redis instance is not configured to use an ACL file.");
}
[Fact]
public void AclSetUser()
{
var rt = rds.AclSetUser("karin", "on", "+@all", "-@dangerous");
if (!rt.IsError) rt.Value.AssertEqual("OK");
}
[Fact]
public void AclUsers()
{
var rt = rds.AclUsers();
if (!rt.IsError) rt.Value.Contains("default").AssertEqual(true);
}
[Fact]
public void AclWhoami()
{
var rt = rds.AclWhoami();
if (!rt.IsError) rt.Value.AssertEqual("default");
}
#endregion
// [Fact]
//public void Set()
//{
// var val = Guid.NewGuid().ToString();
// ExecCmd("SET", "test01", val).AssertEqual("OK");
// ExecCmd("GET", "test01").AssertEqual(val);
// ExecCmd("SET", "test02", Encoding.UTF8.GetBytes(val)).AssertEqual("OK");
// ExecCmd("SET", "test02", val).AssertEqual(val);
//}
}
// Fluent assertion shim over xUnit: `actual.AssertEqual(expected)`.
// NOTE(review): the class name has a typo ("Exntesions"); left unchanged here
// because extension-method call sites never reference the class name.
static class TestExntesions
{
    public static void AssertEqual(this object obj, object val) => Assert.Equal(val, obj);
}
}
|
2877025939/tabelVew-CollectionView | 1,217 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/Masonry/Masonry/MASViewAttribute.h | //
// MASAttribute.h
// Masonry
//
// Created by Jonas Budelmann on 21/07/13.
// Copyright (c) 2013 cloudling. All rights reserved.
//
#import "MASUtilities.h"
/**
* An immutable tuple which stores the view and the related NSLayoutAttribute.
* Describes part of either the left or right hand side of a constraint equation
*/
@interface MASViewAttribute : NSObject
/**
 * The view which the receiver relates to. Can be nil if item is not a view.
*/
@property (nonatomic, weak, readonly) MAS_VIEW *view;
/**
 * The item which the receiver relates to.
*/
@property (nonatomic, weak, readonly) id item;
/**
 * The attribute which the receiver relates to.
*/
@property (nonatomic, assign, readonly) NSLayoutAttribute layoutAttribute;
/**
* Convenience initializer.
*/
- (id)initWithView:(MAS_VIEW *)view layoutAttribute:(NSLayoutAttribute)layoutAttribute;
/**
* The designated initializer.
*/
- (id)initWithView:(MAS_VIEW *)view item:(id)item layoutAttribute:(NSLayoutAttribute)layoutAttribute;
/**
* Determine whether the layoutAttribute is a size attribute
*
* @return YES if layoutAttribute is equal to NSLayoutAttributeWidth or NSLayoutAttributeHeight
*/
- (BOOL)isSizeAttribute;
@end
|
2877025939/tabelVew-CollectionView | 2,753 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/Masonry/Masonry/NSArray+MASAdditions.h | //
// NSArray+MASAdditions.h
//
//
// Created by Daniel Hammond on 11/26/13.
//
//
#import "MASUtilities.h"
#import "MASConstraintMaker.h"
#import "MASViewAttribute.h"
typedef NS_ENUM(NSUInteger, MASAxisType) {
MASAxisTypeHorizontal,
MASAxisTypeVertical
};
@interface NSArray (MASAdditions)
/**
* Creates a MASConstraintMaker with each view in the callee.
* Any constraints defined are added to the view or the appropriate superview once the block has finished executing on each view
*
* @param block scope within which you can build up the constraints which you wish to apply to each view.
*
* @return Array of created MASConstraints
*/
- (NSArray *)mas_makeConstraints:(void (^)(MASConstraintMaker *make))block;
/**
* Creates a MASConstraintMaker with each view in the callee.
* Any constraints defined are added to each view or the appropriate superview once the block has finished executing on each view.
* If an existing constraint exists then it will be updated instead.
*
* @param block scope within which you can build up the constraints which you wish to apply to each view.
*
* @return Array of created/updated MASConstraints
*/
- (NSArray *)mas_updateConstraints:(void (^)(MASConstraintMaker *make))block;
/**
* Creates a MASConstraintMaker with each view in the callee.
* Any constraints defined are added to each view or the appropriate superview once the block has finished executing on each view.
* All constraints previously installed for the views will be removed.
*
* @param block scope within which you can build up the constraints which you wish to apply to each view.
*
* @return Array of created/updated MASConstraints
*/
- (NSArray *)mas_remakeConstraints:(void (^)(MASConstraintMaker *make))block;
/**
* distribute with fixed spacing
*
* @param axisType which axis to distribute items along
* @param fixedSpacing the spacing between each item
* @param leadSpacing the spacing before the first item and the container
* @param tailSpacing the spacing after the last item and the container
*/
- (void)mas_distributeViewsAlongAxis:(MASAxisType)axisType withFixedSpacing:(CGFloat)fixedSpacing leadSpacing:(CGFloat)leadSpacing tailSpacing:(CGFloat)tailSpacing;
/**
* distribute with fixed item size
*
* @param axisType which axis to distribute items along
* @param fixedItemLength the fixed length of each item
* @param leadSpacing the spacing before the first item and the container
* @param tailSpacing the spacing after the last item and the container
*/
- (void)mas_distributeViewsAlongAxis:(MASAxisType)axisType withFixedItemLength:(CGFloat)fixedItemLength leadSpacing:(CGFloat)leadSpacing tailSpacing:(CGFloat)tailSpacing;
@end
|
27182812/ChatGLM-LLaMA-chinese-insturct | 3,899 | src/transformers/models/deberta_v2/__init__.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Maps submodule name -> public names. Drives the lazy module installed at the
# bottom of this file; the TYPE_CHECKING branch mirrors it with eager imports
# so static type checkers see the real symbols.
_import_structure = {
    "configuration_deberta_v2": ["DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaV2Config", "DebertaV2OnnxConfig"],
    "tokenization_deberta_v2": ["DebertaV2Tokenizer"],
}

# Fast tokenizer is only exposed when the `tokenizers` package is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_deberta_v2_fast"] = ["DebertaV2TokenizerFast"]

# TensorFlow models are only exposed when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deberta_v2"] = [
        "TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaV2ForMaskedLM",
        "TFDebertaV2ForQuestionAnswering",
        "TFDebertaV2ForSequenceClassification",
        "TFDebertaV2ForTokenClassification",
        "TFDebertaV2Model",
        "TFDebertaV2PreTrainedModel",
    ]

# PyTorch models are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deberta_v2"] = [
        "DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaV2ForMaskedLM",
        "DebertaV2ForMultipleChoice",
        "DebertaV2ForQuestionAnswering",
        "DebertaV2ForSequenceClassification",
        "DebertaV2ForTokenClassification",
        "DebertaV2Model",
        "DebertaV2PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for type checkers only; never executed at runtime.
    from .configuration_deberta_v2 import (
        DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DebertaV2Config,
        DebertaV2OnnxConfig,
    )
    from .tokenization_deberta_v2 import DebertaV2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_v2_fast import DebertaV2TokenizerFast

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta_v2 import (
            TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
            TFDebertaV2Model,
            TFDebertaV2PreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta_v2 import (
            DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaV2ForMaskedLM,
            DebertaV2ForMultipleChoice,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2Model,
            DebertaV2PreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first
    # attribute access, keeping `import transformers` cheap.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 21,720 | src/transformers/models/deberta_v2/tokenization_deberta_v2.py | # coding=utf-8
# Copyright 2020 Microsoft and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model DeBERTa."""
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as sp
from ...tokenization_utils import PreTrainedTokenizer
# Checkpoint name -> URL of the remote SentencePiece model file for each
# officially released DeBERTa-v2 checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/spm.model",
        "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/spm.model",
        "microsoft/deberta-v2-xlarge-mnli": (
            "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/spm.model"
        ),
        "microsoft/deberta-v2-xxlarge-mnli": (
            "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/spm.model"
        ),
    }
}
# Maximum model input length (in tokens) for each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/deberta-v2-xlarge": 512,
    "microsoft/deberta-v2-xxlarge": 512,
    "microsoft/deberta-v2-xlarge-mnli": 512,
    "microsoft/deberta-v2-xxlarge-mnli": 512,
}
# Tokenizer __init__ kwargs to apply when loading each pretrained checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/deberta-v2-xlarge": {"do_lower_case": False},
    "microsoft/deberta-v2-xxlarge": {"do_lower_case": False},
    "microsoft/deberta-v2-xlarge-mnli": {"do_lower_case": False},
    "microsoft/deberta-v2-xxlarge-mnli": {"do_lower_case": False},
}
# Local filename the SentencePiece model is saved under.
VOCAB_FILES_NAMES = {"vocab_file": "spm.model"}
class DebertaV2Tokenizer(PreTrainedTokenizer):
    r"""
    Constructs a DeBERTa-v2 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        do_lower_case (`bool`, *optional*, defaults to `False`):
            Whether or not to lowercase the input when tokenizing.
        bos_token (`string`, *optional*, defaults to `"[CLS]"`):
            The beginning of sequence token that was used during pre-training. Can be used a sequence classifier token.
            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.
        eos_token (`string`, *optional*, defaults to `"[SEP]"`):
            The end of sequence token. When building a sequence using special tokens, this is not the token that is
            used for the end of sequence. The token used is the `sep_token`.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:
            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
            - `nbest_size = {0,1}`: No sampling is performed.
            - `nbest_size > 1`: samples from the nbest_size results.
            - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
            using forward-filtering-and-backward-sampling algorithm.
            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
            BPE-dropout.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        split_by_punct=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Normalize to a dict before passing to super().__init__ so the value
        # is also recorded in the tokenizer's init kwargs.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            split_by_punct=split_by_punct,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_lower_case = do_lower_case
        self.split_by_punct = split_by_punct
        self.vocab_file = vocab_file
        # All actual SentencePiece work is delegated to this wrapper.
        self._tokenizer = SPMTokenizer(
            vocab_file, self.all_special_tokens, split_by_punct=split_by_punct, sp_model_kwargs=self.sp_model_kwargs
        )
    @property
    def vocab_size(self):
        # Size of the underlying SentencePiece vocabulary (excludes added tokens).
        return len(self.vocab)
    @property
    def vocab(self):
        # token -> id mapping maintained by the SentencePiece wrapper.
        return self._tokenizer.vocab
    def get_vocab(self):
        # Return a copy of the base vocab merged with any user-added tokens.
        vocab = self.vocab.copy()
        vocab.update(self.get_added_vocab())
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words"""
        if self.do_lower_case:
            text = text.lower()
        return self._tokenizer.tokenize(text)
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self._tokenizer.spm.PieceToId(token)
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        # Out-of-range ids map to the unknown token rather than raising.
        return self._tokenizer.spm.IdToPiece(index) if index < self.vocab_size else self.unk_token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        return self._tokenizer.decode(tokens)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A DeBERTa sequence has the following format:
        - single sequence: [CLS] X [SEP]
        - pair of sequences: [CLS] A [SEP] B [SEP]
        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.
        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        # 1s mark the [CLS]/[SEP] positions that will be added by
        # build_inputs_with_special_tokens; 0s mark sequence tokens.
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa
        sequence pair mask has the following format:
        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```
        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        # Optionally prepend a space so the first word is treated like any
        # mid-sentence word by SentencePiece.
        add_prefix_space = kwargs.pop("add_prefix_space", False)
        if is_split_into_words or add_prefix_space:
            text = " " + text
        return (text, kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        # Serialize the SentencePiece model into `save_directory`; returns the written path(s).
        return self._tokenizer.save_pretrained(save_directory, filename_prefix=filename_prefix)
class SPMTokenizer:
    r"""
    Constructs a tokenizer based on [SentencePiece](https://github.com/google/sentencepiece).
    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:
            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
            - `nbest_size = {0,1}`: No sampling is performed.
            - `nbest_size > 1`: samples from the nbest_size results.
            - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
            using forward-filtering-and-backward-sampling algorithm.
            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
            BPE-dropout.
    """
    def __init__(
        self, vocab_file, special_tokens, split_by_punct=False, sp_model_kwargs: Optional[Dict[str, Any]] = None
    ):
        self.split_by_punct = split_by_punct
        self.vocab_file = vocab_file
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        spm = sp.SentencePieceProcessor(**self.sp_model_kwargs)
        if not os.path.exists(vocab_file):
            raise FileNotFoundError(f"{vocab_file} does not exist!")
        spm.load(vocab_file)
        bpe_vocab_size = spm.GetPieceSize()
        # Token map
        # <unk> 0+1
        # <s> 1+1
        # </s> 2+1
        # Build token<->id lookups directly from the SentencePiece model.
        self.vocab = {spm.IdToPiece(i): i for i in range(bpe_vocab_size)}
        self.ids_to_tokens = [spm.IdToPiece(i) for i in range(bpe_vocab_size)]
        # self.vocab['[PAD]'] = 0
        # self.vocab['[CLS]'] = 1
        # self.vocab['[SEP]'] = 2
        # self.vocab['[UNK]'] = 3
        self.spm = spm
        self.special_tokens = special_tokens
    def __getstate__(self):
        # SentencePieceProcessor instances are not picklable; drop the handle
        # and reload it from `vocab_file` in __setstate__.
        state = self.__dict__.copy()
        state["spm"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.spm = sp.SentencePieceProcessor(**self.sp_model_kwargs)
        self.spm.Load(self.vocab_file)
    def tokenize(self, text):
        # Split `text` into SentencePiece sub-word pieces (strings).
        return self._encode_as_pieces(text)
    def convert_ids_to_tokens(self, ids):
        tokens = []
        for i in ids:
            tokens.append(self.ids_to_tokens[i])
        return tokens
    def decode(self, tokens, start=-1, end=-1, raw_text=None):
        # Two modes: without `raw_text`, detokenize via the SentencePiece model;
        # with `raw_text`, recover the substring of the original text that the
        # token span [start, end) maps to.
        if raw_text is None:
            current_sub_tokens = []
            out_string = ""
            prev_is_special = False
            for token in tokens:
                # make sure that special tokens are not decoded using sentencepiece model
                if token in self.special_tokens:
                    if not prev_is_special:
                        out_string += " "
                    out_string += self.spm.decode_pieces(current_sub_tokens) + token
                    prev_is_special = True
                    current_sub_tokens = []
                else:
                    current_sub_tokens.append(token)
                    prev_is_special = False
            out_string += self.spm.decode_pieces(current_sub_tokens)
            return out_string.strip()
        else:
            # Map each token index to the index of the word it belongs to, then
            # slice the raw text between the start and end words.
            words = self.split_to_words(raw_text)
            word_tokens = [self.tokenize(w) for w in words]
            token2words = [0] * len(tokens)
            tid = 0
            for i, w in enumerate(word_tokens):
                for k, t in enumerate(w):
                    token2words[tid] = i
                    tid += 1
            word_start = token2words[start]
            word_end = token2words[end] if end < len(tokens) else len(words)
            text = "".join(words[word_start:word_end])
            return text
    def add_special_token(self, token):
        # NOTE(review): new tokens are assigned id `len(self.vocab) - 1`, which
        # overlaps the last existing id — TODO confirm this is intentional.
        if token not in self.special_tokens:
            self.special_tokens.append(token)
            if token not in self.vocab:
                self.vocab[token] = len(self.vocab) - 1
                self.ids_to_tokens.append(token)
        return self.id(token)
    def part_of_whole_word(self, token, is_bos=False):
        # A piece continues a word unless it is whitespace/control/punctuation,
        # a special token, or starts with the SentencePiece word marker.
        if is_bos:
            return True
        if (
            len(token) == 1
            and (_is_whitespace(list(token)[0]) or _is_control(list(token)[0]) or _is_punctuation(list(token)[0]))
        ) or token in self.special_tokens:
            return False
        # U+2581 "▁", the SentencePiece word-boundary marker.
        word_start = b"\xe2\x96\x81".decode("utf-8")
        return not token.startswith(word_start)
    def pad(self):
        return "[PAD]"
    def bos(self):
        return "[CLS]"
    def eos(self):
        return "[SEP]"
    def unk(self):
        return "[UNK]"
    def mask(self):
        return "[MASK]"
    def sym(self, id):
        # id -> token string.
        return self.ids_to_tokens[id]
    def id(self, sym):
        # token string -> id; unknown symbols fall back to id 1.
        return self.vocab[sym] if sym in self.vocab else 1
    def _encode_as_pieces(self, text):
        text = convert_to_unicode(text)
        if self.split_by_punct:
            # Split on punctuation first, then encode each chunk separately.
            words = self._run_split_on_punc(text)
            pieces = [self.spm.encode(w, out_type=str) for w in words]
            return [p for w in pieces for p in w]
        else:
            return self.spm.encode(text, out_type=str)
    def split_to_words(self, text):
        # Reconstruct word boundaries in `text` by aligning SentencePiece
        # pieces (via the U+2581 marker) back onto the raw string.
        pieces = self._encode_as_pieces(text)
        word_start = b"\xe2\x96\x81".decode("utf-8")
        words = []
        offset = 0
        prev_end = 0
        for i, p in enumerate(pieces):
            if p.startswith(word_start):
                if offset > prev_end:
                    words.append(text[prev_end:offset])
                prev_end = offset
                w = p.replace(word_start, "")
            else:
                w = p
            try:
                # Advance `offset` past this piece in the raw text; look ahead to
                # the next non-empty piece to avoid skipping over it.
                s = text.index(w, offset)
                pn = ""
                k = i + 1
                while k < len(pieces):
                    pn = pieces[k].replace(word_start, "")
                    if len(pn) > 0:
                        break
                    k += 1
                if len(pn) > 0 and pn in text[offset:s]:
                    offset = offset + 1
                else:
                    offset = s + len(w)
            except Exception:
                # Piece not found in the raw text (e.g. normalization changed it);
                # advance one character and keep going.
                offset = offset + 1
        if prev_end < offset:
            words.append(text[prev_end:offset])
        return words
    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                # Each punctuation character becomes its own chunk.
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1
        return ["".join(x) for x in output]
    def save_pretrained(self, path: str, filename_prefix: str = None):
        # Write the serialized SentencePiece model proto to `path`.
        filename = VOCAB_FILES_NAMES[list(VOCAB_FILES_NAMES.keys())[0]]
        if filename_prefix is not None:
            filename = filename_prefix + "-" + filename
        full_path = os.path.join(path, filename)
        with open(full_path, "wb") as fs:
            fs.write(self.spm.serialized_model_proto())
        return (full_path,)
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
    # Bytes are decoded as UTF-8, silently dropping undecodable sequences.
    if isinstance(text, bytes):
        return text.decode("utf-8", "ignore")
    if isinstance(text, str):
        return text
    raise ValueError(f"Unsupported string type: {type(text)}")
|
2877025939/tabelVew-CollectionView | 13,217 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/Masonry/Masonry/MASViewConstraint.m | //
// MASConstraint.m
// Masonry
//
// Created by Jonas Budelmann on 20/07/13.
// Copyright (c) 2013 cloudling. All rights reserved.
//
#import "MASViewConstraint.h"
#import "MASConstraint+Private.h"
#import "MASCompositeConstraint.h"
#import "MASLayoutConstraint.h"
#import "View+MASAdditions.h"
#import <objc/runtime.h>
@interface MAS_VIEW (MASConstraints)

/// Constraints that have been installed on this view, tracked via an
/// associated object so no ivar needs to be added to the view class.
@property (nonatomic, readonly) NSMutableSet *mas_installedConstraints;

@end

@implementation MAS_VIEW (MASConstraints)

static char kInstalledConstraintsKey;

- (NSMutableSet *)mas_installedConstraints {
    // Lazily create the backing set on first access.
    NSMutableSet *installed = objc_getAssociatedObject(self, &kInstalledConstraintsKey);
    if (installed == nil) {
        installed = [NSMutableSet set];
        objc_setAssociatedObject(self, &kInstalledConstraintsKey, installed, OBJC_ASSOCIATION_RETAIN_NONATOMIC);
    }
    return installed;
}

@end
@interface MASViewConstraint ()
// Right-hand side attribute of the relation, set via equalToWithRelation.
@property (nonatomic, strong, readwrite) MASViewAttribute *secondViewAttribute;
// View the generated NSLayoutConstraint was added to (see -install).
@property (nonatomic, weak) MAS_VIEW *installedView;
// The underlying NSLayoutConstraint once installed; weak because the
// installed view owns it.
@property (nonatomic, weak) MASLayoutConstraint *layoutConstraint;
@property (nonatomic, assign) NSLayoutRelation layoutRelation;
@property (nonatomic, assign) MASLayoutPriority layoutPriority;
@property (nonatomic, assign) CGFloat layoutMultiplier;
@property (nonatomic, assign) CGFloat layoutConstant;
// YES once a relation (==, >=, <=) has been defined for this constraint.
@property (nonatomic, assign) BOOL hasLayoutRelation;
// Debug key attached to the generated constraint (see -key).
@property (nonatomic, strong) id mas_key;
// macOS only: when YES, constant changes go through the animator proxy.
@property (nonatomic, assign) BOOL useAnimator;
@end
@implementation MASViewConstraint
- (id)initWithFirstViewAttribute:(MASViewAttribute *)firstViewAttribute {
self = [super init];
if (!self) return nil;
_firstViewAttribute = firstViewAttribute;
self.layoutPriority = MASLayoutPriorityRequired;
self.layoutMultiplier = 1;
return self;
}
#pragma mark - NSCoping
- (id)copyWithZone:(NSZone __unused *)zone {
MASViewConstraint *constraint = [[MASViewConstraint alloc] initWithFirstViewAttribute:self.firstViewAttribute];
constraint.layoutConstant = self.layoutConstant;
constraint.layoutRelation = self.layoutRelation;
constraint.layoutPriority = self.layoutPriority;
constraint.layoutMultiplier = self.layoutMultiplier;
constraint.delegate = self.delegate;
return constraint;
}
#pragma mark - Public
+ (NSArray *)installedConstraintsForView:(MAS_VIEW *)view {
return [view.mas_installedConstraints allObjects];
}
#pragma mark - Private
- (void)setLayoutConstant:(CGFloat)layoutConstant {
_layoutConstant = layoutConstant;
#if TARGET_OS_MAC && !(TARGET_OS_IPHONE || TARGET_OS_TV)
if (self.useAnimator) {
[self.layoutConstraint.animator setConstant:layoutConstant];
} else {
self.layoutConstraint.constant = layoutConstant;
}
#else
self.layoutConstraint.constant = layoutConstant;
#endif
}
- (void)setLayoutRelation:(NSLayoutRelation)layoutRelation {
_layoutRelation = layoutRelation;
self.hasLayoutRelation = YES;
}
- (BOOL)supportsActiveProperty {
return [self.layoutConstraint respondsToSelector:@selector(isActive)];
}
- (BOOL)isActive {
BOOL active = YES;
if ([self supportsActiveProperty]) {
active = [self.layoutConstraint isActive];
}
return active;
}
- (BOOL)hasBeenInstalled {
return (self.layoutConstraint != nil) && [self isActive];
}
- (void)setSecondViewAttribute:(id)secondViewAttribute {
if ([secondViewAttribute isKindOfClass:NSValue.class]) {
[self setLayoutConstantWithValue:secondViewAttribute];
} else if ([secondViewAttribute isKindOfClass:MAS_VIEW.class]) {
_secondViewAttribute = [[MASViewAttribute alloc] initWithView:secondViewAttribute layoutAttribute:self.firstViewAttribute.layoutAttribute];
} else if ([secondViewAttribute isKindOfClass:MASViewAttribute.class]) {
_secondViewAttribute = secondViewAttribute;
} else {
NSAssert(NO, @"attempting to add unsupported attribute: %@", secondViewAttribute);
}
}
#pragma mark - NSLayoutConstraint multiplier proxies
- (MASConstraint * (^)(CGFloat))multipliedBy {
return ^id(CGFloat multiplier) {
NSAssert(!self.hasBeenInstalled,
@"Cannot modify constraint multiplier after it has been installed");
self.layoutMultiplier = multiplier;
return self;
};
}
- (MASConstraint * (^)(CGFloat))dividedBy {
return ^id(CGFloat divider) {
NSAssert(!self.hasBeenInstalled,
@"Cannot modify constraint multiplier after it has been installed");
self.layoutMultiplier = 1.0/divider;
return self;
};
}
#pragma mark - MASLayoutPriority proxy
- (MASConstraint * (^)(MASLayoutPriority))priority {
return ^id(MASLayoutPriority priority) {
NSAssert(!self.hasBeenInstalled,
@"Cannot modify constraint priority after it has been installed");
self.layoutPriority = priority;
return self;
};
}
#pragma mark - NSLayoutRelation proxy
- (MASConstraint * (^)(id, NSLayoutRelation))equalToWithRelation {
return ^id(id attribute, NSLayoutRelation relation) {
if ([attribute isKindOfClass:NSArray.class]) {
NSAssert(!self.hasLayoutRelation, @"Redefinition of constraint relation");
NSMutableArray *children = NSMutableArray.new;
for (id attr in attribute) {
MASViewConstraint *viewConstraint = [self copy];
viewConstraint.layoutRelation = relation;
viewConstraint.secondViewAttribute = attr;
[children addObject:viewConstraint];
}
MASCompositeConstraint *compositeConstraint = [[MASCompositeConstraint alloc] initWithChildren:children];
compositeConstraint.delegate = self.delegate;
[self.delegate constraint:self shouldBeReplacedWithConstraint:compositeConstraint];
return compositeConstraint;
} else {
NSAssert(!self.hasLayoutRelation || self.layoutRelation == relation && [attribute isKindOfClass:NSValue.class], @"Redefinition of constraint relation");
self.layoutRelation = relation;
self.secondViewAttribute = attribute;
return self;
}
};
}
#pragma mark - Semantic properties
- (MASConstraint *)with {
return self;
}
- (MASConstraint *)and {
return self;
}
#pragma mark - attribute chaining
- (MASConstraint *)addConstraintWithLayoutAttribute:(NSLayoutAttribute)layoutAttribute {
NSAssert(!self.hasLayoutRelation, @"Attributes should be chained before defining the constraint relation");
return [self.delegate constraint:self addConstraintWithLayoutAttribute:layoutAttribute];
}
#pragma mark - Animator proxy
#if TARGET_OS_MAC && !(TARGET_OS_IPHONE || TARGET_OS_TV)
- (MASConstraint *)animator {
self.useAnimator = YES;
return self;
}
#endif
#pragma mark - debug helpers
- (MASConstraint * (^)(id))key {
return ^id(id key) {
self.mas_key = key;
return self;
};
}
#pragma mark - NSLayoutConstraint constant setters
- (void)setInsets:(MASEdgeInsets)insets {
NSLayoutAttribute layoutAttribute = self.firstViewAttribute.layoutAttribute;
switch (layoutAttribute) {
case NSLayoutAttributeLeft:
case NSLayoutAttributeLeading:
self.layoutConstant = insets.left;
break;
case NSLayoutAttributeTop:
self.layoutConstant = insets.top;
break;
case NSLayoutAttributeBottom:
self.layoutConstant = -insets.bottom;
break;
case NSLayoutAttributeRight:
case NSLayoutAttributeTrailing:
self.layoutConstant = -insets.right;
break;
default:
break;
}
}
- (void)setOffset:(CGFloat)offset {
self.layoutConstant = offset;
}
- (void)setSizeOffset:(CGSize)sizeOffset {
NSLayoutAttribute layoutAttribute = self.firstViewAttribute.layoutAttribute;
switch (layoutAttribute) {
case NSLayoutAttributeWidth:
self.layoutConstant = sizeOffset.width;
break;
case NSLayoutAttributeHeight:
self.layoutConstant = sizeOffset.height;
break;
default:
break;
}
}
- (void)setCenterOffset:(CGPoint)centerOffset {
NSLayoutAttribute layoutAttribute = self.firstViewAttribute.layoutAttribute;
switch (layoutAttribute) {
case NSLayoutAttributeCenterX:
self.layoutConstant = centerOffset.x;
break;
case NSLayoutAttributeCenterY:
self.layoutConstant = centerOffset.y;
break;
default:
break;
}
}
#pragma mark - MASConstraint
- (void)activate {
[self install];
}
- (void)deactivate {
[self uninstall];
}
- (void)install {
if (self.hasBeenInstalled) {
return;
}
if ([self supportsActiveProperty] && self.layoutConstraint) {
self.layoutConstraint.active = YES;
[self.firstViewAttribute.view.mas_installedConstraints addObject:self];
return;
}
MAS_VIEW *firstLayoutItem = self.firstViewAttribute.item;
NSLayoutAttribute firstLayoutAttribute = self.firstViewAttribute.layoutAttribute;
MAS_VIEW *secondLayoutItem = self.secondViewAttribute.item;
NSLayoutAttribute secondLayoutAttribute = self.secondViewAttribute.layoutAttribute;
// alignment attributes must have a secondViewAttribute
// therefore we assume that is refering to superview
// eg make.left.equalTo(@10)
if (!self.firstViewAttribute.isSizeAttribute && !self.secondViewAttribute) {
secondLayoutItem = self.firstViewAttribute.view.superview;
secondLayoutAttribute = firstLayoutAttribute;
}
MASLayoutConstraint *layoutConstraint
= [MASLayoutConstraint constraintWithItem:firstLayoutItem
attribute:firstLayoutAttribute
relatedBy:self.layoutRelation
toItem:secondLayoutItem
attribute:secondLayoutAttribute
multiplier:self.layoutMultiplier
constant:self.layoutConstant];
layoutConstraint.priority = self.layoutPriority;
layoutConstraint.mas_key = self.mas_key;
if (self.secondViewAttribute.view) {
MAS_VIEW *closestCommonSuperview = [self.firstViewAttribute.view mas_closestCommonSuperview:self.secondViewAttribute.view];
NSAssert(closestCommonSuperview,
@"couldn't find a common superview for %@ and %@",
self.firstViewAttribute.view, self.secondViewAttribute.view);
self.installedView = closestCommonSuperview;
} else if (self.firstViewAttribute.isSizeAttribute) {
self.installedView = self.firstViewAttribute.view;
} else {
self.installedView = self.firstViewAttribute.view.superview;
}
MASLayoutConstraint *existingConstraint = nil;
if (self.updateExisting) {
existingConstraint = [self layoutConstraintSimilarTo:layoutConstraint];
}
if (existingConstraint) {
// just update the constant
existingConstraint.constant = layoutConstraint.constant;
self.layoutConstraint = existingConstraint;
} else {
[self.installedView addConstraint:layoutConstraint];
self.layoutConstraint = layoutConstraint;
[firstLayoutItem.mas_installedConstraints addObject:self];
}
}
- (MASLayoutConstraint *)layoutConstraintSimilarTo:(MASLayoutConstraint *)layoutConstraint {
// check if any constraints are the same apart from the only mutable property constant
// go through constraints in reverse as we do not want to match auto-resizing or interface builder constraints
// and they are likely to be added first.
for (NSLayoutConstraint *existingConstraint in self.installedView.constraints.reverseObjectEnumerator) {
if (![existingConstraint isKindOfClass:MASLayoutConstraint.class]) continue;
if (existingConstraint.firstItem != layoutConstraint.firstItem) continue;
if (existingConstraint.secondItem != layoutConstraint.secondItem) continue;
if (existingConstraint.firstAttribute != layoutConstraint.firstAttribute) continue;
if (existingConstraint.secondAttribute != layoutConstraint.secondAttribute) continue;
if (existingConstraint.relation != layoutConstraint.relation) continue;
if (existingConstraint.multiplier != layoutConstraint.multiplier) continue;
if (existingConstraint.priority != layoutConstraint.priority) continue;
return (id)existingConstraint;
}
return nil;
}
- (void)uninstall {
if ([self supportsActiveProperty]) {
self.layoutConstraint.active = NO;
[self.firstViewAttribute.view.mas_installedConstraints removeObject:self];
return;
}
[self.installedView removeConstraint:self.layoutConstraint];
self.layoutConstraint = nil;
self.installedView = nil;
[self.firstViewAttribute.view.mas_installedConstraints removeObject:self];
}
@end
|
2877025939/tabelVew-CollectionView | 1,785 | tabelVew嵌套CollectionView/tabelVew嵌套CollectionView/Masonry/Masonry/MASConstraint+Private.h | //
// MASConstraint+Private.h
// Masonry
//
// Created by Nick Tymchenko on 29/04/14.
// Copyright (c) 2014 cloudling. All rights reserved.
//
#import "MASConstraint.h"
@protocol MASConstraintDelegate;
@interface MASConstraint ()
/**
 *  Whether or not to check for an existing constraint instead of adding constraint
 */
@property (nonatomic, assign) BOOL updateExisting;
/**
 *  Usually MASConstraintMaker but could be a parent MASConstraint
 */
@property (nonatomic, weak) id<MASConstraintDelegate> delegate;
/**
 *  Based on a provided value type, is equal to calling:
 *  NSNumber - setOffset:
 *  NSValue with CGPoint - setPointOffset:
 *  NSValue with CGSize - setSizeOffset:
 *  NSValue with MASEdgeInsets - setInsets:
 */
- (void)setLayoutConstantWithValue:(NSValue *)value;
@end
// Methods every concrete MASConstraint subclass must implement.
@interface MASConstraint (Abstract)
/**
 *	Sets the constraint relation to given NSLayoutRelation
 *	returns a block which accepts one of the following:
 *		MASViewAttribute, UIView, NSValue, NSArray
 *	see readme for more details.
 */
- (MASConstraint * (^)(id, NSLayoutRelation))equalToWithRelation;
/**
 *	Override to set a custom chaining behaviour
 */
- (MASConstraint *)addConstraintWithLayoutAttribute:(NSLayoutAttribute)layoutAttribute;
@end
@protocol MASConstraintDelegate <NSObject>
/**
 *	Notifies the delegate when the constraint needs to be replaced with another constraint. For example
 *	A MASViewConstraint may turn into a MASCompositeConstraint when an array is passed to one of the equality blocks
 */
- (void)constraint:(MASConstraint *)constraint shouldBeReplacedWithConstraint:(MASConstraint *)replacementConstraint;
// Asks the delegate to extend the chain with a new constraint for the given attribute.
- (MASConstraint *)constraint:(MASConstraint *)constraint addConstraintWithLayoutAttribute:(NSLayoutAttribute)layoutAttribute;
@end
|
27182812/ChatGLM-LLaMA-chinese-insturct | 68,990 | src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py | # coding=utf-8
# Copyright 2021 Microsoft and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 DeBERTa-v2 model."""
from typing import Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFMaskedLMOutput,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
TFMaskedLanguageModelingLoss,
TFModelInputType,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFTokenClassificationLoss,
get_initializer,
unpack_inputs,
)
from ...tf_utils import shape_list, stable_softmax
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_deberta_v2 import DebertaV2Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "DebertaV2Config"
_CHECKPOINT_FOR_DOC = "kamalkraj/deberta-v2-xlarge"
TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = [
"kamalkraj/deberta-v2-xlarge",
# See all DeBERTa models at https://huggingface.co/models?filter=deberta-v2
]
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaContextPooler with Deberta->DebertaV2
class TFDebertaV2ContextPooler(tf.keras.layers.Layer):
    """Pools a sequence by transforming the first token's hidden state."""

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        # Projection and stabilized dropout applied to the first token.
        self.dense = tf.keras.layers.Dense(config.pooler_hidden_size, name="dense")
        self.dropout = TFDebertaV2StableDropout(config.pooler_dropout, name="dropout")
        self.config = config

    def call(self, hidden_states, training: bool = False):
        # "Pooling" here means: take the first token, apply dropout, project,
        # then run the configured pooler activation.
        first_token = hidden_states[:, 0]
        dropped = self.dropout(first_token, training=training)
        projected = self.dense(dropped)
        activation = get_tf_activation(self.config.pooler_hidden_act)
        return activation(projected)

    @property
    def output_dim(self) -> int:
        # The pooled representation keeps the model's hidden size.
        return self.config.hidden_size
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaXSoftmax with Deberta->DebertaV2
class TFDebertaV2XSoftmax(tf.keras.layers.Layer):
    """
    Masked softmax optimized for saving memory.

    Args:
        input (`tf.Tensor`): The input tensor that will apply softmax.
        mask (`tf.Tensor`): The mask matrix where 0 indicates that the element will be ignored in the softmax
            calculation.
        dim (int): The dimension that will apply softmax.
    """

    def __init__(self, axis=-1, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis

    def call(self, inputs: tf.Tensor, mask: tf.Tensor):
        # Positions with mask == 0 are excluded: push them to -inf so they
        # contribute nothing to the softmax, then zero them out afterwards.
        ignored = tf.logical_not(tf.cast(mask, tf.bool))
        masked_logits = tf.where(ignored, float("-inf"), inputs)
        probs = stable_softmax(masked_logits, self.axis)
        return tf.where(ignored, 0.0, probs)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaStableDropout with Deberta->DebertaV2
class TFDebertaV2StableDropout(tf.keras.layers.Layer):
    """
    Optimized dropout module for stabilizing the training
    Args:
        drop_prob (float): the dropout probabilities
    """
    def __init__(self, drop_prob, **kwargs):
        super().__init__(**kwargs)
        # Probability of dropping an element; 0 makes the layer a no-op.
        self.drop_prob = drop_prob
    @tf.custom_gradient
    def xdropout(self, inputs):
        """
        Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by 1/drop_prob.
        """
        # `mask` is True where an element is dropped (Bernoulli keep-prob is 1 - drop_prob).
        mask = tf.cast(
            1
            - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)),
            tf.bool,
        )
        # Inverted-dropout scaling keeps the expected activation magnitude unchanged.
        scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=tf.float32)
        if self.drop_prob > 0:
            inputs = tf.where(mask, 0.0, inputs) * scale
        def grad(upstream):
            # The custom gradient reuses the exact same mask and scale as the
            # forward pass, which is the point of this "stable" dropout.
            if self.drop_prob > 0:
                return tf.where(mask, 0.0, upstream) * scale
            else:
                return upstream
        return inputs, grad
    def call(self, inputs: tf.Tensor, training: bool = False):
        # Dropout is only active in training mode; inference is identity.
        if training:
            return self.xdropout(inputs)
        return inputs
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaSelfOutput with Deberta->DebertaV2
class TFDebertaV2SelfOutput(tf.keras.layers.Layer):
    """Projects the attention output, applies dropout, and adds residual + LayerNorm."""

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(config.hidden_size, name="dense")
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")

    def call(self, hidden_states, input_tensor, training: bool = False):
        projected = self.dense(hidden_states)
        projected = self.dropout(projected, training=training)
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaAttention with Deberta->DebertaV2
class TFDebertaV2Attention(tf.keras.layers.Layer):
    """Disentangled self-attention followed by its output projection block."""

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.self = TFDebertaV2DisentangledSelfAttention(config, name="self")
        self.dense_output = TFDebertaV2SelfOutput(config, name="output")
        self.config = config

    def call(
        self,
        input_tensor: tf.Tensor,
        attention_mask: tf.Tensor,
        query_states: tf.Tensor = None,
        relative_pos: tf.Tensor = None,
        rel_embeddings: tf.Tensor = None,
        output_attentions: bool = False,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        attn_results = self.self(
            hidden_states=input_tensor,
            attention_mask=attention_mask,
            query_states=query_states,
            relative_pos=relative_pos,
            rel_embeddings=rel_embeddings,
            output_attentions=output_attentions,
            training=training,
        )
        # The residual target defaults to the layer input when no explicit
        # query states were provided.
        if query_states is None:
            query_states = input_tensor
        projected = self.dense_output(
            hidden_states=attn_results[0], input_tensor=query_states, training=training
        )
        # Append the attention probabilities when they were requested.
        return (projected,) + attn_results[1:]
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaIntermediate with Deberta->DebertaV2
class TFDebertaV2Intermediate(tf.keras.layers.Layer):
    """Feed-forward expansion: dense projection followed by the hidden activation."""

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        # The activation may be configured either as a string name or a callable.
        self.intermediate_act_fn = (
            get_tf_activation(config.hidden_act) if isinstance(config.hidden_act, str) else config.hidden_act
        )

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        return self.intermediate_act_fn(self.dense(inputs=hidden_states))
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaOutput with Deberta->DebertaV2
class TFDebertaV2Output(tf.keras.layers.Layer):
    """Feed-forward contraction with dropout, residual connection, and LayerNorm."""

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")

    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
        contracted = self.dense(inputs=hidden_states)
        contracted = self.dropout(contracted, training=training)
        # Residual add then normalize, as in the standard transformer block.
        return self.LayerNorm(contracted + input_tensor)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaLayer with Deberta->DebertaV2
class TFDebertaV2Layer(tf.keras.layers.Layer):
    """One full transformer block: attention, intermediate expansion, and output."""

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.attention = TFDebertaV2Attention(config, name="attention")
        self.intermediate = TFDebertaV2Intermediate(config, name="intermediate")
        self.bert_output = TFDebertaV2Output(config, name="output")

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor,
        query_states: tf.Tensor = None,
        relative_pos: tf.Tensor = None,
        rel_embeddings: tf.Tensor = None,
        output_attentions: bool = False,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        attn_outputs = self.attention(
            input_tensor=hidden_states,
            attention_mask=attention_mask,
            query_states=query_states,
            relative_pos=relative_pos,
            rel_embeddings=rel_embeddings,
            output_attentions=output_attentions,
            training=training,
        )
        attn_state = attn_outputs[0]
        expanded = self.intermediate(hidden_states=attn_state)
        block_output = self.bert_output(
            hidden_states=expanded, input_tensor=attn_state, training=training
        )
        # Keep the attention probabilities (if any) after the block output.
        return (block_output,) + attn_outputs[1:]
class TFDebertaV2ConvLayer(tf.keras.layers.Layer):
    """Sequence-length-preserving 1D convolution applied after the first encoder layer.

    The convolution output is masked, dropped out, activated, added to the
    residual stream, and layer-normalized.
    """

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.kernel_size = getattr(config, "conv_kernel_size", 3)
        # groups = getattr(config, "conv_groups", 1)
        self.conv_act = get_tf_activation(getattr(config, "conv_act", "tanh"))
        # "Same" padding for an odd kernel, so the sequence length is preserved.
        self.padding = (self.kernel_size - 1) // 2
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")
        self.config = config

    def build(self, input_shape):
        # Kernel shape: [width, in_channels, out_channels]; both channel dims
        # equal hidden_size so the representation width is unchanged.
        with tf.name_scope("conv"):
            self.conv_kernel = self.add_weight(
                name="kernel",
                shape=[self.kernel_size, self.config.hidden_size, self.config.hidden_size],
                initializer=get_initializer(self.config.initializer_range),
            )
            self.conv_bias = self.add_weight(
                name="bias", shape=[self.config.hidden_size], initializer=tf.zeros_initializer()
            )
        return super().build(input_shape)

    def call(
        self, hidden_states: tf.Tensor, residual_states: tf.Tensor, input_mask: tf.Tensor, training: bool = False
    ) -> tf.Tensor:
        # Run the 1D convolution as a 2D convolution over a dummy height axis.
        out = tf.nn.conv2d(
            tf.expand_dims(hidden_states, 1),
            tf.expand_dims(self.conv_kernel, 0),
            strides=1,
            padding=[[0, 0], [0, 0], [self.padding, self.padding], [0, 0]],
        )
        out = tf.squeeze(tf.nn.bias_add(out, self.conv_bias), 1)
        # Bug fix: the mask was previously applied unconditionally BEFORE the
        # `input_mask is None` check below, so a None mask raised a TypeError
        # and made the None branch unreachable. Only mask when one is provided.
        if input_mask is not None:
            rmask = tf.cast(1 - input_mask, tf.bool)
            out = tf.where(tf.broadcast_to(tf.expand_dims(rmask, -1), shape_list(out)), 0.0, out)
        out = self.dropout(out, training=training)
        out = self.conv_act(out)
        layer_norm_input = residual_states + out
        output = self.LayerNorm(layer_norm_input)
        if input_mask is None:
            output_states = output
        else:
            # Collapse a broadcast 4D attention mask down to [batch, seq] and
            # zero out the padded positions of the normalized output.
            if len(shape_list(input_mask)) != len(shape_list(layer_norm_input)):
                if len(shape_list(input_mask)) == 4:
                    input_mask = tf.squeeze(tf.squeeze(input_mask, axis=1), axis=1)
                input_mask = tf.cast(tf.expand_dims(input_mask, axis=2), tf.float32)
            output_states = output * input_mask
        return output_states
class TFDebertaV2Encoder(tf.keras.layers.Layer):
    """Stack of DeBERTa-v2 transformer layers sharing one relative-position embedding table.

    The encoder owns the relative-position embedding table (shared by all
    layers), an optional LayerNorm over that table, and an optional
    convolution layer that augments the first transformer layer's output.
    """
    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.layer = [TFDebertaV2Layer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
        self.relative_attention = getattr(config, "relative_attention", False)
        self.config = config
        if self.relative_attention:
            # Fall back to the absolute-position budget when no explicit limit is set.
            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings
            self.position_buckets = getattr(config, "position_buckets", -1)
            # The table covers both negative and positive relative distances.
            self.pos_ebd_size = self.max_relative_positions * 2
            if self.position_buckets > 0:
                self.pos_ebd_size = self.position_buckets * 2
        # `norm_rel_ebd` is a "|"-separated option list, e.g. "layer_norm".
        self.norm_rel_ebd = [x.strip() for x in getattr(config, "norm_rel_ebd", "none").lower().split("|")]
        if "layer_norm" in self.norm_rel_ebd:
            self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.conv = TFDebertaV2ConvLayer(config, name="conv") if getattr(config, "conv_kernel_size", 0) > 0 else None
    def build(self, input_shape):
        # The shared relative-position embedding table is created at build time.
        if self.relative_attention:
            self.rel_embeddings = self.add_weight(
                name="rel_embeddings.weight",
                shape=[self.pos_ebd_size, self.config.hidden_size],
                initializer=get_initializer(self.config.initializer_range),
            )
        return super().build(input_shape)
    def get_rel_embedding(self):
        # Return the (optionally layer-normalized) relative embedding table,
        # or None when relative attention is disabled.
        rel_embeddings = self.rel_embeddings if self.relative_attention else None
        if rel_embeddings is not None and ("layer_norm" in self.norm_rel_ebd):
            rel_embeddings = self.LayerNorm(rel_embeddings)
        return rel_embeddings
    def get_attention_mask(self, attention_mask):
        # Expand a [batch, seq] mask to a pairwise [batch, 1, seq, seq] mask;
        # a 3D mask only gains the head dimension; 4D masks pass through.
        if len(shape_list(attention_mask)) <= 2:
            extended_attention_mask = tf.expand_dims(tf.expand_dims(attention_mask, 1), 2)
            attention_mask = extended_attention_mask * tf.expand_dims(tf.squeeze(extended_attention_mask, -2), -1)
            attention_mask = tf.cast(attention_mask, tf.uint8)
        elif len(shape_list(attention_mask)) == 3:
            attention_mask = tf.expand_dims(attention_mask, 1)
        return attention_mask
    def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
        # Lazily build the relative-position matrix when needed but not supplied.
        if self.relative_attention and relative_pos is None:
            q = shape_list(query_states)[-2] if query_states is not None else shape_list(hidden_states)[-2]
            relative_pos = build_relative_position(
                q,
                shape_list(hidden_states)[-2],
                bucket_size=self.position_buckets,
                max_position=self.max_relative_positions,
            )
        return relative_pos
    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor,
        query_states: tf.Tensor = None,
        relative_pos: tf.Tensor = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        training: bool = False,
    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        # Derive a per-token [batch, seq] mask for the conv layer from a
        # higher-rank attention mask if necessary.
        if len(shape_list(attention_mask)) <= 2:
            input_mask = attention_mask
        else:
            input_mask = tf.cast(tf.math.reduce_sum(attention_mask, axis=-2) > 0, dtype=tf.uint8)
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        attention_mask = self.get_attention_mask(attention_mask)
        relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
        next_kv = hidden_states
        rel_embeddings = self.get_rel_embedding()
        output_states = next_kv
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (output_states,)
            layer_outputs = layer_module(
                hidden_states=next_kv,
                attention_mask=attention_mask,
                query_states=query_states,
                relative_pos=relative_pos,
                rel_embeddings=rel_embeddings,
                output_attentions=output_attentions,
                training=training,
            )
            output_states = layer_outputs[0]
            # The optional convolution only augments the first layer's output.
            if i == 0 and self.conv is not None:
                output_states = self.conv(hidden_states, output_states, input_mask)
            next_kv = output_states
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (output_states,)
        if not return_dict:
            return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None)
        return TFBaseModelOutput(
            last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions
        )
def make_log_bucket_position(relative_pos, bucket_size, max_position):
    """Map relative positions to log-spaced buckets beyond `bucket_size // 2`."""
    direction = tf.math.sign(relative_pos)
    mid = bucket_size // 2
    # Positions strictly inside (-mid, mid) are treated as "close" and keep a
    # linear bucket; the rest use their absolute distance.
    magnitude = tf.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, tf.math.abs(relative_pos))
    scaled_log = (
        tf.math.ceil(
            tf.cast(tf.math.log(magnitude / mid), tf.float32) / tf.math.log((max_position - 1) / mid) * (mid - 1)
        )
        + mid
    )
    # Close positions map to themselves; distant ones to a signed log bucket.
    return tf.cast(
        tf.where(magnitude <= mid, tf.cast(relative_pos, tf.float32), scaled_log * tf.cast(direction, tf.float32)),
        tf.int32,
    )
def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1):
    """
    Build the relative-position matrix between query and key tokens.

    We assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key
    \\(P_k\\) is range from (0, key_size); the relative positions from query to key are \\(R_{q \\rightarrow k} = P_q -
    P_k\\).

    Args:
        query_size (int): the length of query
        key_size (int): the length of key
        bucket_size (int): the size of position bucket
        max_position (int): the maximum allowed absolute position

    Return:
        `tf.Tensor`: A tensor with shape [1, query_size, key_size]
    """
    query_ids = tf.range(query_size, dtype=tf.int32)
    key_ids = tf.range(key_size, dtype=tf.int32)
    # distances[q, k] = q - k
    distances = query_ids[:, None] - tf.tile(tf.expand_dims(key_ids, axis=0), [shape_list(query_ids)[0], 1])
    # Optionally compress long distances into logarithmic buckets.
    if bucket_size > 0 and max_position > 0:
        distances = make_log_bucket_position(distances, bucket_size, max_position)
    distances = distances[:query_size, :]
    return tf.cast(tf.expand_dims(distances, axis=0), tf.int64)
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
    """Broadcast content->position indices to the query layer's leading dims."""
    target_shape = shape_list(query_layer)[:3] + [shape_list(relative_pos)[-1]]
    return tf.broadcast_to(c2p_pos, target_shape)
def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
    """Broadcast position->content indices to [q0, q1, key_len, key_len]."""
    key_len = shape_list(key_layer)[-2]
    target_shape = shape_list(query_layer)[:2] + [key_len, key_len]
    return tf.broadcast_to(c2p_pos, target_shape)
def pos_dynamic_expand(pos_index, p2c_att, key_layer):
    """Broadcast the position index over p2c's batch dims and the key length."""
    target_shape = shape_list(p2c_att)[:2] + [shape_list(pos_index)[-2], shape_list(key_layer)[-2]]
    return tf.broadcast_to(pos_index, target_shape)
def take_along_axis(x, indices):
    """Gather along the last axis; a valid port of `np.take_along_axis` only for axis=-1.

    TPUs handle large gathers + reshapes poorly
    (see https://github.com/huggingface/transformers/issues/18239), so a
    one-hot/einsum formulation is used there; other devices use a plain gather.
    """
    if isinstance(tf.distribute.get_strategy(), tf.distribute.TPUStrategy):
        # [B, S, P] -> [B, S, P, D]: build one-hot selectors, then contract with x.
        selectors = tf.one_hot(indices, depth=x.shape[-1], dtype=x.dtype)
        # Ignoring the first two dims, this multiplies a matrix (one-hot) by a
        # vector (x): [B, S, P, D] . [B, S, D] = [B, S, P]
        return tf.einsum("ijkl,ijl->ijk", selectors, x)
    # GPUs, on the other hand, prefer gathers over large one-hot + matmuls.
    return tf.gather(x, indices, batch_dims=2)
class TFDebertaV2DisentangledSelfAttention(tf.keras.layers.Layer):
    """
    Disentangled self-attention module
    Parameters:
        config (`DebertaV2Config`):
            A model config class instance with the configuration to build a new model. The schema is similar to
            *BertConfig*, for more details, please refer [`DebertaV2Config`]
    """
    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        _attention_head_size = config.hidden_size // config.num_attention_heads
        # The head size may be overridden by the config independently of hidden_size.
        self.attention_head_size = getattr(config, "attention_head_size", _attention_head_size)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query_proj = tf.keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="query_proj",
            use_bias=True,
        )
        self.key_proj = tf.keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="key_proj",
            use_bias=True,
        )
        self.value_proj = tf.keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="value_proj",
            use_bias=True,
        )
        # When share_att_key is set, the positional terms reuse query_proj/key_proj
        # instead of the dedicated pos_*_proj layers below.
        self.share_att_key = getattr(config, "share_att_key", False)
        self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
        self.relative_attention = getattr(config, "relative_attention", False)
        if self.relative_attention:
            self.position_buckets = getattr(config, "position_buckets", -1)
            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings
            # Effective span of the relative-position table (bucketed if configured).
            self.pos_ebd_size = self.max_relative_positions
            if self.position_buckets > 0:
                self.pos_ebd_size = self.position_buckets
            self.pos_dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="pos_dropout")
            if not self.share_att_key:
                if "c2p" in self.pos_att_type:
                    self.pos_key_proj = tf.keras.layers.Dense(
                        self.all_head_size,
                        kernel_initializer=get_initializer(config.initializer_range),
                        name="pos_proj",
                        use_bias=True,
                    )
                if "p2c" in self.pos_att_type:
                    self.pos_query_proj = tf.keras.layers.Dense(
                        self.all_head_size,
                        kernel_initializer=get_initializer(config.initializer_range),
                        name="pos_q_proj",
                    )
        self.softmax = TFDebertaV2XSoftmax(axis=-1)
        self.dropout = TFDebertaV2StableDropout(config.attention_probs_dropout_prob, name="dropout")
    def transpose_for_scores(self, tensor: tf.Tensor, attention_heads: int) -> tf.Tensor:
        # Split the last axis into heads, then fold heads into the batch axis:
        # [batch, seq, all_head_size] -> [batch * heads, seq, head_size].
        tensor_shape = shape_list(tensor)
        # In graph mode mode, we can't reshape with -1 as the final dimension if the first dimension (batch size) is None
        shape = tensor_shape[:-1] + [attention_heads, tensor_shape[-1] // attention_heads]
        # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
        tensor = tf.reshape(tensor=tensor, shape=shape)
        tensor = tf.transpose(tensor, perm=[0, 2, 1, 3])
        x_shape = shape_list(tensor)
        tensor = tf.reshape(tensor, shape=[-1, x_shape[-2], x_shape[-1]])
        return tensor
    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor,
        query_states: tf.Tensor = None,
        relative_pos: tf.Tensor = None,
        rel_embeddings: tf.Tensor = None,
        output_attentions: bool = False,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        """
        Call the module
        Args:
            hidden_states (`tf.Tensor`):
                Input states to the module usually the output from previous layer, it will be the Q,K and V in
                *Attention(Q,K,V)*
            attention_mask (`tf.Tensor`):
                An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
                sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*
                th token.
            query_states (`tf.Tensor`, optional):
                The *Q* state in *Attention(Q,K,V)*.
            relative_pos (`tf.Tensor`):
                The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
                values ranging in [*-max_relative_positions*, *max_relative_positions*].
            rel_embeddings (`tf.Tensor`):
                The embedding of relative distances. It's a tensor of shape [\\(2 \\times
                \\text{max_relative_positions}\\), *hidden_size*].
            output_attentions (`bool`, optional):
                Whether to also return the attention probability matrix.
            training (`bool`, optional):
                Whether the layer runs in training mode (enables dropout).
        """
        if query_states is None:
            query_states = hidden_states
        # Q/K/V flattened to [batch * heads, seq, head_size] by transpose_for_scores.
        query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads)
        key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads)
        value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)
        rel_att = None
        # Take the dot product between "query" and "key" to get the raw attention scores.
        # The scale factor grows with the number of enabled attention components
        # (content-to-content plus the optional c2p / p2c terms).
        scale_factor = 1
        if "c2p" in self.pos_att_type:
            scale_factor += 1
        if "p2c" in self.pos_att_type:
            scale_factor += 1
        scale = tf.math.sqrt(tf.cast(shape_list(query_layer)[-1] * scale_factor, tf.float32))
        attention_scores = tf.matmul(query_layer, tf.transpose(key_layer, [0, 2, 1])) / scale
        if self.relative_attention:
            rel_embeddings = self.pos_dropout(rel_embeddings)
            rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)
        if rel_att is not None:
            attention_scores = attention_scores + rel_att
        # Un-fold heads out of the batch axis for the masked softmax.
        attention_scores = tf.reshape(
            attention_scores,
            (-1, self.num_attention_heads, shape_list(attention_scores)[-2], shape_list(attention_scores)[-1]),
        )
        # bsz x height x length x dimension
        attention_probs = self.softmax(attention_scores, attention_mask)
        attention_probs = self.dropout(attention_probs, training=training)
        context_layer = tf.matmul(
            tf.reshape(attention_probs, [-1, shape_list(attention_probs)[-2], shape_list(attention_probs)[-1]]),
            value_layer,
        )
        context_layer = tf.transpose(
            tf.reshape(
                context_layer,
                [-1, self.num_attention_heads, shape_list(context_layer)[-2], shape_list(context_layer)[-1]],
            ),
            [0, 2, 1, 3],
        )
        # Set the final dimension here explicitly.
        # Calling tf.reshape(context_layer, (*context_layer_shape[:-2], -1)) raises an error when executing
        # the model in graph mode as context_layer is reshaped to (None, 7, None) and Dense layer in TFDebertaV2SelfOutput
        # requires final input dimension to be defined
        context_layer_shape = shape_list(context_layer)
        new_context_layer_shape = context_layer_shape[:-2] + [context_layer_shape[-2] * context_layer_shape[-1]]
        context_layer = tf.reshape(context_layer, new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
    def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
        # Compute the relative-position attention bias (c2p and/or p2c terms).
        if relative_pos is None:
            q = shape_list(query_layer)[-2]
            relative_pos = build_relative_position(
                q,
                shape_list(key_layer)[-2],
                bucket_size=self.position_buckets,
                max_position=self.max_relative_positions,
            )
        shape_list_pos = shape_list(relative_pos)
        # Normalize relative_pos to rank 4.
        if len(shape_list_pos) == 2:
            relative_pos = tf.expand_dims(tf.expand_dims(relative_pos, 0), 0)
        elif len(shape_list_pos) == 3:
            relative_pos = tf.expand_dims(relative_pos, 1)
        # bsz x height x query x key
        elif len(shape_list_pos) != 4:
            raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {len(shape_list_pos)}")
        att_span = self.pos_ebd_size
        rel_embeddings = tf.expand_dims(
            rel_embeddings[self.pos_ebd_size - att_span : self.pos_ebd_size + att_span, :], 0
        )
        if self.share_att_key:
            # Reuse the content projections for the positional embeddings and
            # tile them across the batch (heads are already folded into batch).
            pos_query_layer = tf.tile(
                self.transpose_for_scores(self.query_proj(rel_embeddings), self.num_attention_heads),
                [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
            )
            pos_key_layer = tf.tile(
                self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads),
                [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
            )
        else:
            if "c2p" in self.pos_att_type:
                pos_key_layer = tf.tile(
                    self.transpose_for_scores(self.pos_key_proj(rel_embeddings), self.num_attention_heads),
                    [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
                )  # .split(self.all_head_size, dim=-1)
            if "p2c" in self.pos_att_type:
                pos_query_layer = tf.tile(
                    self.transpose_for_scores(self.pos_query_proj(rel_embeddings), self.num_attention_heads),
                    [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
                )  # .split(self.all_head_size, dim=-1)
        score = 0
        # content->position
        if "c2p" in self.pos_att_type:
            scale = tf.math.sqrt(tf.cast(shape_list(pos_key_layer)[-1] * scale_factor, tf.float32))
            c2p_att = tf.matmul(query_layer, tf.transpose(pos_key_layer, [0, 2, 1]))
            # Shift signed distances into table indices [0, 2 * att_span).
            c2p_pos = tf.clip_by_value(relative_pos + att_span, 0, att_span * 2 - 1)
            c2p_att = take_along_axis(
                c2p_att,
                tf.broadcast_to(
                    tf.squeeze(c2p_pos, 0),
                    [shape_list(query_layer)[0], shape_list(query_layer)[1], shape_list(relative_pos)[-1]],
                ),
            )
            score += c2p_att / scale
        # position->content
        if "p2c" in self.pos_att_type:
            scale = tf.math.sqrt(tf.cast(shape_list(pos_query_layer)[-1] * scale_factor, tf.float32))
            # When query and key lengths differ, rebuild key-to-key relative positions.
            if shape_list(key_layer)[-2] != shape_list(query_layer)[-2]:
                r_pos = build_relative_position(
                    shape_list(key_layer)[-2],
                    shape_list(key_layer)[-2],
                    bucket_size=self.position_buckets,
                    max_position=self.max_relative_positions,
                )
                r_pos = tf.expand_dims(r_pos, 0)
            else:
                r_pos = relative_pos
            # Note the negated distances: this is the position->content direction.
            p2c_pos = tf.clip_by_value(-r_pos + att_span, 0, att_span * 2 - 1)
            p2c_att = tf.matmul(key_layer, tf.transpose(pos_query_layer, [0, 2, 1]))
            p2c_att = tf.transpose(
                take_along_axis(
                    p2c_att,
                    tf.broadcast_to(
                        tf.squeeze(p2c_pos, 0),
                        [shape_list(query_layer)[0], shape_list(key_layer)[-2], shape_list(key_layer)[-2]],
                    ),
                ),
                [0, 2, 1],
            )
            score += p2c_att / scale
        return score
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaEmbeddings Deberta->DebertaV2
class TFDebertaV2Embeddings(tf.keras.layers.Layer):
    """Construct the embeddings from word, position and token_type embeddings."""
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        # The embedding-table width may differ from hidden_size; a bias-free
        # projection (embed_proj) bridges the two when they differ.
        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
        self.hidden_size = config.hidden_size
        self.max_position_embeddings = config.max_position_embeddings
        # When False, position embeddings are not added into the input embedding.
        self.position_biased_input = getattr(config, "position_biased_input", True)
        self.initializer_range = config.initializer_range
        if self.embedding_size != config.hidden_size:
            self.embed_proj = tf.keras.layers.Dense(config.hidden_size, use_bias=False)
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")
    def build(self, input_shape: tf.TensorShape):
        # Embedding tables are created manually under named scopes so the
        # variable names line up with the pretrained checkpoints.
        with tf.name_scope("word_embeddings"):
            self.weight = self.add_weight(
                name="weight",
                shape=[self.config.vocab_size, self.embedding_size],
                initializer=get_initializer(self.initializer_range),
            )
        with tf.name_scope("token_type_embeddings"):
            if self.config.type_vocab_size > 0:
                self.token_type_embeddings = self.add_weight(
                    name="embeddings",
                    shape=[self.config.type_vocab_size, self.embedding_size],
                    initializer=get_initializer(self.initializer_range),
                )
            else:
                self.token_type_embeddings = None
        with tf.name_scope("position_embeddings"):
            if self.position_biased_input:
                self.position_embeddings = self.add_weight(
                    name="embeddings",
                    shape=[self.max_position_embeddings, self.hidden_size],
                    initializer=get_initializer(self.initializer_range),
                )
            else:
                self.position_embeddings = None
        super().build(input_shape)
    def call(
        self,
        input_ids: tf.Tensor = None,
        position_ids: tf.Tensor = None,
        token_type_ids: tf.Tensor = None,
        inputs_embeds: tf.Tensor = None,
        mask: tf.Tensor = None,
        training: bool = False,
    ) -> tf.Tensor:
        """
        Applies embedding based on inputs tensor.
        Returns:
            final_embeddings (`tf.Tensor`): output embedding tensor.
        """
        if input_ids is None and inputs_embeds is None:
            raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
        if input_ids is not None:
            # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound
            # indices on GPU, returning zeros instead. This is a dangerous silent behavior.
            tf.debugging.assert_less(
                input_ids,
                tf.cast(self.config.vocab_size, dtype=input_ids.dtype),
                message=(
                    "input_ids must be smaller than the embedding layer's input dimension (got"
                    f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})"
                ),
            )
            inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
        input_shape = shape_list(inputs_embeds)[:-1]
        # Default token types to zeros and positions to 0..seq_len-1.
        if token_type_ids is None:
            token_type_ids = tf.fill(dims=input_shape, value=0)
        if position_ids is None:
            position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
        final_embeddings = inputs_embeds
        if self.position_biased_input:
            position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
            final_embeddings += position_embeds
        if self.config.type_vocab_size > 0:
            token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
            final_embeddings += token_type_embeds
        # Project up to hidden_size when the embedding width differs.
        if self.embedding_size != self.hidden_size:
            final_embeddings = self.embed_proj(final_embeddings)
        final_embeddings = self.LayerNorm(final_embeddings)
        # Zero out the embeddings of padded positions when a mask is supplied.
        if mask is not None:
            if len(shape_list(mask)) != len(shape_list(final_embeddings)):
                if len(shape_list(mask)) == 4:
                    mask = tf.squeeze(tf.squeeze(mask, axis=1), axis=1)
                mask = tf.cast(tf.expand_dims(mask, axis=2), tf.float32)
            final_embeddings = final_embeddings * mask
        final_embeddings = self.dropout(final_embeddings, training=training)
        return final_embeddings
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaPredictionHeadTransform with Deberta->DebertaV2
class TFDebertaV2PredictionHeadTransform(tf.keras.layers.Layer):
    """Dense + activation + LayerNorm transform applied before the LM decoder."""

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            units=config.hidden_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="dense",
        )
        # The activation may be configured either as a string name or a callable.
        self.transform_act_fn = (
            get_tf_activation(config.hidden_act) if isinstance(config.hidden_act, str) else config.hidden_act
        )
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        transformed = self.dense(inputs=hidden_states)
        transformed = self.transform_act_fn(transformed)
        return self.LayerNorm(transformed)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaLMPredictionHead with Deberta->DebertaV2
class TFDebertaV2LMPredictionHead(tf.keras.layers.Layer):
    """Masked-LM head projecting hidden states back onto the vocabulary.

    The projection reuses (ties) the input embedding matrix as the output
    weight; only a per-token output bias is learned here.
    """
    def __init__(self, config: DebertaV2Config, input_embeddings: tf.keras.layers.Layer, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.hidden_size = config.hidden_size
        self.transform = TFDebertaV2PredictionHeadTransform(config, name="transform")
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.input_embeddings = input_embeddings
    def build(self, input_shape: tf.TensorShape):
        # One bias term per vocabulary entry, created lazily at build time.
        self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
        super().build(input_shape)
    def get_output_embeddings(self) -> tf.keras.layers.Layer:
        # Output embeddings are tied to the input embedding layer.
        return self.input_embeddings
    def set_output_embeddings(self, value: tf.Variable):
        self.input_embeddings.weight = value
        self.input_embeddings.vocab_size = shape_list(value)[0]
    def get_bias(self) -> Dict[str, tf.Variable]:
        return {"bias": self.bias}
    def set_bias(self, value: tf.Variable):
        self.bias = value["bias"]
        # Keep the config in sync with a possibly resized bias/vocabulary.
        self.config.vocab_size = shape_list(value["bias"])[0]
    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.transform(hidden_states=hidden_states)
        seq_length = shape_list(hidden_states)[1]
        # Flatten to 2-D, multiply by the transposed embedding matrix, then
        # restore [batch, seq_length, vocab_size] and add the output bias.
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
        hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
        hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
        return hidden_states
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaOnlyMLMHead with Deberta->DebertaV2
class TFDebertaV2OnlyMLMHead(tf.keras.layers.Layer):
    """Thin wrapper exposing the masked-LM prediction head as a single layer."""
    def __init__(self, config: DebertaV2Config, input_embeddings: tf.keras.layers.Layer, **kwargs):
        super().__init__(**kwargs)
        self.predictions = TFDebertaV2LMPredictionHead(config, input_embeddings, name="predictions")
    def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
        # Delegate directly to the prediction head; output is vocab logits.
        return self.predictions(hidden_states=sequence_output)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaMainLayer with Deberta->DebertaV2
class TFDebertaV2MainLayer(tf.keras.layers.Layer):
    """Core DeBERTa-v2 stack: embeddings followed by the transformer encoder.

    This is the headless base model shared by every task-specific class below.
    """
    config_class = DebertaV2Config
    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embeddings = TFDebertaV2Embeddings(config, name="embeddings")
        self.encoder = TFDebertaV2Encoder(config, name="encoder")
    def get_input_embeddings(self) -> tf.keras.layers.Layer:
        return self.embeddings
    def set_input_embeddings(self, value: tf.Variable):
        self.embeddings.weight = value
        self.embeddings.vocab_size = shape_list(value)[0]
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        raise NotImplementedError
    @unpack_inputs
    def call(
        self,
        input_ids: Optional[TFModelInputType] = None,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        # Exactly one of input_ids / inputs_embeds must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        # Defaults: attend to every position, and a single token type (0).
        if attention_mask is None:
            attention_mask = tf.fill(dims=input_shape, value=1)
        if token_type_ids is None:
            token_type_ids = tf.fill(dims=input_shape, value=0)
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            mask=attention_mask,
            training=training,
        )
        encoder_outputs = self.encoder(
            hidden_states=embedding_output,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        sequence_output = encoder_outputs[0]
        if not return_dict:
            # Tuple form: last hidden state followed by optional extras.
            return (sequence_output,) + encoder_outputs[1:]
        return TFBaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaPreTrainedModel with Deberta->DebertaV2
class TFDebertaV2PreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    # Configuration class to instantiate when loading from a checkpoint.
    config_class = DebertaV2Config
    # Attribute name under which the headless base model is stored.
    base_model_prefix = "deberta"
# Shared class-level docstring injected into each model via @add_start_docstrings.
# Fixes grammar of the original text ("It's build" -> "It's built",
# "it out perform" -> "it outperforms").
DEBERTA_START_DOCSTRING = r"""
    The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
    Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built
    on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two
    improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB pretraining data.
    This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
    behavior.
    <Tip>
    TensorFlow models and layers in `transformers` accept two formats as input:
    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.
    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
    positional argument:
    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
    `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
    `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!
    </Tip>
    Parameters:
        config ([`DebertaV2Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
# Shared forward-pass docstring; "{0}" is filled with the input shape by
# add_start_docstrings_to_model_forward. Fixes broken backtick markup in the
# input_ids type list and around `~utils.ModelOutput`.
DEBERTA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
            Indices of input sequence tokens in the vocabulary.
            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids)
        attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:
            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.
            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.
            [What are position IDs?](../glossary#position-ids)
        inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
    DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaModel with Deberta->DebertaV2
class TFDebertaV2Model(TFDebertaV2PreTrainedModel):
    """Headless DeBERTa-v2 model: a thin wrapper around TFDebertaV2MainLayer."""
    def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.deberta = TFDebertaV2MainLayer(config, name="deberta")
    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: Optional[TFModelInputType] = None,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        # All work is delegated to the shared main layer.
        outputs = self.deberta(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        return outputs
    def serving_output(self, output: TFBaseModelOutput) -> TFBaseModelOutput:
        # For SavedModel serving: convert optional tuple outputs to tensors,
        # and only expose them when the corresponding config flag is on.
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
        return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)
@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForMaskedLM with Deberta->DebertaV2
class TFDebertaV2ForMaskedLM(TFDebertaV2PreTrainedModel, TFMaskedLanguageModelingLoss):
    """DeBERTa-v2 with a masked-language-modeling head tied to the input embeddings."""
    def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        if config.is_decoder:
            logger.warning(
                "If you want to use `TFDebertaV2ForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )
        self.deberta = TFDebertaV2MainLayer(config, name="deberta")
        # The MLM head shares (ties) its projection with the input embeddings.
        self.mlm = TFDebertaV2OnlyMLMHead(config, input_embeddings=self.deberta.embeddings, name="cls")
    def get_lm_head(self) -> tf.keras.layers.Layer:
        return self.mlm.predictions
    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFMaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: Optional[TFModelInputType] = None,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: Optional[bool] = False,
    ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        outputs = self.deberta(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        sequence_output = outputs[0]
        prediction_scores = self.mlm(sequence_output=sequence_output, training=training)
        # Loss is only computed when labels are supplied.
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFMaskedLMOutput(
            loss=loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
    def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
        # Loss is intentionally dropped for serving signatures.
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
        return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
    """
    DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForSequenceClassification with Deberta->DebertaV2
class TFDebertaV2ForSequenceClassification(TFDebertaV2PreTrainedModel, TFSequenceClassificationLoss):
    """DeBERTa-v2 with a pooling layer plus a linear classification/regression head."""
    def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.deberta = TFDebertaV2MainLayer(config, name="deberta")
        self.pooler = TFDebertaV2ContextPooler(config, name="pooler")
        # cls_dropout overrides the generic hidden dropout when configured.
        drop_out = getattr(config, "cls_dropout", None)
        drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
        self.dropout = TFDebertaV2StableDropout(drop_out, name="cls_dropout")
        self.classifier = tf.keras.layers.Dense(
            units=config.num_labels,
            kernel_initializer=get_initializer(config.initializer_range),
            name="classifier",
        )
    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: Optional[TFModelInputType] = None,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: Optional[bool] = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs = self.deberta(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        # Pool the sequence, apply dropout, then classify.
        sequence_output = outputs[0]
        pooled_output = self.pooler(sequence_output, training=training)
        pooled_output = self.dropout(pooled_output, training=training)
        logits = self.classifier(pooled_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
    def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
        # Loss is intentionally dropped for serving signatures.
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
        return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
    """
    DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForTokenClassification with Deberta->DebertaV2
class TFDebertaV2ForTokenClassification(TFDebertaV2PreTrainedModel, TFTokenClassificationLoss):
    """DeBERTa-v2 with a per-token linear classification head (e.g. for NER)."""
    def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.deberta = TFDebertaV2MainLayer(config, name="deberta")
        self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
        self.classifier = tf.keras.layers.Dense(
            units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
        )
    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFTokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: Optional[TFModelInputType] = None,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: Optional[bool] = False,
    ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        outputs = self.deberta(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        # Classify every token position independently.
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output, training=training)
        logits = self.classifier(inputs=sequence_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return TFTokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
    def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
        # Loss is intentionally dropped for serving signatures.
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
        return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
    """
    DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForQuestionAnswering with Deberta->DebertaV2
class TFDebertaV2ForQuestionAnswering(TFDebertaV2PreTrainedModel, TFQuestionAnsweringLoss):
    """DeBERTa-v2 with a span-prediction head emitting start/end logits per token."""
    def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.deberta = TFDebertaV2MainLayer(config, name="deberta")
        # Single dense layer producing 2 values per token: start and end logits.
        self.qa_outputs = tf.keras.layers.Dense(
            units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
        )
    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFQuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: Optional[TFModelInputType] = None,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
        end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: Optional[bool] = False,
    ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
        r"""
        start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        outputs = self.deberta(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        sequence_output = outputs[0]
        # Split the 2-unit projection into separate start and end logits.
        logits = self.qa_outputs(inputs=sequence_output)
        start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
        start_logits = tf.squeeze(input=start_logits, axis=-1)
        end_logits = tf.squeeze(input=end_logits, axis=-1)
        loss = None
        if start_positions is not None and end_positions is not None:
            # Both span ends are required to compute the loss.
            labels = {"start_position": start_positions}
            labels["end_position"] = end_positions
            loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))
        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFQuestionAnsweringModelOutput(
            loss=loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
    def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
        # Loss is intentionally dropped for serving signatures.
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
        return TFQuestionAnsweringModelOutput(
            start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns
        )
|
2881099/csredis | 94,455 | src/CSRedisCore/CSRedisClientPipe.cs | using CSRedis.Internal.ObjectPool;
using System;
using System.Collections.Generic;
using System.Collections.Concurrent;
using System.Diagnostics;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
namespace CSRedis
{
public partial class CSRedisClientPipe<TObject> : IDisposable
{
private CSRedisClient rds;
// The sharding shortcuts below delegate to the owning client's state.
private ConcurrentDictionary<string, RedisClientPool> Nodes => rds.Nodes;
private bool IsMultiNode => rds.IsMultiNode;
private Func<string, string> NodeRuleRaw => rds.NodeRuleRaw;
// Per-node pipeline connection plus the result-array indexes of the commands
// queued on that connection.
private ConcurrentDictionary<string, (List<int> indexes, Object<RedisClient> conn)> Conns = new ConcurrentDictionary<string, (List<int> indexes, Object<RedisClient> conn)>();
// One parser per queued command, in issue order (null = return raw result).
private Queue<Func<object, object>> Parsers = new Queue<Func<object, object>>();
private static object ConnsLock = new object();
/// <summary>
/// Number of commands queued in the pipeline
/// </summary>
public int Counter => Parsers.Count;
// Creates a fresh pipeline bound to the given client.
internal CSRedisClientPipe(CSRedisClient csredis)
{
    rds = csredis;
}
// Used when the generic result type changes mid-pipeline: the new instance
// shares the same connections and parser queue as its predecessor.
private CSRedisClientPipe(CSRedisClient csredis, ConcurrentDictionary<string, (List<int> indexes, Object<RedisClient> conn)> conns, Queue<Func<object, object>> parsers)
{
    this.rds = csredis;
    this.Conns = conns;
    this.Parsers = parsers;
}
/// <summary>
/// Flush the pipeline: send all queued commands and collect their results
/// </summary>
/// <returns>Results in the order the commands were queued</returns>
public object[] EndPipe()
{
    var ret = new object[Parsers.Count];
    Exception ex = null;
    try
    {
        foreach (var conn in Conns.Values)
        {
            try
            {
                // Results come back per connection; map each one back to its
                // original queue position via the recorded indexes.
                // (Fixed: was the redundant "object[] tmp = tmp = ...".)
                object[] tmp = conn.conn.Value.EndPipe();
                for (var a = 0; a < tmp.Length; a++)
                {
                    var retIdx = conn.indexes[a];
                    ret[retIdx] = tmp[a];
                }
            }
            catch (Exception ex2)
            {
                // Remember the failure (last one wins) but keep draining the
                // remaining connections so they are all returned to the pool.
                ex = ex2;
            }
        }
    }
    finally
    {
        foreach (var conn in Conns.Values)
            (conn.conn.Pool as RedisClientPool).Return(conn.conn, ex);
    }
    // Apply the per-command parsers in queue order (null parser = raw value).
    for (var b = 0; b < ret.Length; b++)
    {
        var parse = Parsers.Dequeue();
        if (parse != null) ret[b] = parse(ret[b]);
    }
    Conns.Clear();
    return ret;
}
/// <summary>
/// Flushes any queued commands by delegating to <see cref="EndPipe"/>
/// </summary>
public void Dispose() => this.EndPipe();
private CSRedisClientPipe<TReturn> PipeCommand<TReturn>(string key, Func<Object<RedisClient>, string, TReturn> handle) => PipeCommand<TReturn>(key, handle, null);
/// <summary>
/// Queue one command on the pipeline connection of the node that owns <paramref name="key"/>.
/// </summary>
/// <param name="key">Routing key (prefix is added here before the command is issued)</param>
/// <param name="handle">Issues the command against the pipelined connection</param>
/// <param name="parser">Optional transform applied to the raw result in EndPipe; null keeps the raw value</param>
private CSRedisClientPipe<TReturn> PipeCommand<TReturn>(string key, Func<Object<RedisClient>, string, TReturn> handle, Func<object, object> parser)
{
    if (string.IsNullOrEmpty(key)) throw new Exception("key 不可为空或null");
    // Resolve the target node; fall back to the first node when the rule
    // yields an unknown node key.
    var nodeKey = NodeRuleRaw == null || Nodes.Count == 1 ? Nodes.Keys.First() : NodeRuleRaw(key);
    if (Nodes.TryGetValue(nodeKey, out var pool) == false) Nodes.TryGetValue(nodeKey = Nodes.Keys.First(), out pool);
    try
    {
        if (Conns.TryGetValue(pool.Key, out var conn) == false)
        {
            conn = (new List<int>(), pool.Get());
            bool isStartPipe = false;
            lock (ConnsLock)
            {
                if (Conns.TryAdd(pool.Key, conn) == false)
                {
                    // Lost the race: another caller registered a connection for
                    // this node first — return ours to the pool and reuse theirs.
                    pool.Return(conn.conn);
                    Conns.TryGetValue(pool.Key, out conn);
                }
                else
                {
                    isStartPipe = true;
                }
            }
            if (isStartPipe)
            {
                conn.conn.Value.StartPipe();
            }
        }
        key = string.Concat(pool.Prefix, key);
        handle(conn.conn, key);
        // Record where this command's result belongs in the final array.
        conn.indexes.Add(Parsers.Count);
        Parsers.Enqueue(parser);
    }
    catch (Exception ex)
    {
        foreach (var conn in Conns.Values)
            (conn.conn.Pool as RedisClientPool).Return(conn.conn, ex);
        // Rethrow with "throw;" (not "throw ex;") so the original stack
        // trace is preserved.
        throw;
    }
    if (typeof(TReturn) == typeof(TObject)) return this as CSRedisClientPipe<TReturn>;// return (CSRedisClientPipe<TReturn>)Convert.ChangeType(this, typeof(CSRedisClientPipe<TReturn>));
    //this._isDisposed = true;
    return new CSRedisClientPipe<TReturn>(rds, this.Conns, this.Parsers);
}
#region Script
/// <summary>
/// Execute a Lua script
/// </summary>
/// <param name="script">Lua script</param>
/// <param name="key">Key used to locate the partition node, without prefix</param>
/// <param name="args">Arguments</param>
/// <returns></returns>
public CSRedisClientPipe<object> Eval(string script, string key, params object[] args) => PipeCommand(key, (c, k) => c.Value.Eval(script, new[] { k }, args?.Select(z => rds.SerializeRedisValueInternal(z)).ToArray()));
/// <summary>
/// Execute a cached Lua script by its SHA1
/// </summary>
/// <param name="sha1">SHA1 of the cached script</param>
/// <param name="key">Key used to locate the partition node, without prefix</param>
/// <param name="args">Arguments</param>
/// <returns></returns>
public CSRedisClientPipe<object> EvalSHA(string sha1, string key, params object[] args) => PipeCommand(key, (c, k) => c.Value.EvalSHA(sha1, new[] { k }, args?.Select(z => rds.SerializeRedisValueInternal(z)).ToArray()));
#endregion
#region Pub/Sub
/// <summary>
/// Publish a message to a channel on its partition node; the delivered payload is "msgid|message" (e.g. "1|message")
/// </summary>
/// <param name="channel">Channel name</param>
/// <param name="message">Message text</param>
/// <returns></returns>
public CSRedisClientPipe<long> Publish(string channel, string message)
{
    // NOTE: the message-id increment runs immediately (outside the pipeline);
    // only the PUBLISH itself is queued.
    var msgid = rds.HIncrBy("csredisclient:Publish:msgid", channel, 1);
    return PipeCommand(channel, (c, k) => c.Value.Publish(channel, $"{msgid}|{message}"));
}
/// <summary>
/// Publish a message to a channel on its partition node; unlike Publish, no "1|" message-id header is prepended
/// </summary>
/// <param name="channel">Channel name</param>
/// <param name="message">Message text</param>
/// <returns></returns>
public CSRedisClientPipe<long> PublishNoneMessageId(string channel, string message) => PipeCommand(channel, (c, k) => c.Value.Publish(channel, message));
#endregion
#region HyperLogLog
/// <summary>
/// Add the given elements to a HyperLogLog
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="elements">Elements to add</param>
/// <returns></returns>
public CSRedisClientPipe<bool> PfAdd<T>(string key, params T[] elements) => PipeCommand(key, (c, k) => c.Value.PfAdd(k, elements?.Select(z => rds.SerializeRedisValueInternal(z)).ToArray()));
/// <summary>
/// Return the approximated cardinality of the given HyperLogLog
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public CSRedisClientPipe<long> PfCount(string key) => PipeCommand(key, (c, k) => c.Value.PfCount(k));
#endregion
#region Sorted Set
/// <summary>
/// [redis-server 5.0.0] Remove and return up to count members with the highest scores in the sorted set. A count larger than the set's cardinality is not an error. When multiple members are returned, the one with the highest score comes first, followed by members with lower scores.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="count">Number of members to pop</param>
/// <returns></returns>
public CSRedisClientPipe<(string member, decimal score)[]> ZPopMax(string key, long count) =>
    PipeCommand(key, (c, k) => { c.Value.ZPopMax(k, count); return default((string, decimal)[]); }, obj =>
        ((Tuple<string, decimal>[])obj).Select(a => (a.Item1, a.Item2)).ToArray());
/// <summary>
/// [redis-server 5.0.0] Remove and return up to count members with the highest scores in the sorted set, deserializing each member to <typeparamref name="T"/>. When multiple members are returned, the one with the highest score comes first.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="count">Number of members to pop</param>
/// <returns></returns>
public CSRedisClientPipe<(T member, decimal score)[]> ZPopMax<T>(string key, long count) =>
    PipeCommand(key, (c, k) => { c.Value.ZPopMaxBytes(k, count); return default((T member, decimal score)[]); }, obj => rds.DeserializeRedisValueTuple1Internal<T, decimal>((Tuple<byte[], decimal>[])obj));
/// <summary>
/// [redis-server 5.0.0] Remove and return up to count members with the lowest scores in the sorted set. A count larger than the set's cardinality is not an error. When multiple members are returned, the one with the lowest score comes first, followed by members with higher scores.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="count">Number of members to pop</param>
/// <returns></returns>
public CSRedisClientPipe<(string member, decimal score)[]> ZPopMin(string key, long count) =>
    PipeCommand(key, (c, k) => { c.Value.ZPopMin(k, count); return default((string, decimal)[]); }, obj =>
        ((Tuple<string, decimal>[])obj).Select(a => (a.Item1, a.Item2)).ToArray());
/// <summary>
/// [redis-server 5.0.0] Remove and return up to count members with the lowest scores in the sorted set, deserializing each member to <typeparamref name="T"/>. When multiple members are returned, the one with the lowest score comes first.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="count">Number of members to pop</param>
/// <returns></returns>
public CSRedisClientPipe<(T member, decimal score)[]> ZPopMin<T>(string key, long count) =>
    PipeCommand(key, (c, k) => { c.Value.ZPopMinBytes(k, count); return default((T member, decimal score)[]); }, obj => rds.DeserializeRedisValueTuple1Internal<T, decimal>((Tuple<byte[], decimal>[])obj));
/// <summary>
/// Adds one or more members to the sorted set, or updates the score of members
/// that already exist.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="scoreMembers">One or more (score, member) pairs</param>
/// <returns></returns>
public CSRedisClientPipe<long> ZAdd(string key, params (decimal, object)[] scoreMembers)
{
    if (scoreMembers == null || scoreMembers.Any() == false) throw new Exception("scoreMembers 参数不可为空");
    // Serialize every member up front so the lambda only captures ready-to-send pairs.
    var pairs = scoreMembers
        .Select(sm => new Tuple<decimal, object>(sm.Item1, rds.SerializeRedisValueInternal(sm.Item2)))
        .ToArray();
    return PipeCommand(key, (cli, realKey) => cli.Value.ZAdd(realKey, pairs));
}
/// <summary>
/// Returns the number of members in the sorted set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public CSRedisClientPipe<long> ZCard(string key)
{
    return PipeCommand(key, (cli, realKey) => cli.Value.ZCard(realKey));
}
/// <summary>
/// Counts the members in the sorted set whose score lies within the given range.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="max">Maximum score; decimal.MaxValue is sent as +inf</param>
/// <returns></returns>
public CSRedisClientPipe<long> ZCount(string key, decimal min, decimal max)
{
    // Map the sentinel extremes to Redis's open-ended range markers.
    var minArg = min == decimal.MinValue ? "-inf" : min.ToString();
    var maxArg = max == decimal.MaxValue ? "+inf" : max.ToString();
    return PipeCommand(key, (cli, realKey) => cli.Value.ZCount(realKey, minArg, maxArg));
}
/// <summary>
/// Counts the members in the sorted set whose score lies within the given range.
/// </summary>
/// <param name="min">Minimum score, e.g. -inf, (1, 1</param>
/// <param name="max">Maximum score, e.g. +inf, (10, 10</param>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public CSRedisClientPipe<long> ZCount(string key, string min, string max)
{
    return PipeCommand(key, (cli, realKey) => cli.Value.ZCount(realKey, min, max));
}
/// <summary>
/// Increments the score of <paramref name="member"/> in the sorted set by
/// <paramref name="increment"/>.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="member">Member whose score is incremented</param>
/// <param name="increment">Increment value (default 1)</param>
/// <returns></returns>
public CSRedisClientPipe<decimal> ZIncrBy(string key, string member, decimal increment = 1)
{
    return PipeCommand(key, (cli, realKey) => cli.Value.ZIncrBy(realKey, increment, member));
}
/// <summary>
/// Intersects the given sorted sets and stores the result in the new sorted set
/// <paramref name="destination"/>.
/// </summary>
/// <param name="destination">Destination sorted set, without the configured prefix</param>
/// <param name="weights">Optional multiplication factor per input set (WEIGHTS); defaults to 1 per set when null.</param>
/// <param name="aggregate">Sum | Min | Max</param>
/// <param name="keys">One or more source sorted sets, without the configured prefix</param>
/// <returns></returns>
public CSRedisClientPipe<long> ZInterStore(string destination, decimal[] weights, RedisAggregate aggregate, params string[] keys)
{
    if (keys == null || keys.Length == 0) throw new Exception("keys 参数不可为空");
    if (weights != null && weights.Length != keys.Length) throw new Exception("weights 和 keys 参数长度必须相同");
    // Multi-key command: all keys must live on one node, so it is unsupported when partitioned.
    if (IsMultiNode) throw new Exception("ZInterStore 管道命令,在分区模式下不可用");
    var nodePrefix = Nodes.First().Value.Prefix;
    var prefixedKeys = keys.Select(src => nodePrefix + src).ToArray();
    return PipeCommand(destination, (cli, realKey) => cli.Value.ZInterStore(realKey, weights, aggregate, prefixedKeys));
}
/// <summary>
/// Returns the members of the sorted set within the given index range.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start index; 0 is the first element, -1 the last</param>
/// <param name="stop">Stop index; 0 is the first element, -1 the last</param>
/// <returns></returns>
public CSRedisClientPipe<string[]> ZRange(string key, long start, long stop) => PipeCommand(key, (c, k) => c.Value.ZRange(k, start, stop, false));
/// <summary>
/// Returns the members of the sorted set within the given index range,
/// deserializing each member to <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start index; 0 is the first element, -1 the last</param>
/// <param name="stop">Stop index; 0 is the first element, -1 the last</param>
/// <returns></returns>
public CSRedisClientPipe<T[]> ZRange<T>(string key, long start, long stop) =>
    // Queue the raw-bytes variant now; deserialize when the pipe is flushed.
    PipeCommand(key, (c, k) => { c.Value.ZRangeBytes(k, start, stop, false); return default(T[]); }, obj => rds.DeserializeRedisValueArrayInternal<T>((byte[][])obj));
/// <summary>
/// Returns the members and scores of the sorted set within the given index range.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start index; 0 is the first element, -1 the last</param>
/// <param name="stop">Stop index; 0 is the first element, -1 the last</param>
/// <returns></returns>
public CSRedisClientPipe<(string member, decimal score)[]> ZRangeWithScores(string key, long start, long stop) =>
    PipeCommand(key, (c, k) => { c.Value.ZRangeWithScores(k, start, stop); return default((string, decimal)[]); }, obj =>
    ((Tuple<string, decimal>[])obj).Select(a => (a.Item1, a.Item2)).ToArray());
/// <summary>
/// Returns the members and scores of the sorted set within the given index range,
/// deserializing each member to <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start index; 0 is the first element, -1 the last</param>
/// <param name="stop">Stop index; 0 is the first element, -1 the last</param>
/// <returns></returns>
public CSRedisClientPipe<(T member, decimal score)[]> ZRangeWithScores<T>(string key, long start, long stop) =>
    PipeCommand(key, (c, k) => { c.Value.ZRangeBytesWithScores(k, start, stop); return default((T member, decimal score)[]); }, obj => rds.DeserializeRedisValueTuple1Internal<T, decimal>((Tuple<byte[], decimal>[])obj))
/// <summary>
/// Returns the members of the sorted set whose score lies within the given range (low to high).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="max">Maximum score; decimal.MaxValue is sent as +inf</param>
/// <param name="count">Maximum number of members to return</param>
/// <param name="offset">Offset into the matching members</param>
/// <returns></returns>
public CSRedisClientPipe<string[]> ZRangeByScore(string key, decimal min, decimal max, long? count = null, long offset = 0) =>
    PipeCommand(key, (c, k) => c.Value.ZRangeByScore(k, min == decimal.MinValue ? "-inf" : min.ToString(), max == decimal.MaxValue ? "+inf" : max.ToString(), false, offset, count));
/// <summary>
/// Returns the members of the sorted set whose score lies within the given range (low to high),
/// deserializing each member to <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="max">Maximum score; decimal.MaxValue is sent as +inf</param>
/// <param name="count">Maximum number of members to return</param>
/// <param name="offset">Offset into the matching members</param>
/// <returns></returns>
public CSRedisClientPipe<T[]> ZRangeByScore<T>(string key, decimal min, decimal max, long? count = null, long offset = 0) =>
    // Queue the raw-bytes variant now; deserialize when the pipe is flushed.
    PipeCommand(key, (c, k) => { c.Value.ZRangeBytesByScore(k, min == decimal.MinValue ? "-inf" : min.ToString(), max == decimal.MaxValue ? "+inf" : max.ToString(), false, offset, count); return default(T[]); }, obj =>
    rds.DeserializeRedisValueArrayInternal<T>((byte[][])obj));
/// <summary>
/// Returns the members of the sorted set whose score lies within the given range (low to high).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score, e.g. -inf, (1, 1</param>
/// <param name="max">Maximum score, e.g. +inf, (10, 10</param>
/// <param name="count">Maximum number of members to return</param>
/// <param name="offset">Offset into the matching members</param>
/// <returns></returns>
public CSRedisClientPipe<string[]> ZRangeByScore(string key, string min, string max, long? count = null, long offset = 0) =>
    PipeCommand(key, (c, k) => c.Value.ZRangeByScore(k, min, max, false, offset, count));
/// <summary>
/// Returns the members of the sorted set whose score lies within the given range (low to high),
/// deserializing each member to <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score, e.g. -inf, (1, 1</param>
/// <param name="max">Maximum score, e.g. +inf, (10, 10</param>
/// <param name="count">Maximum number of members to return</param>
/// <param name="offset">Offset into the matching members</param>
/// <returns></returns>
public CSRedisClientPipe<T[]> ZRangeByScore<T>(string key, string min, string max, long? count = null, long offset = 0) =>
    PipeCommand(key, (c, k) => { c.Value.ZRangeBytesByScore(k, min, max, false, offset, count); return default(T[]); }, obj =>
    rds.DeserializeRedisValueArrayInternal<T>((byte[][])obj))
/// <summary>
/// Returns the members and scores of the sorted set whose score lies within the
/// given range (low to high).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="max">Maximum score; decimal.MaxValue is sent as +inf</param>
/// <param name="count">Maximum number of members to return</param>
/// <param name="offset">Offset into the matching members</param>
/// <returns></returns>
public CSRedisClientPipe<(string member, decimal score)[]> ZRangeByScoreWithScores(string key, decimal min, decimal max, long? count = null, long offset = 0) =>
    PipeCommand(key, (c, k) => { c.Value.ZRangeByScoreWithScores(k, min == decimal.MinValue ? "-inf" : min.ToString(), max == decimal.MaxValue ? "+inf" : max.ToString(), offset, count); return default((string, decimal)[]); }, obj =>
    // BUG FIX: materialize with ToArray(). The declared element type is
    // (string member, decimal score)[], but the converter previously returned the
    // lazy IEnumerable from Select, which cannot be cast back to the array type
    // when the pipeline result is unboxed. Every sibling overload calls ToArray().
    ((Tuple<string, decimal>[])obj).Select(z => (z.Item1, z.Item2)).ToArray());
/// <summary>
/// Returns the members and scores of the sorted set whose score lies within the
/// given range (low to high), deserializing each member to <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="max">Maximum score; decimal.MaxValue is sent as +inf</param>
/// <param name="count">Maximum number of members to return</param>
/// <param name="offset">Offset into the matching members</param>
/// <returns></returns>
public CSRedisClientPipe<(T member, decimal score)[]> ZRangeByScoreWithScores<T>(string key, decimal min, decimal max, long? count = null, long offset = 0) =>
    // Queue the raw-bytes variant now; deserialize when the pipe is flushed.
    PipeCommand(key, (c, k) => { c.Value.ZRangeBytesByScoreWithScores(k, min == decimal.MinValue ? "-inf" : min.ToString(), max == decimal.MaxValue ? "+inf" : max.ToString(), offset, count); return default((T, decimal)[]); }, obj =>
    rds.DeserializeRedisValueTuple1Internal<T, decimal>((Tuple<byte[], decimal>[])obj))
/// <summary>
/// Returns the members and scores of the sorted set whose score lies within the
/// given range (low to high).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score, e.g. -inf, (1, 1</param>
/// <param name="max">Maximum score, e.g. +inf, (10, 10</param>
/// <param name="count">Maximum number of members to return</param>
/// <param name="offset">Offset into the matching members</param>
/// <returns></returns>
public CSRedisClientPipe<(string member, decimal score)[]> ZRangeByScoreWithScores(string key, string min, string max, long? count = null, long offset = 0) =>
    // BUG FIX: use the deferred-conversion pattern like every sibling overload.
    // The original called .Select(...).ToArray() directly on the queued call's
    // immediate return value; while the pipe is buffering that value is only a
    // placeholder (not the real reply), so the inline conversion bypasses the
    // deferred result transform and cannot yield the command's actual result.
    PipeCommand(key, (c, k) => { c.Value.ZRangeByScoreWithScores(k, min, max, offset, count); return default((string, decimal)[]); }, obj =>
    ((Tuple<string, decimal>[])obj).Select(z => (z.Item1, z.Item2)).ToArray());
/// <summary>
/// Returns the members and scores of the sorted set whose score lies within the
/// given range (low to high), deserializing each member to <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score, e.g. -inf, (1, 1</param>
/// <param name="max">Maximum score, e.g. +inf, (10, 10</param>
/// <param name="count">Maximum number of members to return</param>
/// <param name="offset">Offset into the matching members</param>
/// <returns></returns>
public CSRedisClientPipe<(T member, decimal score)[]> ZRangeByScoreWithScores<T>(string key, string min, string max, long? count = null, long offset = 0) =>
    PipeCommand(key, (c, k) => { c.Value.ZRangeBytesByScoreWithScores(k, min, max, offset, count); return default((T, decimal)[]); }, obj =>
    rds.DeserializeRedisValueTuple1Internal<T, decimal>((Tuple<byte[], decimal>[])obj))
/// <summary>
/// Returns the rank (index) of <paramref name="member"/> in the sorted set,
/// ordered by ascending score.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="member">Member to look up</param>
/// <returns></returns>
public CSRedisClientPipe<long?> ZRank(string key, object member)
{
    return PipeCommand(key, (cli, realKey) => cli.Value.ZRank(realKey, rds.SerializeRedisValueInternal(member)));
}
/// <summary>
/// Removes one or more members from the sorted set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="member">One or more members</param>
/// <returns></returns>
public CSRedisClientPipe<long> ZRem<T>(string key, params T[] member)
{
    if (member == null || member.Any() == false) throw new Exception("member 参数不可为空");
    return PipeCommand(key, (cli, realKey) =>
        cli.Value.ZRem(realKey, member?.Select(m => rds.SerializeRedisValueInternal(m)).ToArray()));
}
/// <summary>
/// Removes all members of the sorted set within the given rank (index) range.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start index; 0 is the first element, -1 the last</param>
/// <param name="stop">Stop index; 0 is the first element, -1 the last</param>
/// <returns></returns>
public CSRedisClientPipe<long> ZRemRangeByRank(string key, long start, long stop)
{
    return PipeCommand(key, (cli, realKey) => cli.Value.ZRemRangeByRank(realKey, start, stop));
}
/// <summary>
/// Removes all members of the sorted set whose score lies within the given range.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="max">Maximum score; decimal.MaxValue is sent as +inf</param>
/// <returns></returns>
public CSRedisClientPipe<long> ZRemRangeByScore(string key, decimal min, decimal max)
{
    // Map the sentinel extremes to Redis's open-ended range markers.
    var minArg = min == decimal.MinValue ? "-inf" : min.ToString();
    var maxArg = max == decimal.MaxValue ? "+inf" : max.ToString();
    return PipeCommand(key, (cli, realKey) => cli.Value.ZRemRangeByScore(realKey, minArg, maxArg));
}
/// <summary>
/// Removes all members of the sorted set whose score lies within the given range.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score, e.g. -inf, (1, 1</param>
/// <param name="max">Maximum score, e.g. +inf, (10, 10</param>
/// <returns></returns>
public CSRedisClientPipe<long> ZRemRangeByScore(string key, string min, string max)
{
    return PipeCommand(key, (cli, realKey) => cli.Value.ZRemRangeByScore(realKey, min, max));
}
/// <summary>
/// Returns the members of the sorted set within the given index range, ordered
/// from highest to lowest score.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start index; 0 is the first element, -1 the last</param>
/// <param name="stop">Stop index; 0 is the first element, -1 the last</param>
/// <returns></returns>
public CSRedisClientPipe<string[]> ZRevRange(string key, long start, long stop) => PipeCommand(key, (c, k) => c.Value.ZRevRange(k, start, stop, false));
/// <summary>
/// Returns the members of the sorted set within the given index range, ordered
/// from highest to lowest score, deserializing each member to <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start index; 0 is the first element, -1 the last</param>
/// <param name="stop">Stop index; 0 is the first element, -1 the last</param>
/// <returns></returns>
public CSRedisClientPipe<T[]> ZRevRange<T>(string key, long start, long stop) =>
    // Queue the raw-bytes variant now; deserialize when the pipe is flushed.
    PipeCommand(key, (c, k) => { c.Value.ZRevRangeBytes(k, start, stop, false); return default(T[]); }, obj =>
    rds.DeserializeRedisValueArrayInternal<T>((byte[][])obj));
/// <summary>
/// Returns the members and scores of the sorted set within the given index range,
/// ordered from highest to lowest score.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start index; 0 is the first element, -1 the last</param>
/// <param name="stop">Stop index; 0 is the first element, -1 the last</param>
/// <returns></returns>
public CSRedisClientPipe<(string member, decimal score)[]> ZRevRangeWithScores(string key, long start, long stop) =>
    PipeCommand(key, (c, k) => { c.Value.ZRevRangeWithScores(k, start, stop); return default((string, decimal)[]); }, obj =>
    ((Tuple<string, decimal>[])obj).Select(a => (a.Item1, a.Item2)).ToArray());
/// <summary>
/// Returns the members and scores of the sorted set within the given index range,
/// ordered from highest to lowest score, deserializing each member to <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start index; 0 is the first element, -1 the last</param>
/// <param name="stop">Stop index; 0 is the first element, -1 the last</param>
/// <returns></returns>
public CSRedisClientPipe<(T member, decimal score)[]> ZRevRangeWithScores<T>(string key, long start, long stop) =>
    PipeCommand(key, (c, k) => { c.Value.ZRevRangeBytesWithScores(k, start, stop); return default((T, decimal)[]); }, obj =>
    rds.DeserializeRedisValueTuple1Internal<T, decimal>((Tuple<byte[], decimal>[])obj))
/// <summary>
/// Returns the members of the sorted set whose score lies within the given range,
/// ordered from highest to lowest score. Note the argument order: max first, then min.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="max">Maximum score; decimal.MaxValue is sent as +inf</param>
/// <param name="min">Minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="count">Maximum number of members to return</param>
/// <param name="offset">Offset into the matching members</param>
/// <returns></returns>
public CSRedisClientPipe<string[]> ZRevRangeByScore(string key, decimal max, decimal min, long? count = null, long? offset = 0) =>
    PipeCommand(key, (c, k) => c.Value.ZRevRangeByScore(k, max == decimal.MaxValue ? "+inf" : max.ToString(), min == decimal.MinValue ? "-inf" : min.ToString(), false, offset, count));
/// <summary>
/// Returns the members of the sorted set whose score lies within the given range,
/// ordered from highest to lowest score, deserializing each member to <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="max">Maximum score; decimal.MaxValue is sent as +inf</param>
/// <param name="min">Minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="count">Maximum number of members to return</param>
/// <param name="offset">Offset into the matching members</param>
/// <returns></returns>
public CSRedisClientPipe<T[]> ZRevRangeByScore<T>(string key, decimal max, decimal min, long? count = null, long offset = 0) =>
    // Queue the raw-bytes variant now; deserialize when the pipe is flushed.
    PipeCommand(key, (c, k) => { c.Value.ZRevRangeBytesByScore(k, max == decimal.MaxValue ? "+inf" : max.ToString(), min == decimal.MinValue ? "-inf" : min.ToString(), false, offset, count); return default(T[]); }, obj =>
    rds.DeserializeRedisValueArrayInternal<T>((byte[][])obj));
/// <summary>
/// Returns the members of the sorted set whose score lies within the given range,
/// ordered from highest to lowest score.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="max">Maximum score, e.g. +inf, (10, 10</param>
/// <param name="min">Minimum score, e.g. -inf, (1, 1</param>
/// <param name="count">Maximum number of members to return</param>
/// <param name="offset">Offset into the matching members</param>
/// <returns></returns>
public CSRedisClientPipe<string[]> ZRevRangeByScore(string key, string max, string min, long? count = null, long? offset = 0) =>
    PipeCommand(key, (c, k) => c.Value.ZRevRangeByScore(k, max, min, false, offset, count));
/// <summary>
/// Returns the members of the sorted set whose score lies within the given range,
/// ordered from highest to lowest score, deserializing each member to <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="max">Maximum score, e.g. +inf, (10, 10</param>
/// <param name="min">Minimum score, e.g. -inf, (1, 1</param>
/// <param name="count">Maximum number of members to return</param>
/// <param name="offset">Offset into the matching members</param>
/// <returns></returns>
public CSRedisClientPipe<T[]> ZRevRangeByScore<T>(string key, string max, string min, long? count = null, long offset = 0) =>
    PipeCommand(key, (c, k) => { c.Value.ZRevRangeBytesByScore(k, max, min, false, offset, count); return default(T[]); }, obj =>
    rds.DeserializeRedisValueArrayInternal<T>((byte[][])obj))
/// <summary>
/// Returns the members and scores of the sorted set whose score lies within the
/// given range, ordered from highest to lowest score. Argument order: max, then min.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="max">Maximum score; decimal.MaxValue is sent as +inf</param>
/// <param name="min">Minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="count">Maximum number of members to return</param>
/// <param name="offset">Offset into the matching members</param>
/// <returns></returns>
public CSRedisClientPipe<(string member, decimal score)[]> ZRevRangeByScoreWithScores(string key, decimal max, decimal min, long? count = null, long offset = 0) =>
    // Pipeline pattern: queue now (placeholder return), convert the reply on flush.
    PipeCommand(key, (c, k) => { c.Value.ZRevRangeByScoreWithScores(k, max == decimal.MaxValue ? "+inf" : max.ToString(), min == decimal.MinValue ? "-inf" : min.ToString(), offset, count); return default((string member, decimal score)[]); }, obj =>
    ((Tuple<string, decimal>[])obj).Select(z => (z.Item1, z.Item2)).ToArray());
/// <summary>
/// Returns the members and scores of the sorted set whose score lies within the
/// given range, ordered from highest to lowest score, deserializing each member
/// to <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="max">Maximum score; decimal.MaxValue is sent as +inf</param>
/// <param name="min">Minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="count">Maximum number of members to return</param>
/// <param name="offset">Offset into the matching members</param>
/// <returns></returns>
public CSRedisClientPipe<(T member, decimal score)[]> ZRevRangeByScoreWithScores<T>(string key, decimal max, decimal min, long? count = null, long offset = 0) =>
    PipeCommand(key, (c, k) => { c.Value.ZRevRangeBytesByScoreWithScores(k, max == decimal.MaxValue ? "+inf" : max.ToString(), min == decimal.MinValue ? "-inf" : min.ToString(), offset, count); return default((T member, decimal score)[]); }, obj =>
    rds.DeserializeRedisValueTuple1Internal<T, decimal>((Tuple<byte[], decimal>[])obj));
/// <summary>
/// Returns the members and scores of the sorted set whose score lies within the
/// given range, ordered from highest to lowest score.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="max">Maximum score, e.g. +inf, (10, 10</param>
/// <param name="min">Minimum score, e.g. -inf, (1, 1</param>
/// <param name="count">Maximum number of members to return</param>
/// <param name="offset">Offset into the matching members</param>
/// <returns></returns>
public CSRedisClientPipe<(string member, decimal score)[]> ZRevRangeByScoreWithScores(string key, string max, string min, long? count = null, long offset = 0) =>
    PipeCommand(key, (c, k) => { c.Value.ZRevRangeByScoreWithScores(k, max, min, offset, count); return default((string, decimal)[]); }, obj =>
    ((Tuple<string, decimal>[])obj).Select(z => (z.Item1, z.Item2)).ToArray());
/// <summary>
/// Returns the members and scores of the sorted set whose score lies within the
/// given range, ordered from highest to lowest score, deserializing each member
/// to <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="max">Maximum score, e.g. +inf, (10, 10</param>
/// <param name="min">Minimum score, e.g. -inf, (1, 1</param>
/// <param name="count">Maximum number of members to return</param>
/// <param name="offset">Offset into the matching members</param>
/// <returns></returns>
public CSRedisClientPipe<(T member, decimal score)[]> ZRevRangeByScoreWithScores<T>(string key, string max, string min, long? count = null, long offset = 0) =>
    PipeCommand(key, (c, k) => { c.Value.ZRevRangeBytesByScoreWithScores(k, max, min, offset, count); return default((T, decimal)[]); }, obj =>
    rds.DeserializeRedisValueTuple1Internal<T, decimal>((Tuple<byte[], decimal>[])obj))
/// <summary>
/// Returns the rank (index) of <paramref name="member"/> in the sorted set,
/// ordered by descending score (highest score has rank 0).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="member">Member to look up</param>
/// <returns></returns>
public CSRedisClientPipe<long?> ZRevRank(string key, object member)
{
    return PipeCommand(key, (cli, realKey) => cli.Value.ZRevRank(realKey, rds.SerializeRedisValueInternal(member)));
}
/// <summary>
/// Returns the score of <paramref name="member"/> in the sorted set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="member">Member to look up</param>
/// <returns></returns>
public CSRedisClientPipe<decimal?> ZScore(string key, object member)
{
    return PipeCommand(key, (cli, realKey) => cli.Value.ZScore(realKey, rds.SerializeRedisValueInternal(member)));
}
/// <summary>
/// Unions the given sorted sets and stores the result in the new sorted set
/// <paramref name="destination"/>.
/// </summary>
/// <param name="destination">Destination sorted set, without the configured prefix</param>
/// <param name="weights">Optional multiplication factor per input set (WEIGHTS); defaults to 1 per set when null.</param>
/// <param name="aggregate">Sum | Min | Max</param>
/// <param name="keys">One or more source sorted sets, without the configured prefix</param>
/// <returns></returns>
public CSRedisClientPipe<long> ZUnionStore(string destination, decimal[] weights, RedisAggregate aggregate, params string[] keys)
{
    if (keys == null || keys.Length == 0) throw new Exception("keys 参数不可为空");
    if (weights != null && weights.Length != keys.Length) throw new Exception("weights 和 keys 参数长度必须相同");
    // Multi-key command: all keys must live on one node, so it is unsupported when partitioned.
    if (IsMultiNode) throw new Exception("ZUnionStore 管道命令,在分区模式下不可用");
    var nodePrefix = Nodes.First().Value.Prefix;
    var prefixedKeys = keys.Select(src => nodePrefix + src).ToArray();
    return PipeCommand(destination, (cli, realKey) => cli.Value.ZUnionStore(realKey, weights, aggregate, prefixedKeys));
}
/// <summary>
/// Incrementally iterates the members of the sorted set (ZSCAN).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="cursor">Iteration cursor; start at 0</param>
/// <param name="pattern">Optional MATCH pattern</param>
/// <param name="count">Optional COUNT hint per iteration</param>
/// <returns></returns>
public CSRedisClientPipe<RedisScan<(string member, decimal score)>> ZScan(string key, long cursor, string pattern = null, long? count = null) =>
    // Pipeline pattern: queue now (placeholder return), rebuild the scan result on flush.
    PipeCommand(key, (c, k) => { c.Value.ZScan(k, cursor, pattern, count); return default(RedisScan<(string, decimal)>); }, obj =>
    {
        var scan = (RedisScan<Tuple<string, decimal>>)obj;
        return new RedisScan<(string, decimal)>(scan.Cursor, scan.Items.Select(z => (z.Item1, z.Item2)).ToArray());
    });
/// <summary>
/// Incrementally iterates the members of the sorted set (ZSCAN), deserializing
/// each member to <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="cursor">Iteration cursor; start at 0</param>
/// <param name="pattern">Optional MATCH pattern</param>
/// <param name="count">Optional COUNT hint per iteration</param>
/// <returns></returns>
public CSRedisClientPipe<RedisScan<(T member, decimal score)>> ZScan<T>(string key, long cursor, string pattern = null, long? count = null) =>
    PipeCommand(key, (c, k) => { c.Value.ZScanBytes(k, cursor, pattern, count); return default(RedisScan<(T, decimal)>); }, obj =>
    {
        var scan = (RedisScan<Tuple<byte[], decimal>>)obj;
        return new RedisScan<(T, decimal)>(scan.Cursor, rds.DeserializeRedisValueTuple1Internal<T, decimal>(scan.Items));
    })
/// <summary>
/// When all members share the same score, members are ordered lexicographically;
/// returns the members between <paramref name="min"/> and <paramref name="max"/>.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">'[' inclusive, '(' exclusive, '-' negative infinity, '+' positive infinity; e.g. ZRANGEBYLEX zset - + returns all members</param>
/// <param name="max">'[' inclusive, '(' exclusive, '-' negative infinity, '+' positive infinity; e.g. ZRANGEBYLEX zset - + returns all members</param>
/// <param name="count">Maximum number of members to return</param>
/// <param name="offset">Offset into the matching members</param>
/// <returns></returns>
public CSRedisClientPipe<string[]> ZRangeByLex(string key, string min, string max, long? count = null, long offset = 0) =>
    PipeCommand(key, (c, k) => c.Value.ZRangeByLex(k, min, max, offset, count));
/// <summary>
/// Lexicographic range query like <see cref="ZRangeByLex(string, string, string, long?, long)"/>,
/// deserializing each member to <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">'[' inclusive, '(' exclusive, '-' negative infinity, '+' positive infinity</param>
/// <param name="max">'[' inclusive, '(' exclusive, '-' negative infinity, '+' positive infinity</param>
/// <param name="count">Maximum number of members to return</param>
/// <param name="offset">Offset into the matching members</param>
/// <returns></returns>
public CSRedisClientPipe<T[]> ZRangeByLex<T>(string key, string min, string max, long? count = null, long offset = 0) =>
    // Queue the raw-bytes variant now; deserialize when the pipe is flushed.
    PipeCommand(key, (c, k) => { c.Value.ZRangeBytesByLex(k, min, max, offset, count); return default(T[]); }, obj => rds.DeserializeRedisValueArrayInternal<T>((byte[][])obj));
/// <summary>
/// Removes the members in the given lexicographic range (all members must share
/// the same score for the ordering to be meaningful).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">'[' inclusive, '(' exclusive, '-' negative infinity, '+' positive infinity</param>
/// <param name="max">'[' inclusive, '(' exclusive, '-' negative infinity, '+' positive infinity</param>
/// <returns></returns>
public CSRedisClientPipe<long> ZRemRangeByLex(string key, string min, string max) =>
    PipeCommand(key, (c, k) => c.Value.ZRemRangeByLex(k, min, max));
/// <summary>
/// Counts the members in the given lexicographic range (all members must share
/// the same score for the ordering to be meaningful).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">'[' inclusive, '(' exclusive, '-' negative infinity, '+' positive infinity</param>
/// <param name="max">'[' inclusive, '(' exclusive, '-' negative infinity, '+' positive infinity</param>
/// <returns></returns>
public CSRedisClientPipe<long> ZLexCount(string key, string min, string max) =>
    PipeCommand(key, (c, k) => c.Value.ZLexCount(k, min, max))
#endregion
#region Set
/// <summary>
/// Adds one or more members to the set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="members">One or more members (serialized before sending)</param>
/// <returns></returns>
public CSRedisClientPipe<long> SAdd<T>(string key, params T[] members)
{
    if (members == null || members.Any() == false) throw new Exception("members 参数不可为空");
    return PipeCommand(key, (cli, realKey) =>
        cli.Value.SAdd(realKey, members?.Select(m => rds.SerializeRedisValueInternal(m)).ToArray()));
}
/// <summary>
/// Returns the number of members in the set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public CSRedisClientPipe<long> SCard(string key)
{
    return PipeCommand(key, (cli, realKey) => cli.Value.SCard(realKey));
}
/// <summary>
/// Returns the difference of all the given sets.
/// </summary>
/// <param name="keys">Keys, without the configured prefix</param>
/// <returns></returns>
public CSRedisClientPipe<string[]> SDiff(params string[] keys)
{
    if (keys == null || keys.Length == 0) throw new Exception("keys 参数不可为空");
    // Multi-key command: unsupported when partitioned across nodes.
    if (IsMultiNode) throw new Exception("SDiff 管道命令,在分区模式下不可用");
    var prefix = Nodes.First().Value.Prefix;
    return PipeCommand(keys.First(), (c, k) => c.Value.SDiff(keys.Select(z => prefix + z).ToArray()));
}
/// <summary>
/// Returns the difference of all the given sets, deserializing each member to <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="keys">Keys, without the configured prefix</param>
/// <returns></returns>
public CSRedisClientPipe<T[]> SDiff<T>(params string[] keys)
{
    if (keys == null || keys.Length == 0) throw new Exception("keys 参数不可为空");
    if (IsMultiNode) throw new Exception("SDiff<T> 管道命令,在分区模式下不可用");
    var prefix = Nodes.First().Value.Prefix;
    // Queue the raw-bytes variant now; deserialize when the pipe is flushed.
    return PipeCommand(keys.First(), (c, k) => { c.Value.SDiffBytes(keys.Select(z => prefix + z).ToArray()); return default(T[]); }, obj =>
    rds.DeserializeRedisValueArrayInternal<T>((byte[][])obj));
}
/// <summary>
/// Computes the difference of all the given sets and stores it in <paramref name="destination"/>.
/// </summary>
/// <param name="destination">Destination set, without the configured prefix</param>
/// <param name="keys">One or more source sets, without the configured prefix</param>
/// <returns></returns>
public CSRedisClientPipe<long> SDiffStore(string destination, params string[] keys)
{
    if (string.IsNullOrEmpty(destination)) throw new Exception("destination 参数不可为空");
    if (keys == null || keys.Length == 0) throw new Exception("keys 参数不可为空");
    if (IsMultiNode) throw new Exception("SDiffStore 管道命令,在分区模式下不可用");
    var prefix = Nodes.First().Value.Prefix;
    return PipeCommand(destination, (c, k) => c.Value.SDiffStore(k, keys.Select(z => prefix + z).ToArray()));
}
/// <summary>
/// Returns the intersection of all the given sets.
/// </summary>
/// <param name="keys">Keys, without the configured prefix</param>
/// <returns></returns>
public CSRedisClientPipe<string[]> SInter(params string[] keys)
{
    if (keys == null || keys.Length == 0) throw new Exception("keys 参数不可为空");
    // Multi-key command: unsupported when partitioned across nodes.
    if (IsMultiNode) throw new Exception("SInter 管道命令,在分区模式下不可用");
    var prefix = Nodes.First().Value.Prefix;
    return PipeCommand(keys.First(), (c, k) => c.Value.SInter(keys.Select(z => prefix + z).ToArray()));
}
/// <summary>
/// Returns the intersection of all the given sets, deserializing each member to <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="keys">Keys, without the configured prefix</param>
/// <returns></returns>
public CSRedisClientPipe<T[]> SInter<T>(params string[] keys)
{
    if (keys == null || keys.Length == 0) throw new Exception("keys 参数不可为空");
    if (IsMultiNode) throw new Exception("SInter<T> 管道命令,在分区模式下不可用");
    var prefix = Nodes.First().Value.Prefix;
    // Queue the raw-bytes variant now; deserialize when the pipe is flushed.
    return PipeCommand(keys.First(), (c, k) => { c.Value.SInterBytes(keys.Select(z => prefix + z).ToArray()); return default(T[]); }, obj =>
    rds.DeserializeRedisValueArrayInternal<T>((byte[][])obj));
}
/// <summary>
/// Computes the intersection of all the given sets and stores it in <paramref name="destination"/>.
/// </summary>
/// <param name="destination">Destination set, without the configured prefix</param>
/// <param name="keys">One or more source sets, without the configured prefix</param>
/// <returns></returns>
public CSRedisClientPipe<long> SInterStore(string destination, params string[] keys)
{
    if (string.IsNullOrEmpty(destination)) throw new Exception("destination 参数不可为空");
    if (keys == null || keys.Length == 0) throw new Exception("keys 参数不可为空");
    if (IsMultiNode) throw new Exception("SInterStore 管道命令,在分区模式下不可用");
    var prefix = Nodes.First().Value.Prefix;
    return PipeCommand(destination, (c, k) => c.Value.SInterStore(k, keys.Select(z => prefix + z).ToArray()));
}
/// <summary>
/// Checks whether <paramref name="member"/> is a member of the set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="member">Member to test (serialized before sending)</param>
/// <returns></returns>
public CSRedisClientPipe<bool> SIsMember(string key, object member) => PipeCommand(key, (c, k) => c.Value.SIsMember(k, rds.SerializeRedisValueInternal(member)));
/// <summary>
/// Returns all members of the set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public CSRedisClientPipe<string[]> SMembers(string key) => PipeCommand(key, (c, k) => c.Value.SMembers(k));
/// <summary>
/// Returns all members of the set, deserializing each member to <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public CSRedisClientPipe<T[]> SMembers<T>(string key) => PipeCommand(key, (c, k) => { c.Value.SMembersBytes(k); return default(T[]); }, obj => rds.DeserializeRedisValueArrayInternal<T>((byte[][])obj))
/// <summary>
/// 将 member 元素从 source 集合移动到 destination 集合
/// </summary>
/// <param name="source">无序集合key,不含prefix前辍</param>
/// <param name="destination">目标无序集合key,不含prefix前辍</param>
/// <param name="member">成员</param>
/// <returns></returns>
public CSRedisClientPipe<bool> SMove(string source, string destination, object member)
{
    // Two-key command: only valid on a single node, so the destination can
    // safely be prefixed with the same node prefix as the source.
    if (IsMultiNode) throw new Exception("SMove 管道命令,在分区模式下不可用");
    return PipeCommand(source, (c, k) =>
    {
        var prefixedDestination = (c.Pool as RedisClientPool)?.Prefix + destination;
        return c.Value.SMove(k, prefixedDestination, rds.SerializeRedisValueInternal(member));
    });
}
/// <summary>
/// 移除并返回集合中的一个随机元素
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<string> SPop(string key) => PipeCommand(key, (c, k) => c.Value.SPop(k));
/// <summary>
/// 移除并返回集合中的一个随机元素
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<T> SPop<T>(string key) => PipeCommand(key, (c, k) => { c.Value.SPopBytes(k); return default(T); }, obj => rds.DeserializeRedisValueInternal<T>((byte[])obj));
/// <summary>
/// 返回集合中的一个随机元素
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<string> SRandMember(string key) => PipeCommand(key, (c, k) => c.Value.SRandMember(k));
/// <summary>
/// 返回集合中的一个随机元素
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<T> SRandMember<T>(string key) => PipeCommand(key, (c, k) => { c.Value.SRandMemberBytes(k); return default(T); }, obj => rds.DeserializeRedisValueInternal<T>((byte[])obj));
/// <summary>
/// 返回集合中一个或多个随机数的元素
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="count">返回个数</param>
/// <returns></returns>
public CSRedisClientPipe<string[]> SRandMembers(string key, int count = 1) => PipeCommand(key, (c, k) => c.Value.SRandMembers(k, count));
/// <summary>
/// 返回集合中一个或多个随机数的元素
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="key">不含prefix前辍</param>
/// <param name="count">返回个数</param>
/// <returns></returns>
public CSRedisClientPipe<T[]> SRandMembers<T>(string key, int count = 1) => PipeCommand(key, (c, k) => { c.Value.SRandMembersBytes(k, count); return default(T[]); }, obj => rds.DeserializeRedisValueArrayInternal<T>((byte[][])obj));
/// <summary>
/// 移除集合中一个或多个成员
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="members">一个或多个成员</param>
/// <returns></returns>
public CSRedisClientPipe<long> SRem<T>(string key, params T[] members)
{
    // members is guaranteed non-null and non-empty past this guard,
    // so the serialization below needs no null-conditional access.
    if (members == null || members.Length == 0) throw new Exception("members 参数不可为空");
    return PipeCommand(key, (c, k) =>
        c.Value.SRem(k, members.Select(m => rds.SerializeRedisValueInternal(m)).ToArray()));
}
/// <summary>
/// 返回所有给定集合的并集
/// </summary>
/// <param name="keys">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<string[]> SUnion(params string[] keys)
{
if (keys == null || keys.Length == 0) throw new Exception("keys 参数不可为空");
if (IsMultiNode) throw new Exception("SUnion 管道命令,在分区模式下不可用");
var prefix = Nodes.First().Value.Prefix;
return PipeCommand(keys.First(), (c, k) => c.Value.SUnion(keys.Select(z => prefix + z).ToArray()));
}
/// <summary>
/// 返回所有给定集合的并集
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="keys">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<T[]> SUnion<T>(params string[] keys)
{
if (keys == null || keys.Length == 0) throw new Exception("keys 参数不可为空");
if (IsMultiNode) throw new Exception("SUnion<T> 管道命令,在分区模式下不可用");
var prefix = Nodes.First().Value.Prefix;
return PipeCommand(keys.First(), (c, k) => { c.Value.SUnionBytes(keys.Select(z => prefix + z).ToArray()); return default(T[]); }, obj =>
rds.DeserializeRedisValueArrayInternal<T>((byte[][])obj));
}
/// <summary>
/// 所有给定集合的并集存储在 destination 集合中
/// </summary>
/// <param name="destination">新的无序集合,不含prefix前辍</param>
/// <param name="keys">一个或多个无序集合,不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<long> SUnionStore(string destination, params string[] keys)
{
    // Cross-key command: only valid when all keys live on a single node.
    if (string.IsNullOrEmpty(destination)) throw new Exception("destination 参数不可为空");
    if (keys == null || keys.Length == 0) throw new Exception("keys 参数不可为空");
    if (IsMultiNode) throw new Exception("SUnionStore 管道命令,在分区模式下不可用");
    var nodePrefix = Nodes.First().Value.Prefix;
    return PipeCommand(destination,
        (c, k) => c.Value.SUnionStore(k, keys.Select(src => nodePrefix + src).ToArray()));
}
/// <summary>
/// 迭代集合中的元素
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="cursor">位置</param>
/// <param name="pattern">模式</param>
/// <param name="count">数量</param>
/// <returns></returns>
public CSRedisClientPipe<RedisScan<string>> SScan(string key, long cursor, string pattern = null, long? count = null) =>
PipeCommand(key, (c, k) => c.Value.SScan(k, cursor, pattern, count));
/// <summary>
/// 迭代集合中的元素
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="key">不含prefix前辍</param>
/// <param name="cursor">位置</param>
/// <param name="pattern">模式</param>
/// <param name="count">数量</param>
/// <returns></returns>
public CSRedisClientPipe<RedisScan<T>> SScan<T>(string key, long cursor, string pattern = null, long? count = null) =>
PipeCommand(key, (c, k) => { c.Value.SScanBytes(k, cursor, pattern, count); return default(RedisScan<T>); }, obj =>
{
var scan = (RedisScan<byte[]>)obj;
return new RedisScan<T>(scan.Cursor, rds.DeserializeRedisValueArrayInternal<T>(scan.Items));
});
#endregion
#region List
/// <summary>
/// 通过索引获取列表中的元素
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="index">索引</param>
/// <returns></returns>
public CSRedisClientPipe<string> LIndex(string key, long index) => PipeCommand(key, (c, k) => c.Value.LIndex(k, index));
/// <summary>
/// 通过索引获取列表中的元素
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="key">不含prefix前辍</param>
/// <param name="index">索引</param>
/// <returns></returns>
public CSRedisClientPipe<T> LIndex<T>(string key, long index) => PipeCommand(key, (c, k) => { c.Value.LIndexBytes(k, index); return default(T); }, obj => rds.DeserializeRedisValueInternal<T>((byte[])obj));
/// <summary>
/// 在列表中的元素前面插入元素
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="pivot">列表的元素</param>
/// <param name="value">新元素</param>
/// <returns></returns>
public CSRedisClientPipe<long> LInsertBefore(string key, string pivot, object value) => PipeCommand(key, (c, k) => c.Value.LInsert(k, RedisInsert.Before, pivot, rds.SerializeRedisValueInternal(value)));
/// <summary>
/// 在列表中的元素后面插入元素
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="pivot">列表的元素</param>
/// <param name="value">新元素</param>
/// <returns></returns>
public CSRedisClientPipe<long> LInsertAfter(string key, string pivot, object value) => PipeCommand(key, (c, k) => c.Value.LInsert(k, RedisInsert.After, pivot, rds.SerializeRedisValueInternal(value)));
/// <summary>
/// 获取列表长度
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<long> LLen(string key) => PipeCommand(key, (c, k) => c.Value.LLen(k));
/// <summary>
/// 移出并获取列表的第一个元素
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<string> LPop(string key) => PipeCommand(key, (c, k) => c.Value.LPop(k));
/// <summary>
/// 移出并获取列表的第一个元素
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<T> LPop<T>(string key) => PipeCommand(key, (c, k) => { c.Value.LPopBytes(k); return default(T); }, obj => rds.DeserializeRedisValueInternal<T>((byte[])obj));
/// <summary>
/// Inserts one or more values at the head of the list (LPUSH).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="value">One or more values</param>
/// <returns>Length of the list after the LPUSH command executes</returns>
public CSRedisClientPipe<long> LPush<T>(string key, params T[] value)
{
    // Fixed: removed a stray empty statement (";;") and the redundant
    // null-conditional access — value is guaranteed non-null past this guard.
    if (value == null || value.Length == 0) throw new Exception("value 参数不可为空");
    return PipeCommand(key, (c, k) =>
        c.Value.LPush(k, value.Select(z => rds.SerializeRedisValueInternal(z)).ToArray()));
}
/// <summary>
/// 将一个值插入到已存在的列表头部
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="value">值</param>
/// <returns>执行 LPUSHX 命令后,列表的长度。</returns>
public CSRedisClientPipe<long> LPushX(string key, object value) => PipeCommand(key, (c, k) => c.Value.LPushX(k, rds.SerializeRedisValueInternal(value)));
/// <summary>
/// 获取列表指定范围内的元素
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="start">开始位置,0表示第一个元素,-1表示最后一个元素</param>
/// <param name="stop">结束位置,0表示第一个元素,-1表示最后一个元素</param>
/// <returns></returns>
public CSRedisClientPipe<string[]> LRange(string key, long start, long stop) => PipeCommand(key, (c, k) => c.Value.LRange(k, start, stop));
/// <summary>
/// 获取列表指定范围内的元素
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="key">不含prefix前辍</param>
/// <param name="start">开始位置,0表示第一个元素,-1表示最后一个元素</param>
/// <param name="stop">结束位置,0表示第一个元素,-1表示最后一个元素</param>
/// <returns></returns>
public CSRedisClientPipe<T[]> LRange<T>(string key, long start, long stop) => PipeCommand(key, (c, k) => { c.Value.LRangeBytes(k, start, stop); return default(T[]); }, obj => rds.DeserializeRedisValueArrayInternal<T>((byte[][])obj));
/// <summary>
/// 根据参数 count 的值,移除列表中与参数 value 相等的元素
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="count">移除的数量,大于0时从表头删除数量count,小于0时从表尾删除数量-count,等于0移除所有</param>
/// <param name="value">元素</param>
/// <returns></returns>
public CSRedisClientPipe<long> LRem(string key, long count, object value) => PipeCommand(key, (c, k) => c.Value.LRem(k, count, rds.SerializeRedisValueInternal(value)));
/// <summary>
/// 通过索引设置列表元素的值
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="index">索引</param>
/// <param name="value">值</param>
/// <returns></returns>
public CSRedisClientPipe<bool> LSet(string key, long index, object value)
{
    // LSET replies "OK" on success; the parser maps that status reply to true.
    return PipeCommand(key, (c, k) =>
    {
        c.Value.LSet(k, index, rds.SerializeRedisValueInternal(value));
        return false;
    }, reply => reply?.ToString() == "OK");
}
/// <summary>
/// 对一个列表进行修剪,让列表只保留指定区间内的元素,不在指定区间之内的元素都将被删除
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="start">开始位置,0表示第一个元素,-1表示最后一个元素</param>
/// <param name="stop">结束位置,0表示第一个元素,-1表示最后一个元素</param>
/// <returns></returns>
public CSRedisClientPipe<bool> LTrim(string key, long start, long stop)
{
    // LTRIM replies "OK" on success; the parser maps that status reply to true.
    return PipeCommand(key, (c, k) =>
    {
        c.Value.LTrim(k, start, stop);
        return false;
    }, reply => reply?.ToString() == "OK");
}
/// <summary>
/// 移除并获取列表最后一个元素
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<string> RPop(string key) => PipeCommand(key, (c, k) => c.Value.RPop(k));
/// <summary>
/// 移除并获取列表最后一个元素
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<T> RPop<T>(string key) => PipeCommand(key, (c, k) => { c.Value.RPopBytes(k); return default(T); }, obj => rds.DeserializeRedisValueInternal<T>((byte[])obj));
/// <summary>
/// 将列表 source 中的最后一个元素(尾元素)弹出,并返回给客户端。
/// 将 source 弹出的元素插入到列表 destination ,作为 destination 列表的的头元素。
/// </summary>
/// <param name="source">源key,不含prefix前辍</param>
/// <param name="destination">目标key,不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<string> RPopLPush(string source, string destination)
{
if (IsMultiNode) throw new Exception("RPopLPush 管道命令,在分区模式下不可用");
return PipeCommand(source, (c, k) => c.Value.RPopLPush(k, (c.Pool as RedisClientPool)?.Prefix + destination));
}
/// <summary>
/// 将列表 source 中的最后一个元素(尾元素)弹出,并返回给客户端。
/// 将 source 弹出的元素插入到列表 destination ,作为 destination 列表的的头元素。
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="source">源key,不含prefix前辍</param>
/// <param name="destination">目标key,不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<T> RPopLPush<T>(string source, string destination)
{
if (IsMultiNode) throw new Exception("RPopLPush<T> 管道命令,在分区模式下不可用");
return PipeCommand(source, (c, k) => { c.Value.RPopBytesLPush(k, (c.Pool as RedisClientPool)?.Prefix + destination); return default(T); }, obj =>
rds.DeserializeRedisValueInternal<T>((byte[])obj));
}
/// <summary>
/// Appends one or more values to the tail of the list (RPUSH).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="value">One or more values</param>
/// <returns>Length of the list after the RPUSH command executes</returns>
public CSRedisClientPipe<long> RPush<T>(string key, params T[] value)
{
    // Fixed: removed a stray empty statement (";;") and the redundant
    // null-conditional access — value is guaranteed non-null past this guard.
    if (value == null || value.Length == 0) throw new Exception("value 参数不可为空");
    return PipeCommand(key, (c, k) =>
        c.Value.RPush(k, value.Select(z => rds.SerializeRedisValueInternal(z)).ToArray()));
}
/// <summary>
/// 为已存在的列表添加值
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="value">一个或多个值</param>
/// <returns>执行 RPUSHX 命令后,列表的长度</returns>
public CSRedisClientPipe<long> RPushX(string key, object value) => PipeCommand(key, (c, k) => c.Value.RPushX(k, rds.SerializeRedisValueInternal(value)));
#endregion
#region Hash
/// <summary>
/// [redis-server 3.2.0] 返回hash指定field的value的字符串长度,如果hash或者field不存在,返回0.
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="field">字段</param>
/// <returns></returns>
public CSRedisClientPipe<long> HStrLen(string key, string field) => PipeCommand(key, (c, k) => c.Value.HStrLen(k, field));
/// <summary>
/// 删除一个或多个哈希表字段
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="fields">字段</param>
/// <returns></returns>
public CSRedisClientPipe<long> HDel(string key, params string[] fields)
{
if (fields == null || fields.Any() == false) throw new Exception("fields 参数不可为空");
return PipeCommand(key, (c, k) => c.Value.HDel(k, fields));
}
/// <summary>
/// 查看哈希表 key 中,指定的字段是否存在
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="field">字段</param>
/// <returns></returns>
public CSRedisClientPipe<bool> HExists(string key, string field) => PipeCommand(key, (c, k) => c.Value.HExists(k, field));
/// <summary>
/// 获取存储在哈希表中指定字段的值
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="field">字段</param>
/// <returns></returns>
public CSRedisClientPipe<string> HGet(string key, string field) => PipeCommand(key, (c, k) => c.Value.HGet(k, field));
/// <summary>
/// 获取存储在哈希表中指定字段的值
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="key">不含prefix前辍</param>
/// <param name="field">字段</param>
/// <returns></returns>
public CSRedisClientPipe<T> HGet<T>(string key, string field) => PipeCommand(key, (c, k) => { c.Value.HGetBytes(k, field); return default(T); }, obj => rds.DeserializeRedisValueInternal<T>((byte[])obj));
/// <summary>
/// 获取在哈希表中指定 key 的所有字段和值
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<Dictionary<string, string>> HGetAll(string key) => PipeCommand(key, (c, k) => c.Value.HGetAll(k));
/// <summary>
/// 获取在哈希表中指定 key 的所有字段和值
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<Dictionary<string, T>> HGetAll<T>(string key) => PipeCommand(key, (c, k) => { c.Value.HGetAllBytes(k); return default(Dictionary<string, T>); }, obj =>
rds.DeserializeRedisValueDictionaryInternal<string, T>((Dictionary<string, byte[]>)obj));
/// <summary>
/// 为哈希表 key 中的指定字段的整数值加上增量 increment
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="field">字段</param>
/// <param name="value">增量值(默认=1)</param>
/// <returns></returns>
public CSRedisClientPipe<long> HIncrBy(string key, string field, long value = 1) => PipeCommand(key, (c, k) => c.Value.HIncrBy(k, field, value));
/// <summary>
/// 为哈希表 key 中的指定字段的整数值加上增量 increment
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="field">字段</param>
/// <param name="value">增量值(默认=1)</param>
/// <returns></returns>
public CSRedisClientPipe<decimal> HIncrByFloat(string key, string field, decimal value) => PipeCommand(key, (c, k) => c.Value.HIncrByFloat(k, field, value));
/// <summary>
/// 获取所有哈希表中的字段
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<string[]> HKeys(string key) => PipeCommand(key, (c, k) => c.Value.HKeys(k));
/// <summary>
/// 获取哈希表中字段的数量
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<long> HLen(string key) => PipeCommand(key, (c, k) => c.Value.HLen(k));
/// <summary>
/// 获取存储在哈希表中多个字段的值
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="fields">字段</param>
/// <returns></returns>
public CSRedisClientPipe<string[]> HMGet(string key, params string[] fields)
{
if (fields == null || fields.Any() == false) throw new Exception("fields 参数不可为空");
return PipeCommand(key, (c, k) => c.Value.HMGet(k, fields));
}
/// <summary>
/// 获取存储在哈希表中多个字段的值
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="key">不含prefix前辍</param>
/// <param name="fields">一个或多个字段</param>
/// <returns></returns>
public CSRedisClientPipe<T[]> HMGet<T>(string key, params string[] fields)
{
if (fields == null || fields.Any() == false) throw new Exception("fields 参数不可为空");
return PipeCommand(key, (c, k) => { c.Value.HMGetBytes(k, fields); return default(T[]); }, obj => rds.DeserializeRedisValueArrayInternal<T>((byte[][])obj));
}
/// <summary>
/// 同时将多个 field-value (域-值)对设置到哈希表 key 中
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="keyValues">key1 value1 [key2 value2]</param>
/// <returns></returns>
public CSRedisClientPipe<bool> HMSet(string key, params object[] keyValues)
{
    // keyValues must be a flat field1, value1, field2, value2, ... sequence.
    if (keyValues == null || keyValues.Any() == false) throw new Exception("keyValues 参数不可为空");
    if (keyValues.Length % 2 != 0) throw new Exception("keyValues 参数是键值对,不应该出现奇数(数量),请检查使用姿势。");
    var parms = new List<object>();
    for (var a = 0; a < keyValues.Length; a += 2)
    {
        // string.Concat handles null safely (yields ""), which the next check rejects.
        var k = string.Concat(keyValues[a]);
        var v = keyValues[a + 1];
        if (string.IsNullOrEmpty(k)) throw new Exception("keyValues 参数是键值对,并且 key 不可为空");
        parms.Add(k);
        // Fields stay as strings; values go through the client's serializer.
        parms.Add(rds.SerializeRedisValueInternal(v));
    }
    // HMSET replies "OK" on success; the parser maps that status reply to true.
    return PipeCommand(key, (c, k) => { c.Value.HMSet(k, parms.ToArray()); return false; }, obj => obj?.ToString() == "OK");
}
/// <summary>
/// 将哈希表 key 中的字段 field 的值设为 value
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="field">字段</param>
/// <param name="value">值</param>
/// <returns>如果字段是哈希表中的一个新建字段,并且值设置成功,返回true。如果哈希表中域字段已经存在且旧值已被新值覆盖,返回false。</returns>
public CSRedisClientPipe<bool> HSet(string key, string field, object value) => PipeCommand(key, (c, k) => c.Value.HSet(k, field, rds.SerializeRedisValueInternal(value)));
/// <summary>
/// 只有在字段 field 不存在时,设置哈希表字段的值
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="field">字段</param>
/// <param name="value">值(string 或 byte[])</param>
/// <returns></returns>
public CSRedisClientPipe<bool> HSetNx(string key, string field, object value) => PipeCommand(key, (c, k) => c.Value.HSetNx(k, field, rds.SerializeRedisValueInternal(value)));
/// <summary>
/// 获取哈希表中所有值
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<string[]> HVals(string key) => PipeCommand(key, (c, k) => c.Value.HVals(k));
/// <summary>
/// 获取哈希表中所有值
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<T[]> HVals<T>(string key) => PipeCommand(key, (c, k) => { c.Value.HValsBytes(k); return default(T[]); }, obj => rds.DeserializeRedisValueArrayInternal<T>((byte[][])obj));
/// <summary>
/// 迭代哈希表中的键值对
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="cursor">位置</param>
/// <param name="pattern">模式</param>
/// <param name="count">数量</param>
/// <returns></returns>
public CSRedisClientPipe<RedisScan<(string field, string value)>> HScan(string key, long cursor, string pattern = null, long? count = null) =>
PipeCommand(key, (c, k) => { c.Value.HScan(k, cursor, pattern, count); return default(RedisScan<(string, string)>); }, obj =>
{
var scan = (RedisScan<Tuple<string, string>>)obj;
return new RedisScan<(string, string)>(scan.Cursor, scan.Items.Select(z => (z.Item1, z.Item2)).ToArray());
});
/// <summary>
/// 迭代哈希表中的键值对
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="key">不含prefix前辍</param>
/// <param name="cursor">位置</param>
/// <param name="pattern">模式</param>
/// <param name="count">数量</param>
/// <returns></returns>
public CSRedisClientPipe<RedisScan<(string field, T value)>> HScan<T>(string key, long cursor, string pattern = null, long? count = null) =>
    // Queue HSCAN returning raw byte[] values; the parser converts each
    // (field, byte[]) pair into (field, T) via the client's deserializer.
    PipeCommand(key, (c, k) => { c.Value.HScanBytes(k, cursor, pattern, count); return default(RedisScan<(string, T)>); }, obj =>
    {
        var scan = (RedisScan<Tuple<string, byte[]>>)obj;
        return new RedisScan<(string, T)>(scan.Cursor, scan.Items.Select(z => (z.Item1, rds.DeserializeRedisValueInternal<T>(z.Item2))).ToArray());
    });
#endregion
#region String
/// <summary>
/// 如果 key 已经存在并且是一个字符串, APPEND 命令将指定的 value 追加到该 key 原来值(value)的末尾
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="value">字符串</param>
/// <returns>追加指定值之后, key 中字符串的长度</returns>
public CSRedisClientPipe<long> Append(string key, object value) => PipeCommand(key, (c, k) => c.Value.Append(k, rds.SerializeRedisValueInternal(value)));
/// <summary>
/// 计算给定位置被设置为 1 的比特位的数量
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="start">开始位置</param>
/// <param name="end">结束位置</param>
/// <returns></returns>
public CSRedisClientPipe<long> BitCount(string key, long start, long end) => PipeCommand(key, (c, k) => c.Value.BitCount(k, start, end));
/// <summary>
/// 对一个或多个保存二进制位的字符串 key 进行位元操作,并将结果保存到 destkey 上
/// </summary>
/// <param name="op">And | Or | XOr | Not</param>
/// <param name="destKey">不含prefix前辍</param>
/// <param name="keys">不含prefix前辍</param>
/// <returns>保存到 destkey 的长度,和输入 key 中最长的长度相等</returns>
public CSRedisClientPipe<long> BitOp(RedisBitOp op, string destKey, params string[] keys)
{
    // Cross-key command: only valid when all keys live on a single node.
    if (string.IsNullOrEmpty(destKey)) throw new Exception("destKey 不能为空");
    if (keys == null || keys.Length == 0) throw new Exception("keys 不能为空");
    if (IsMultiNode) throw new Exception("BitOp 管道命令,在分区模式下不可用");
    var nodePrefix = Nodes.First().Value.Prefix;
    return PipeCommand(destKey,
        (c, k) => c.Value.BitOp(op, k, keys.Select(src => nodePrefix + src).ToArray()));
}
/// <summary>
/// 对 key 所储存的值,查找范围内第一个被设置为1或者0的bit位
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="bit">查找值</param>
/// <param name="start">开始位置,-1是最后一个,-2是倒数第二个</param>
/// <param name="end">结果位置,-1是最后一个,-2是倒数第二个</param>
/// <returns>返回范围内第一个被设置为1或者0的bit位</returns>
public CSRedisClientPipe<long> BitPos(string key, bool bit, long? start = null, long? end = null) => PipeCommand(key, (c, k) => c.Value.BitPos(k, bit, start, end));
/// <summary>
/// 获取指定 key 的值
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<string> Get(string key) => PipeCommand(key, (c, k) => c.Value.Get(k));
/// <summary>
/// 获取指定 key 的值
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<T> Get<T>(string key) => PipeCommand(key, (c, k) => { c.Value.GetBytes(k); return default(T); }, obj => rds.DeserializeRedisValueInternal<T>((byte[])obj));
/// <summary>
/// 对 key 所储存的值,获取指定偏移量上的位(bit)
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="offset">偏移量</param>
/// <returns></returns>
public CSRedisClientPipe<bool> GetBit(string key, uint offset) => PipeCommand(key, (c, k) => c.Value.GetBit(k, offset));
/// <summary>
/// 返回 key 中字符串值的子字符
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="start">开始位置,0表示第一个元素,-1表示最后一个元素</param>
/// <param name="end">结束位置,0表示第一个元素,-1表示最后一个元素</param>
/// <returns></returns>
public CSRedisClientPipe<string> GetRange(string key, long start, long end) => PipeCommand(key, (c, k) => c.Value.GetRange(k, start, end));
/// <summary>
/// 返回 key 中字符串值的子字符
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="key">不含prefix前辍</param>
/// <param name="start">开始位置,0表示第一个元素,-1表示最后一个元素</param>
/// <param name="end">结束位置,0表示第一个元素,-1表示最后一个元素</param>
/// <returns></returns>
public CSRedisClientPipe<T> GetRange<T>(string key, long start, long end) => PipeCommand(key, (c, k) => { c.Value.GetRangeBytes(k, start, end); return default(T); }, obj => rds.DeserializeRedisValueInternal<T>((byte[])obj));
/// <summary>
/// 将给定 key 的值设为 value ,并返回 key 的旧值(old value)
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="value">字符串</param>
/// <returns></returns>
public CSRedisClientPipe<string> GetSet(string key, object value) => PipeCommand(key, (c, k) => c.Value.GetSet(k, rds.SerializeRedisValueInternal(value)));
/// <summary>
/// 将给定 key 的值设为 value ,并返回 key 的旧值(old value)
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="key">不含prefix前辍</param>
/// <param name="value">值</param>
/// <returns></returns>
public CSRedisClientPipe<T> GetSet<T>(string key, object value) => PipeCommand(key, (c, k) => { c.Value.GetSetBytes(k, rds.SerializeRedisValueInternal(value)); return default(T); }, obj => rds.DeserializeRedisValueInternal<T>((byte[])obj));
/// <summary>
/// 将 key 所储存的值加上给定的增量值(increment)
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="value">增量值(默认=1)</param>
/// <returns></returns>
public CSRedisClientPipe<long> IncrBy(string key, long value = 1) => PipeCommand(key, (c, k) => c.Value.IncrBy(k, value));
/// <summary>
/// 将 key 所储存的值加上给定的浮点增量值(increment)
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="value">增量值(默认=1)</param>
/// <returns></returns>
public CSRedisClientPipe<decimal> IncrBy(string key, decimal value) => PipeCommand(key, (c, k) => c.Value.IncrByFloat(k, value));
/// <summary>
/// Gets the values of multiple keys as an array (MGET).
/// Not available in partitioned (multi-node) mode.
/// </summary>
/// <param name="keys">Keys, without the configured prefix</param>
/// <returns></returns>
public CSRedisClientPipe<string[]> MGet(params string[] keys)
{
    if (keys == null || keys.Length == 0) throw new Exception("keys 不能为空");
    if (IsMultiNode) throw new Exception("MGet 管道命令,在分区模式下不可用");
    var nodePrefix = Nodes.First().Value.Prefix;
    return PipeCommand(keys[0],
        (c, k) => c.Value.MGet(keys.Select(src => nodePrefix + src).ToArray()));
}
/// <summary>
/// Sets the value of the specified key (SET). Values may be string | byte[] | numeric | object.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="value">Value</param>
/// <param name="expireSeconds">Expiry in seconds; values &lt;= 0 mean no expiry</param>
/// <param name="exists">Nx (set only if absent), Xx (set only if present)</param>
/// <returns></returns>
public CSRedisClientPipe<bool> Set(string key, object value, int expireSeconds = -1, RedisExistence? exists = null)
{
    object serializedValue = rds.SerializeRedisValueInternal(value);
    // Dispatch to the overload matching the supplied options;
    // "OK" status reply maps to true in every branch.
    if (expireSeconds > 0)
    {
        if (exists != null) return PipeCommand(key, (c, k) => { c.Value.Set(k, serializedValue, expireSeconds, exists); return false; }, obj => obj?.ToString() == "OK");
        return PipeCommand(key, (c, k) => { c.Value.Set(k, serializedValue, expireSeconds, null); return false; }, obj => obj?.ToString() == "OK");
    }
    if (exists != null) return PipeCommand(key, (c, k) => { c.Value.Set(k, serializedValue, null, exists); return false; }, obj => obj?.ToString() == "OK");
    return PipeCommand(key, (c, k) => { c.Value.Set(k, serializedValue); return false; }, obj => obj?.ToString() == "OK");
}
/// <summary>
/// Sets the value of the specified key (SET) with a TimeSpan expiry.
/// Values may be string | byte[] | numeric | object.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="value">Value</param>
/// <param name="expire">Expiry; values &lt;= TimeSpan.Zero mean no expiry</param>
/// <param name="exists">Nx (set only if absent), Xx (set only if present)</param>
/// <returns></returns>
public CSRedisClientPipe<bool> Set(string key, object value, TimeSpan expire, RedisExistence? exists = null)
{
    // NOTE(review): local name "redisValule" is a typo for "redisValue" (also in the int overload).
    object redisValule = rds.SerializeRedisValueInternal(value);
    // Dispatch to the overload matching the supplied options; "OK" reply maps to true.
    if (expire <= TimeSpan.Zero && exists == null) return PipeCommand(key, (c, k) => { c.Value.Set(k, redisValule); return false; }, obj => obj?.ToString() == "OK");
    if (expire <= TimeSpan.Zero && exists != null) return PipeCommand(key, (c, k) => { c.Value.Set(k, redisValule, null, exists); return false; }, obj => obj?.ToString() == "OK");
    if (expire > TimeSpan.Zero && exists == null) return PipeCommand(key, (c, k) => { c.Value.Set(k, redisValule, expire, null); return false; }, obj => obj?.ToString() == "OK");
    if (expire > TimeSpan.Zero && exists != null) return PipeCommand(key, (c, k) => { c.Value.Set(k, redisValule, expire, exists); return false; }, obj => obj?.ToString() == "OK");
    // Unreachable: the four branches above are exhaustive; kept as a defensive fallback.
    return PipeCommand(key, (c, k) => { c.Value.Set(k, redisValule); return false; }, obj => obj?.ToString() == "OK");
}
/// <summary>
/// 对 key 所储存的字符串值,设置或清除指定偏移量上的位(bit)
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="offset">偏移量</param>
/// <param name="value">值</param>
/// <returns></returns>
public CSRedisClientPipe<bool> SetBit(string key, uint offset, bool value) => PipeCommand(key, (c, k) => c.Value.SetBit(k, offset, value));
/// <summary>
/// 只有在 key 不存在时设置 key 的值
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="value">值</param>
/// <returns></returns>
public CSRedisClientPipe<bool> SetNx(string key, object value) => PipeCommand(key, (c, k) => c.Value.SetNx(k, rds.SerializeRedisValueInternal(value)));
/// <summary>
/// 用 value 参数覆写给定 key 所储存的字符串值,从偏移量 offset 开始
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="offset">偏移量</param>
/// <param name="value">值</param>
/// <returns>被修改后的字符串长度</returns>
public CSRedisClientPipe<long> SetRange(string key, uint offset, object value) => PipeCommand(key, (c, k) => c.Value.SetRange(k, offset, rds.SerializeRedisValueInternal(value)));
/// <summary>
/// 返回 key 所储存的字符串值的长度
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<long> StrLen(string key) => PipeCommand(key, (c, k) => c.Value.StrLen(k));
#endregion
#region Key
/// <summary>
/// [redis-server 3.2.1] 修改指定key(s) 最后访问时间 若key不存在,不做操作
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<long> Touch(string key) => PipeCommand(key, (c, k) => c.Value.Touch(k));
/// <summary>
/// [redis-server 4.0.0] Delete a key, 该命令和DEL十分相似:删除指定的key(s),若key不存在则该key被跳过。但是,相比DEL会产生阻塞,该命令会在另一个线程中回收内存,因此它是非阻塞的。 这也是该命令名字的由来:仅将keys从keyspace元数据中删除,真正的删除会在后续异步操作。
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<long> UnLink(string key) => PipeCommand(key, (c, k) => c.Value.UnLink(k));
/// <summary>
/// 用于在 key 存在时删除 key
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<long> Del(string key) => PipeCommand(key, (c, k) => c.Value.Del(k));
/// <summary>
/// 序列化给定 key ,并返回被序列化的值
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<byte[]> Dump(string key) => PipeCommand(key, (c, k) => c.Value.Dump(k));
/// <summary>
/// 检查给定 key 是否存在
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public CSRedisClientPipe<bool> Exists(string key) => PipeCommand(key, (c, k) => c.Value.Exists(k));
/// <summary>
/// 为给定 key 设置过期时间
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="seconds">过期秒数</param>
/// <returns></returns>
public CSRedisClientPipe<bool> Expire(string key, int seconds) => PipeCommand(key, (c, k) => c.Value.Expire(k, seconds));
/// <summary>
/// 为给定 key 设置过期时间
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="expire">过期时间</param>
/// <returns></returns>
public CSRedisClientPipe<bool> Expire(string key, TimeSpan expire) => PipeCommand(key, (c, k) => c.Value.Expire(k, expire));
/// <summary>
/// Sets an absolute expiration time on the given key.
/// </summary>
/// <param name="key">Key, without any configured prefix.</param>
/// <param name="expire">Absolute expiration time.</param>
/// <returns>Pipeline handle whose eventual result is true when the timeout was set.</returns>
public CSRedisClientPipe<bool> ExpireAt(string key, DateTime expire)
{
    return PipeCommand(key, (cli, rk) => cli.Value.ExpireAt(rk, expire));
}
/// <summary>
/// Finds all keys matching the given pattern across partition nodes.
/// </summary>
/// <param name="pattern">Match pattern, e.g. runoob*</param>
/// <returns>Pipeline handle whose eventual result is the matching keys.</returns>
public CSRedisClientPipe<string[]> Keys(string pattern)
{
    // Fixed: the exception message previously named the wrong command ("SInterStore").
    if (IsMultiNode) throw new Exception("Keys 管道命令,在分区模式下不可用");
    // KEYS is not tied to a single key, so the pipeline slot is keyed by the constant "Keys".
    return PipeCommand("Keys", (c, k) => c.Value.Keys(pattern));
}
/// <summary>
/// Moves the key from the current database to the given database.
/// </summary>
/// <param name="key">Key, without any configured prefix.</param>
/// <param name="database">Target database index.</param>
/// <returns>Pipeline handle whose eventual result is true when the key was moved.</returns>
public CSRedisClientPipe<bool> Move(string key, int database)
{
    return PipeCommand(key, (cli, rk) => cli.Value.Move(rk, database));
}
/// <summary>
/// Returns the internal representation (encoding) used to store the key's value.
/// </summary>
/// <param name="key">Key, without any configured prefix.</param>
/// <returns>Pipeline handle whose eventual result is the encoding name.</returns>
public CSRedisClientPipe<string> ObjectEncoding(string key)
{
    return PipeCommand(key, (cli, rk) => cli.Value.ObjectEncoding(rk));
}
/// <summary>
/// Returns the reference count of the value stored at the key. Mainly useful for debugging.
/// </summary>
/// <param name="key">Key, without any configured prefix.</param>
/// <returns>Pipeline handle whose eventual result is the reference count, or null.</returns>
public CSRedisClientPipe<long?> ObjectRefCount(string key)
{
    return PipeCommand(key, (cli, rk) => cli.Value.Object(RedisObjectSubCommand.RefCount, rk));
}
/// <summary>
/// Returns how long the key has been idle (neither read nor written), in seconds.
/// </summary>
/// <param name="key">Key, without any configured prefix.</param>
/// <returns>Pipeline handle whose eventual result is the idle time, or null.</returns>
public CSRedisClientPipe<long?> ObjectIdleTime(string key)
{
    return PipeCommand(key, (cli, rk) => cli.Value.Object(RedisObjectSubCommand.IdleTime, rk));
}
/// <summary>
/// Removes the expiration from the key so it persists indefinitely.
/// </summary>
/// <param name="key">Key, without any configured prefix.</param>
/// <returns>Pipeline handle whose eventual result is true when the timeout was removed.</returns>
public CSRedisClientPipe<bool> Persist(string key)
{
    return PipeCommand(key, (cli, rk) => cli.Value.Persist(rk));
}
/// <summary>
/// Sets a time-to-live on the given key, in milliseconds.
/// </summary>
/// <param name="key">Key, without any configured prefix.</param>
/// <param name="milliseconds">Expiration in milliseconds.</param>
/// <returns>Pipeline handle whose eventual result is true when the timeout was set.</returns>
public CSRedisClientPipe<bool> PExpire(string key, int milliseconds)
{
    return PipeCommand(key, (cli, rk) => cli.Value.PExpire(rk, milliseconds));
}
/// <summary>
/// Sets a time-to-live on the given key with millisecond precision.
/// </summary>
/// <param name="key">Key, without any configured prefix.</param>
/// <param name="expire">Expiration interval.</param>
/// <returns>Pipeline handle whose eventual result is true when the timeout was set.</returns>
public CSRedisClientPipe<bool> PExpire(string key, TimeSpan expire)
{
    return PipeCommand(key, (cli, rk) => cli.Value.PExpire(rk, expire));
}
/// <summary>
/// Sets an absolute expiration time on the given key, with millisecond precision.
/// </summary>
/// <param name="key">Key, without any configured prefix.</param>
/// <param name="expire">Absolute expiration time.</param>
/// <returns>Pipeline handle whose eventual result is true when the timeout was set.</returns>
public CSRedisClientPipe<bool> PExpireAt(string key, DateTime expire)
{
    return PipeCommand(key, (cli, rk) => cli.Value.PExpireAt(rk, expire));
}
/// <summary>
/// Returns the remaining time-to-live of the key, in milliseconds.
/// </summary>
/// <param name="key">Key, without any configured prefix.</param>
/// <returns>Pipeline handle whose eventual result is the remaining TTL in milliseconds.</returns>
public CSRedisClientPipe<long> PTtl(string key)
{
    return PipeCommand(key, (cli, rk) => cli.Value.PTtl(rk));
}
/// <summary>
/// Returns a random key from among all nodes.
/// </summary>
/// <returns>If the returned key carries the pool's prefix, the prefix is stripped before it is returned.</returns>
public CSRedisClientPipe<string> RandomKey()
{
    // The prefix is captured by this closure during command execution and read later
    // by the parse callback below, after the pipeline reply has arrived.
    var prefix = "";
    // A random GUID is used as the pipeline slot key because RANDOMKEY is not bound to any key.
    return PipeCommand(Guid.NewGuid().ToString(), (c, k) =>
    {
        c.Value.RandomKey();
        prefix = (c.Pool as RedisClientPool).Prefix;
        return prefix;
    }, obj =>
    {
        var rk = obj?.ToString();
        // Strip the pool prefix from the raw reply so callers see the logical key name.
        if (string.IsNullOrEmpty(prefix) == false && rk.StartsWith(prefix)) return rk.Substring(prefix.Length);
        return rk;
    });
}
/// <summary>
/// Renames a key.
/// </summary>
/// <param name="key">Old name, without any configured prefix.</param>
/// <param name="newKey">New name, without any configured prefix.</param>
/// <returns></returns>
public CSRedisClientPipe<bool> Rename(string key, string newKey)
{
    if (IsMultiNode) throw new Exception("Rename 管道命令,在分区模式下不可用");
    // Single-node mode is guaranteed above, so the first node's prefix applies to the target name too.
    var prefix = Nodes.First().Value.Prefix;
    // RENAME replies "+OK" on success; the parse callback maps that status reply to a bool.
    return PipeCommand(key, (c, k) => { c.Value.Rename(k, prefix + newKey); return false; }, obj => obj?.ToString() == "OK");
}
/// <summary>
/// Renames a key only when the new name does not already exist.
/// </summary>
/// <param name="key">Old name, without any configured prefix.</param>
/// <param name="newKey">New name, without any configured prefix.</param>
/// <returns></returns>
public CSRedisClientPipe<bool> RenameNx(string key, string newKey)
{
    if (IsMultiNode) throw new Exception("RenameNx 管道命令,在分区模式下不可用");
    var prefix = Nodes.First().Value.Prefix;
    // NOTE(review): RENAMENX replies with an integer (:0 / :1), not "+OK". Whether obj here is
    // the raw reply or a value already parsed by RedisClient.RenameNx cannot be determined from
    // this file; if it is the raw reply, this parser always yields false. TODO: confirm against
    // PipeCommand's reply handling.
    return PipeCommand(key, (c, k) => { c.Value.RenameNx(k, prefix + newKey); return false; }, obj => obj?.ToString() == "OK");
}
/// <summary>
/// Deserializes the given DUMP payload and associates it with the given key (no expiration).
/// </summary>
/// <param name="key">Key, without any configured prefix.</param>
/// <param name="serializedValue">Serialized value produced by DUMP.</param>
/// <returns>Pipeline handle whose eventual result is true when the server replied OK.</returns>
public CSRedisClientPipe<bool> Restore(string key, byte[] serializedValue)
{
    return PipeCommand(key, (cli, rk) =>
    {
        cli.Value.Restore(rk, 0, serializedValue);
        return false;
    }, raw => raw?.ToString() == "OK");
}
/// <summary>
/// Deserializes the given DUMP payload and associates it with the given key.
/// </summary>
/// <param name="key">Key, without any configured prefix.</param>
/// <param name="ttlMilliseconds">Time-to-live to set on the key, in milliseconds.</param>
/// <param name="serializedValue">Serialized value produced by DUMP.</param>
/// <returns>Pipeline handle whose eventual result is true when the server replied OK.</returns>
public CSRedisClientPipe<bool> Restore(string key, long ttlMilliseconds, byte[] serializedValue)
{
    return PipeCommand(key, (cli, rk) =>
    {
        cli.Value.Restore(rk, ttlMilliseconds, serializedValue);
        return false;
    }, raw => raw?.ToString() == "OK");
}
/// <summary>
/// Returns the elements of the given list, set or sorted set, sorted.
/// Reference: http://doc.redisfans.com/key/sort.html
/// </summary>
/// <param name="key">List, set or sorted set; without any configured prefix.</param>
/// <param name="count">Number of elements to return.</param>
/// <param name="offset">Offset to start from.</param>
/// <param name="by">External key pattern to sort by.</param>
/// <param name="dir">Sort direction.</param>
/// <param name="isAlpha">Sort lexicographically rather than numerically.</param>
/// <param name="get">Patterns used to fetch external keys based on the sorted result.</param>
/// <returns></returns>
public CSRedisClientPipe<string[]> Sort(string key, long? count = null, long offset = 0, string by = null, RedisSortDir? dir = null, bool? isAlpha = null, params string[] get)
{
    if (IsMultiNode) throw new Exception("Sort 管道命令,在分区模式下不可用");
    // Note the argument order swap: this method takes (count, offset) but the client call takes (offset, count).
    return PipeCommand(key, (c, k) => c.Value.Sort(k, offset, count, by, dir, isAlpha, get));
}
/// <summary>
/// Sorts the elements of the given list, set or sorted set and stores the result at the destination key.
/// Reference: http://doc.redisfans.com/key/sort.html
/// </summary>
/// <param name="key">List, set or sorted set; without any configured prefix.</param>
/// <param name="destination">Destination key, without any configured prefix.</param>
/// <param name="count">Number of elements to store.</param>
/// <param name="offset">Offset to start from.</param>
/// <param name="by">External key pattern to sort by.</param>
/// <param name="dir">Sort direction.</param>
/// <param name="isAlpha">Sort lexicographically rather than numerically.</param>
/// <param name="get">Patterns used to fetch external keys based on the sorted result.</param>
/// <returns></returns>
public CSRedisClientPipe<long> SortAndStore(string key, string destination, long? count = null, long offset = 0, string by = null, RedisSortDir? dir = null, bool? isAlpha = null, params string[] get)
{
    if (IsMultiNode) throw new Exception("SortAndStore 管道命令,在分区模式下不可用");
    // The pool prefix is re-applied to the destination so both keys live in the same namespace.
    return PipeCommand(key, (c, k) => c.Value.SortAndStore(k, (c.Pool as RedisClientPool)?.Prefix + destination, offset, count, by, dir, isAlpha, get));
}
/// <summary>
/// Returns the remaining time-to-live of the key, in seconds.
/// </summary>
/// <param name="key">Key, without any configured prefix.</param>
/// <returns>Pipeline handle whose eventual result is the remaining TTL in seconds.</returns>
public CSRedisClientPipe<long> Ttl(string key)
{
    return PipeCommand(key, (cli, rk) => cli.Value.Ttl(rk));
}
/// <summary>
/// Returns the type of the value stored at the key.
/// </summary>
/// <param name="key">Key, without any configured prefix.</param>
/// <returns>Pipeline handle whose eventual result is the key's type (KeyType.None when unrecognized).</returns>
public CSRedisClientPipe<KeyType> Type(string key)
{
    return PipeCommand(key, (cli, rk) =>
    {
        cli.Value.Type(rk);
        return KeyType.None;
    }, raw =>
    {
        // Map the raw type-name reply onto the KeyType enum, case-insensitively.
        KeyType parsed;
        if (Enum.TryParse(raw?.ToString(), true, out parsed)) return parsed;
        return KeyType.None;
    });
}
/// <summary>
/// Incrementally iterates the keys of the current database.
/// </summary>
/// <param name="cursor">Cursor position (start at 0).</param>
/// <param name="pattern">Match pattern.</param>
/// <param name="count">Count hint per iteration.</param>
/// <returns>Pipeline handle whose eventual result is one page of the scan.</returns>
public CSRedisClientPipe<RedisScan<string>> Scan(long cursor, string pattern = null, long? count = null)
{
    if (IsMultiNode) throw new Exception("Scan 管道命令,在分区模式下不可用");
    // SCAN is not bound to a key, so the pipeline slot is keyed by the constant "Scan".
    return PipeCommand("Scan", (cli, rk) => cli.Value.Scan(cursor, pattern, count));
}
/// <summary>
/// Incrementally iterates the keys of the current database, deserializing values to T.
/// </summary>
/// <typeparam name="T">byte[] or any deserializable type</typeparam>
/// <param name="key">Key, without any configured prefix.</param>
/// <param name="cursor">Cursor position (start at 0).</param>
/// <param name="pattern">Match pattern.</param>
/// <param name="count">Count hint per iteration.</param>
/// <returns></returns>
public CSRedisClientPipe<RedisScan<T>> Scan<T>(string key, long cursor, string pattern = null, long? count = null)
{
    if (IsMultiNode) throw new Exception("Scan<T> 管道命令,在分区模式下不可用");
    // NOTE(review): the "key" parameter is never used; the pipeline slot is keyed by the
    // constant "Scan<T>" and ScanBytes takes no key. Confirm whether "key" is intentional.
    return PipeCommand("Scan<T>", (c, k) =>
    {
        // Fetch raw bytes now; the parse callback below converts them into T once the reply arrives.
        c.Value.ScanBytes(cursor, pattern, count); return default(RedisScan<T>);
    }, obj =>
    {
        var scan = (RedisScan<byte[]>)obj;
        return new RedisScan<T>(scan.Cursor, rds.DeserializeRedisValueArrayInternal<T>(scan.Items));
    });
}
#endregion
}
}
|
2881099/csredis | 1,816 | src/CSRedisCore/Exceptions.cs | using System;
namespace CSRedis
{
/// <summary>
/// Represents a Redis server error reply (a "-ERR ..." response)
/// </summary>
public class RedisException : RedisClientException
{
    /// <summary>
    /// Instantiate a new instance of the RedisException class
    /// </summary>
    /// <param name="message">Server response</param>
    public RedisException(string message)
        : base(message)
    { }
    /// <summary>
    /// Instantiate a new instance of the RedisException class
    /// </summary>
    /// <param name="message">Server response</param>
    /// <param name="inner">Inner exception</param>
    public RedisException(string message, Exception inner)
        : base(message, inner)
    { }
}
/// <summary>
/// The exception that is thrown when an unexpected value is found in a Redis request or response
/// </summary>
public class RedisProtocolException : RedisClientException
{
    /// <summary>
    /// Instantiate a new instance of the RedisProtocolException class
    /// </summary>
    /// <param name="message">Protocol violation message</param>
    public RedisProtocolException(string message)
        : base(message)
    { }
}
/// <summary>
/// Exception thrown by RedisClient; base type for all CSRedis exceptions
/// </summary>
public class RedisClientException : Exception
{
    /// <summary>
    /// Instantiate a new instance of the RedisClientException class
    /// </summary>
    /// <param name="message">Exception message</param>
    public RedisClientException(string message)
        : base(message)
    { }
    /// <summary>
    /// Instantiate a new instance of the RedisClientException class
    /// </summary>
    /// <param name="message">Exception message</param>
    /// <param name="inner">Inner exception</param>
    public RedisClientException(string message, Exception inner)
        : base(message, inner)
    { }
}
}
|
2881099/csredis | 2,411 | src/CSRedisCore/Events.cs | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace CSRedis
{
/// <summary>
/// Provides data for the event that is raised when a subscription message is received
/// (raised once per published message)
/// </summary>
public class RedisSubscriptionReceivedEventArgs : EventArgs
{
    /// <summary>
    /// The subscription message
    /// </summary>
    public RedisSubscriptionMessage Message { get; private set; }
    internal RedisSubscriptionReceivedEventArgs(RedisSubscriptionMessage message)
    {
        Message = message;
    }
}
/// <summary>
/// Provides data for the event that is raised when a subscription channel is opened or closed
/// </summary>
public class RedisSubscriptionChangedEventArgs : EventArgs
{
    /// <summary>
    /// The subscription response describing the channel change
    /// </summary>
    public RedisSubscriptionChannel Response { get; private set; }
    internal RedisSubscriptionChangedEventArgs(RedisSubscriptionChannel response)
    {
        Response = response;
    }
}
/// <summary>
/// Provides data for the event that is raised when a transaction command has been processed by the server
/// </summary>
public class RedisTransactionQueuedEventArgs : EventArgs
{
    /// <summary>
    /// The status code of the transaction command (typically "QUEUED")
    /// </summary>
    public string Status { get; private set; }
    /// <summary>
    /// The command that was queued
    /// </summary>
    public string Command { get; private set; }
    /// <summary>
    /// The arguments of the queued command
    /// </summary>
    public object[] Arguments { get; private set; }
    internal RedisTransactionQueuedEventArgs(string status, string command, object[] arguments)
    {
        Status = status;
        Command = command;
        Arguments = arguments;
    }
}
/// <summary>
/// Provides data for the event that is raised when a Redis MONITOR message is received
/// </summary>
public class RedisMonitorEventArgs : EventArgs
{
    /// <summary>
    /// Monitor output (raw reply object; shape depends on the server)
    /// </summary>
    public object Message { get; private set; }
    internal RedisMonitorEventArgs(object message)
    {
        Message = message;
    }
}
}
|
2881099/csredis | 21,208 | src/CSRedisCore/Types.cs |
using System;
using System.Runtime.Serialization;
namespace CSRedis
{
/// <summary>
/// Sub-command used by the Redis OBJECT command
/// </summary>
public enum RedisObjectSubCommand
{
    /// <summary>
    /// Return the number of references of the value associated with the specified key
    /// </summary>
    RefCount,
    /// <summary>
    /// Return the number of seconds since the object stored at the specified key is idle
    /// </summary>
    IdleTime,
};
/// <summary>
/// Sort direction used by the Redis SORT command
/// </summary>
public enum RedisSortDir
{
    /// <summary>
    /// Sort ascending (a-z)
    /// </summary>
    Asc,
    /// <summary>
    /// Sort descending (z-a)
    /// </summary>
    Desc,
}
/// <summary>
/// Insert position used by the Redis LINSERT command
/// </summary>
public enum RedisInsert
{
    /// <summary>
    /// Insert before the pivot element
    /// </summary>
    Before,
    /// <summary>
    /// Insert after the pivot element
    /// </summary>
    After,
}
/// <summary>
/// Operation used by the Redis BITOP command
/// </summary>
public enum RedisBitOp
{
    /// <summary>
    /// Bitwise AND
    /// </summary>
    And,
    /// <summary>
    /// Bitwise OR
    /// </summary>
    Or,
    /// <summary>
    /// Bitwise EXCLUSIVE-OR
    /// </summary>
    XOr,
    /// <summary>
    /// Bitwise NOT (single-operand)
    /// </summary>
    Not,
}
/// <summary>
/// Aggregation function used by Redis sorted-set operations (ZUNIONSTORE/ZINTERSTORE)
/// </summary>
public enum RedisAggregate
{
    /// <summary>
    /// Aggregate SUM
    /// </summary>
    Sum,
    /// <summary>
    /// Aggregate MIN
    /// </summary>
    Min,
    /// <summary>
    /// Aggregate MAX
    /// </summary>
    Max,
}
/// <summary>
/// Redis unified message prefix. Each value is the RESP reply type-prefix character.
/// </summary>
public enum RedisMessage
{
    /// <summary>
    /// Error message
    /// </summary>
    Error = '-',
    /// <summary>
    /// Status (simple string) message
    /// </summary>
    Status = '+',
    /// <summary>
    /// Bulk (binary-safe string) message
    /// </summary>
    Bulk = '$',
    /// <summary>
    /// Multi bulk (array) message
    /// </summary>
    MultiBulk = '*',
    /// <summary>
    /// Integer message
    /// </summary>
    Int = ':',
}
/// <summary>
/// Redis subscription response type
/// </summary>
public enum RedisSubscriptionResponseType
{
    /// <summary>
    /// Channel subscribed
    /// </summary>
    Subscribe,
    /// <summary>
    /// Message published
    /// </summary>
    Message,
    /// <summary>
    /// Channel unsubscribed
    /// </summary>
    Unsubscribe,
    /// <summary>
    /// Channel pattern subscribed
    /// </summary>
    PSubscribe,
    /// <summary>
    /// Message published to channel pattern
    /// </summary>
    PMessage,
    /// <summary>
    /// Channel pattern unsubscribed
    /// </summary>
    PUnsubscribe,
}
/// <summary>
/// Redis existence specification for the SET command (the NX/XX flags)
/// </summary>
public enum RedisExistence
{
    /// <summary>
    /// Only set the key if it does not already exist
    /// </summary>
    Nx,
    /// <summary>
    /// Only set the key if it already exists
    /// </summary>
    Xx,
}
/// <summary>
/// Base class for Redis role information (reply of the ROLE command)
/// </summary>
public abstract class RedisRole
{
    readonly string _roleName;
    /// <summary>
    /// Get the role type ("master", "slave" or "sentinel")
    /// </summary>
    public string RoleName { get { return _roleName; } }
    internal RedisRole(string roleName)
    {
        _roleName = roleName;
    }
}
/// <summary>
/// Represents information on the Redis master role
/// </summary>
public class RedisMasterRole : RedisRole
{
    readonly long _replicationOffset;
    readonly Tuple<string, int, long>[] _slaves;
    /// <summary>
    /// Get the master replication offset
    /// </summary>
    public long ReplicationOffset { get { return _replicationOffset; } }
    /// <summary>
    /// Get the slaves associated with the current master.
    /// Each tuple is presumably (ip, port, replication offset), matching the ROLE reply — confirm against the parser.
    /// </summary>
    public Tuple<string, int, long>[] Slaves { get { return _slaves; } }
    internal RedisMasterRole(string role, long replicationOffset, Tuple<string, int, long>[] slaves)
        : base(role)
    {
        _replicationOffset = replicationOffset;
        _slaves = slaves;
    }
}
/// <summary>
/// Represents information on the Redis slave role
/// </summary>
public class RedisSlaveRole : RedisRole
{
    readonly string _masterIp;
    readonly int _masterPort;
    readonly string _replicationState;
    readonly long _dataReceived;
    /// <summary>
    /// Get the IP address of the master node
    /// </summary>
    public string MasterIp { get { return _masterIp; } }
    /// <summary>
    /// Get the port of the master node
    /// </summary>
    public int MasterPort { get { return _masterPort; } }
    /// <summary>
    /// Get the replication state
    /// </summary>
    public string ReplicationState { get { return _replicationState; } }
    /// <summary>
    /// Get the number of bytes received from the master
    /// </summary>
    public long DataReceived { get { return _dataReceived; } }
    internal RedisSlaveRole(string role, string masterIp, int masterPort, string replicationState, long dataReceived)
        : base(role)
    {
        _masterIp = masterIp;
        _masterPort = masterPort;
        _replicationState = replicationState;
        _dataReceived = dataReceived;
    }
}
/// <summary>
/// Represents information on the Redis sentinel role
/// </summary>
public class RedisSentinelRole : RedisRole
{
    readonly string[] _masters;
    /// <summary>
    /// Get the names of the masters known to the current Sentinel
    /// </summary>
    public string[] Masters { get { return _masters; } }
    internal RedisSentinelRole(string role, string[] masters)
        : base(role)
    {
        _masters = masters;
    }
}
/// <summary>
/// Represents the result of a Redis SCAN or SSCAN operation
/// </summary>
public class RedisScan<T>
{
    /// <summary>
    /// Updated cursor that should be used as the cursor argument in the next call
    /// </summary>
    public long Cursor { get; private set; }
    /// <summary>
    /// Collection of elements returned by the SCAN operation
    /// </summary>
    public T[] Items { get; private set; }
    internal RedisScan(long cursor, T[] items)
    {
        Cursor = cursor;
        Items = items;
    }
}
/// <summary>
/// Represents a Redis subscription response
/// </summary>
public class RedisSubscriptionResponse
{
    readonly string _channel;
    readonly string _pattern;
    readonly string _type;
    /// <summary>
    /// Get the subscription channel name
    /// </summary>
    public string Channel { get { return _channel; } }
    /// <summary>
    /// Get the subscription pattern (null for non-pattern subscriptions)
    /// </summary>
    public string Pattern { get { return _pattern; } }
    /// <summary>
    /// Get the message type
    /// </summary>
    public string Type { get { return _type; } }
    internal RedisSubscriptionResponse(string type, string channel, string pattern)
    {
        _type = type;
        _channel = channel;
        _pattern = pattern;
    }
}
/// <summary>
/// Represents a Redis subscription channel
/// </summary>
public class RedisSubscriptionChannel : RedisSubscriptionResponse
{
    readonly long _count;
    /// <summary>
    /// Get the count of active subscriptions on the connection
    /// </summary>
    public long Count { get { return _count; } }
    internal RedisSubscriptionChannel(string type, string channel, string pattern, long count)
        : base(type, channel, pattern)
    {
        _count = count;
    }
}
/// <summary>
/// Represents a Redis subscription message
/// </summary>
public class RedisSubscriptionMessage : RedisSubscriptionResponse
{
    readonly string _body;
    /// <summary>
    /// Get the subscription message payload
    /// </summary>
    public string Body { get { return _body; } }
    // Plain (non-pattern) message: no pattern is associated.
    internal RedisSubscriptionMessage(string type, string channel, string body)
        : base(type, channel, null)
    { 
        _body = body;
    }
    // Pattern message. NOTE: parameter order here is (type, pattern, channel, body) while the
    // base ctor takes (type, channel, pattern) — keep the argument swap below in mind.
    internal RedisSubscriptionMessage(string type, string pattern, string channel, string body)
        : base(type, channel, pattern)
    {
        _body = body;
    }
}
/// <summary>
/// Base class for Redis server-info objects reported by Sentinel.
/// Populated from a SerializationInfo built out of a Sentinel reply; serialization is one-way
/// (GetObjectData is intentionally not implemented).
/// </summary>
public abstract class RedisServerInfo : ISerializable
{
    /// <summary>
    /// Create new RedisServerInfo via deserialization
    /// </summary>
    /// <param name="info">Serialization info</param>
    /// <param name="context">Serialization context</param>
    public RedisServerInfo(SerializationInfo info, StreamingContext context)
    {
        Name = info.GetString("name");
        Ip = info.GetString("ip");
        Port = info.GetInt32("port");
        RunId = info.GetString("runid");
        Flags = info.GetString("flags").Split(',');
        // The fields below are no longer read here; the corresponding properties stay at their defaults.
        //PendingCommands = info.GetInt64("pending-commands");
        //LastOkPingReply = info.GetInt64("last-ok-ping-reply");
        //LastPingReply = info.GetInt64("last-ping-reply");
        //DownAfterMilliseconds = info.GetInt64("down-after-milliseconds");
    }
    /// <summary>
    /// Get or set Redis server name
    /// </summary>
    public string Name { get; set; }
    /// <summary>
    /// Get or set Redis server IP
    /// </summary>
    public string Ip { get; set; }
    /// <summary>
    /// Get or set Redis server port
    /// </summary>
    public int Port { get; set; }
    /// <summary>
    /// Get or set Redis server run ID
    /// </summary>
    public string RunId { get; set; }
    /// <summary>
    /// Get or set Redis server flags (the comma-separated "flags" field, split)
    /// </summary>
    public string[] Flags { get; set; }
    /// <summary>
    /// Get or set number of pending Redis server commands (currently never populated here)
    /// </summary>
    public long PendingCommands { get; set; }
    /// <summary>
    /// Get or set last ping sent (currently never populated here)
    /// </summary>
    public long LastPingSent { get; set; }
    /// <summary>
    /// Get or set milliseconds since last successful ping reply (currently never populated here)
    /// </summary>
    public long LastOkPingReply { get; set; }
    /// <summary>
    /// Get or set milliseconds since last ping reply (currently never populated here)
    /// </summary>
    public long LastPingReply { get; set; }
    /// <summary>
    /// Get or set down after milliseconds (currently never populated here)
    /// </summary>
    public long DownAfterMilliseconds { get; set; }
    /// <summary>
    /// Not implemented — these objects are deserialize-only; calling this always throws.
    /// </summary>
    /// <param name="info">info</param>
    /// <param name="context">info</param>
    public void GetObjectData(SerializationInfo info, StreamingContext context)
    {
        throw new NotImplementedException();
    }
}
/// <summary>
/// Base class for Redis master/slave objects reported by Sentinel
/// </summary>
public abstract class RedisMasterSlaveInfo : RedisServerInfo
{
    /// <summary>
    /// Create new RedisMasterSlaveInfo via deserialization
    /// </summary>
    /// <param name="info">Serialization info</param>
    /// <param name="context">Serialization context</param>
    public RedisMasterSlaveInfo(SerializationInfo info, StreamingContext context)
        : base(info, context)
    {
        InfoRefresh = info.GetInt64("info-refresh");
        RoleReported = info.GetString("role-reported");
        RoleReportedTime = info.GetInt64("role-reported-time");
    }
    /// <summary>
    /// Get or set info refresh
    /// </summary>
    public long InfoRefresh { get; set; }
    /// <summary>
    /// Get or set role reported
    /// </summary>
    public string RoleReported { get; set; }
    /// <summary>
    /// Get or set role reported time
    /// </summary>
    public long RoleReportedTime { get; set; }
}
/// <summary>
/// Represents a Redis master node as reported by a Redis Sentinel
/// </summary>
public class RedisMasterInfo : RedisMasterSlaveInfo
{
    /// <summary>
    /// Create new RedisMasterInfo via deserialization
    /// </summary>
    /// <param name="info">Serialization info</param>
    /// <param name="context">Serialization context</param>
    public RedisMasterInfo(SerializationInfo info, StreamingContext context)
        : base(info, context)
    {
        ConfigEpoch = info.GetInt64("config-epoch");
        NumSlaves = info.GetInt64("num-slaves");
        NumOtherSentinels = info.GetInt64("num-other-sentinels");
        Quorum = info.GetInt64("quorum");
        FailoverTimeout = info.GetInt64("failover-timeout");
        ParallelSyncs = info.GetInt64("parallel-syncs");
    }
    /// <summary>
    /// Get or set the config epoch
    /// </summary>
    public long ConfigEpoch { get; set; }
    /// <summary>
    /// Get or set number of slaves of the current master node
    /// </summary>
    public long NumSlaves { get; set; }
    /// <summary>
    /// Get or set number of other Sentinels monitoring this master
    /// </summary>
    public long NumOtherSentinels { get; set; }
    /// <summary>
    /// Get or set Sentinel quorum count
    /// </summary>
    public long Quorum { get; set; }
    /// <summary>
    /// Get or set the failover timeout
    /// </summary>
    public long FailoverTimeout { get; set; }
    /// <summary>
    /// Get or set the parallel syncs
    /// </summary>
    public long ParallelSyncs { get; set; }
}
/// <summary>
/// Represents a Redis slave node as reported by a Redis Sentinel
/// </summary>
public class RedisSlaveInfo : RedisMasterSlaveInfo
{
    /// <summary>
    /// Create new RedisSlaveInfo via deserialization
    /// </summary>
    /// <param name="info">Serialization info</param>
    /// <param name="context">Serialization context</param>
    public RedisSlaveInfo(SerializationInfo info, StreamingContext context)
        : base(info, context)
    {
        MasterLinkDownTime = info.GetInt64("master-link-down-time");
        MasterLinkStatus = info.GetString("master-link-status");
        MasterHost = info.GetString("master-host");
        MasterPort = info.GetInt32("master-port");
        SlavePriority = info.GetInt64("slave-priority");
        SlaveReplOffset = info.GetInt64("slave-repl-offset");
    }
    /// <summary>
    /// Get or set the master link down time
    /// </summary>
    public long MasterLinkDownTime { get; set; }
    /// <summary>
    /// Get or set status of master link
    /// </summary>
    public string MasterLinkStatus { get; set; }
    /// <summary>
    /// Get or set the master host of the current Redis slave node
    /// </summary>
    public string MasterHost { get; set; }
    /// <summary>
    /// Get or set the master port of the current Redis slave node
    /// </summary>
    public int MasterPort { get; set; }
    /// <summary>
    /// Get or set the priority of the current Redis slave node
    /// </summary>
    public long SlavePriority { get; set; }
    /// <summary>
    /// Get or set the slave replication offset
    /// </summary>
    public long SlaveReplOffset { get; set; }
}
/// <summary>
/// Represents a Redis Sentinel node as reported by a Redis Sentinel
/// </summary>
public class RedisSentinelInfo : RedisServerInfo
{
    /// <summary>
    /// Create new RedisSentinelInfo via deserialization
    /// </summary>
    /// <param name="info">Serialization info</param>
    /// <param name="context">Serialization context</param>
    public RedisSentinelInfo(SerializationInfo info, StreamingContext context)
        : base(info, context)
    {
        // These fields may be absent from the Sentinel reply, so a tolerant lookup is used
        // instead of info.GetInt64/GetString (which throw on missing keys).
        SDownTime = this.GetSerializationItemValue<long>(info, "s-down-time");
        LastHelloMessage = this.GetSerializationItemValue<long>(info, "last-hello-message");
        VotedLeader = this.GetSerializationItemValue<string>(info, "voted-leader");
        VotedLeaderEpoch = this.GetSerializationItemValue<long>(info, "voted-leader-epoch");
        // Missing numeric fields come back as default(long) == 0; normalize them to -1 ("unknown").
        if (SDownTime == 0) SDownTime = -1;
        if (LastHelloMessage == 0) LastHelloMessage = -1;
        if (VotedLeaderEpoch == 0) VotedLeaderEpoch = -1;
    }
    /// <summary>
    /// Get a value from an instance of the SerializationInfo, returning default(T) when the key is absent
    /// </summary>
    /// <typeparam name="T"></typeparam>
    /// <param name="info"></param>
    /// <param name="key"></param>
    /// <returns></returns>
    private T GetSerializationItemValue<T>(SerializationInfo info, string key)
    {
        foreach (SerializationEntry entry in info)
        {
            if (entry.Name == key)
            {
                return (T)Convert.ChangeType(entry.Value, typeof(T), System.Globalization.CultureInfo.InvariantCulture);
            }
        }
        return default(T);
    }
    /// <summary>
    /// Get or set the subjective down time (-1 when not reported)
    /// </summary>
    public long SDownTime { get; set; }
    /// <summary>
    /// Get or set milliseconds(?) since last hello message from current Sentinel node (-1 when not reported)
    /// </summary>
    public long LastHelloMessage { get; set; }
    /// <summary>
    /// Get or set the voted-leader value
    /// </summary>
    public string VotedLeader { get; set; }
    /// <summary>
    /// Get or set the voted-leader epoch (-1 when not reported)
    /// </summary>
    public long VotedLeaderEpoch { get; set; }
}
/// <summary>
/// Represents an entry from the Redis slow log
/// </summary>
public class RedisSlowLogEntry
{
    /// <summary>
    /// Get the entry ID
    /// </summary>
    public long Id { get; private set; }
    /// <summary>
    /// Get the entry date
    /// </summary>
    public DateTime Date { get; private set; }
    /// <summary>
    /// Get the entry latency
    /// </summary>
    public TimeSpan Latency { get; private set; }
    /// <summary>
    /// Get the entry arguments
    /// </summary>
    public string[] Arguments { get; private set; }
    internal RedisSlowLogEntry(long id, DateTime date, TimeSpan latency, string[] arguments)
    {
        Id = id;
        Date = date;
        Latency = latency;
        Arguments = arguments;
    }
}
/// <summary>
/// Represents master state as reported by Sentinel (reply of SENTINEL is-master-down-by-addr)
/// </summary>
public class RedisMasterState
{
    /// <summary>
    /// Get the master down state
    /// </summary>
    public long DownState { get; private set; }
    /// <summary>
    /// Get the leader
    /// </summary>
    public string Leader { get; private set; }
    /// <summary>
    /// Get the vote epoch
    /// </summary>
    public long VoteEpoch { get; private set; }
    internal RedisMasterState(long downState, string leader, long voteEpoch)
    {
        DownState = downState;
        Leader = leader;
        VoteEpoch = voteEpoch;
    }
}
}
|
2881099/csredis | 9,219 | src/CSRedisCore/RedisSentinelManager.cs | using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
// http://redis.io/topics/sentinel-clients
namespace CSRedis
{
/// <summary>
/// Sentinel master-endpoint conversion delegate
/// </summary>
/// <param name="master">Master endpoint as reported by the sentinel</param>
/// <returns>Master endpoint the client should actually connect to</returns>
public delegate Tuple<string, int> SentinelMasterConverter(Tuple<string, int> master);
/// <summary>
/// Represents a managed connection to a Redis master instance via a set of Redis sentinel nodes
/// </summary>
public class RedisSentinelManager : IDisposable
{
    const int DefaultPort = 26379;
    // Known sentinel endpoints; the list head is the "current" sentinel (see Current/Next).
    readonly LinkedList<Tuple<string, int>> _sentinels;
    string _masterName;
    int _connectTimeout;
    RedisClient _redisClient;
    // When true, Connect() resolves a slave for read-only use instead of the master.
    bool _readOnly;
    /// <summary>
    /// Occurs when the master connection has successfully connected
    /// </summary>
    public event EventHandler Connected;
/// <summary>
/// Create a new RedisSentinelManager
/// </summary>
/// <param name="readOnly">True to resolve a slave for read-only use; false to resolve the master</param>
/// <param name="sentinels">Sentinel addresses ("host" or "host:port"; port defaults to 26379)</param>
public RedisSentinelManager(bool readOnly, params string[] sentinels)
{
    _readOnly = readOnly;
    _sentinels = new LinkedList<Tuple<string, int>>();
    foreach (var host in sentinels)
    {
        string[] parts = host.Split(':');
        string hostname = parts[0].Trim();
        // Tolerate a bare hostname by falling back to the default sentinel port
        // (previously "host" without ":port" threw IndexOutOfRangeException).
        int port = parts.Length > 1 ? Int32.Parse(parts[1]) : DefaultPort;
        Add(hostname, port);
    }
}
/// <summary>
/// Add a new sentinel host using the default sentinel port (26379)
/// </summary>
/// <param name="host">Sentinel hostname</param>
public void Add(string host)
{
    Add(host, DefaultPort);
}
/// <summary>
/// Add a new sentinel host; duplicates (same host and port) are ignored.
/// </summary>
/// <param name="host">Sentinel hostname</param>
/// <param name="port">Sentinel port</param>
public void Add(string host, int port)
{
    var node = _sentinels.First;
    while (node != null)
    {
        if (node.Value.Item1 == host && node.Value.Item2 == port)
            return;
        node = node.Next;
    }
    _sentinels.AddLast(Tuple.Create(host, port));
}
/// <summary>
/// Obtain a connection to the specified master node (or to a slave of it, when the manager
/// was constructed with readOnly=true)
/// </summary>
/// <param name="masterName">Name of Redis master</param>
/// <param name="timeout">Connection timeout (milliseconds)</param>
/// <returns>host:port of the server that responded</returns>
public string Connect(string masterName, int timeout = 200)
{
    // Remembered so Call<T> can reconnect transparently after an IOException.
    _masterName = masterName;
    _connectTimeout = timeout;
    if (_readOnly == false)
    {
        string masterEndPoint = SetMaster(masterName, timeout);
        if (masterEndPoint == null)
            throw new IOException("Could not connect to sentinel or master");
        _redisClient.ReconnectAttempts = 0;
        return masterEndPoint;
    }
    string slaveEndPoint = SetSlave(masterName, timeout);
    if (slaveEndPoint == null)
        throw new IOException("Could not connect to sentinel or slave");
    _redisClient.ReconnectAttempts = 0;
    return slaveEndPoint;
}
/// <summary>
/// Execute a command against the master, reconnecting through the sentinels if necessary
/// </summary>
/// <typeparam name="T">Command return type</typeparam>
/// <param name="redisAction">Command to execute</param>
/// <returns>Command result</returns>
public T Call<T>(Func<RedisClient, T> redisAction)
{
    if (_masterName == null)
        throw new InvalidOperationException("Master not set");
    try
    {
        return redisAction(_redisClient);
    }
    catch (IOException)
    {
        // NOTE(review): failover to the next sentinel then retry recursively, with no retry
        // cap — a persistently unreachable master leads to unbounded recursion.
        Next();
        Connect(_masterName, _connectTimeout);
        return Call(redisAction);
    }
}
/// <summary>
/// Release resources held by the current RedisSentinelManager
/// </summary>
public void Dispose()
{
    if (_redisClient == null)
        return;
    _redisClient.Dispose();
}
/// <summary>
/// Sentinel master-endpoint conversion delegate
/// </summary>
/// <value>Delegate that maps a sentinel-reported endpoint to one the client can actually reach (e.g. across NAT)</value>
public SentinelMasterConverter SentinelMasterConverter { get; set; }
// Tries each known sentinel in turn until one reports a reachable, writable master.
// On success, _redisClient holds a connected client and "host:port" is returned; otherwise null.
string SetMaster(string name, int timeout)
{
    for (int i = 0; i < _sentinels.Count; i++)
    {
        if (i > 0)
            Next();
        using (var sentinel = Current())
        {
            try
            {
                if (!sentinel.Connect(timeout))
                    continue;
            }
            catch (Exception)
            {
                continue;
            }
            var master = sentinel.GetMasterAddrByName(name);
            if (master == null)
                continue;
            if (_redisClient != null)
                _redisClient.Dispose();
            // Allow the caller to map the sentinel-reported endpoint to a reachable one.
            if (SentinelMasterConverter != null)
                master = SentinelMasterConverter(master);
            _redisClient = new RedisClient(master.Item1, master.Item2);
            _redisClient.Connected += OnConnectionConnected;
            try
            {
                if (!_redisClient.Connect(timeout))
                    continue;
                var role = _redisClient.Role();
                if (role.RoleName != "master")
                    continue;
                // Verify the node actually accepts writes (test SET/DEL through a pipe).
                var testid = Guid.NewGuid().ToString("N");
                _redisClient.StartPipe();
                _redisClient.Set(testid, 1);
                _redisClient.Del(testid);
                _redisClient.EndPipe();
                // Learn any additional sentinels monitoring this master.
                foreach (var remoteSentinel in sentinel.Sentinels(name))
                    Add(remoteSentinel.Ip, remoteSentinel.Port);
            }
            catch (Exception ex)
            {
                Trace.WriteLine(ex.Message);
                Console.WriteLine(ex.Message); // NOTE(review): Console output left in library code
                continue;
            }
            return master.Item1 + ':' + master.Item2;
        }
    }
    return null;
}
/// <summary>
/// Ask each known sentinel for the slaves of <paramref name="name"/> and connect to the
/// first one that verifies as a slave.
/// Returns "host:port" of the verified slave, or null when none could be reached.
/// </summary>
string SetSlave(string name, int timeout)
{
for (int i = 0; i < _sentinels.Count; i++)
{
// After the first attempt, rotate the list so Current() yields the next candidate.
if (i > 0)
Next();
using (var sentinel = Current())
{
try
{
if (!sentinel.Connect(timeout))
continue;
}
catch (Exception)
{
// Unreachable sentinel; move on to the next one.
continue;
}
var slaves = sentinel.Slaves(name);
if (slaves == null)
continue;
foreach (var slave in slaves)
{
// Drop any previously held connection before dialing this candidate.
if (_redisClient != null)
_redisClient.Dispose();
_redisClient = new RedisClient(slave.Ip, slave.Port);
_redisClient.Connected += OnConnectionConnected;
try
{
if (!_redisClient.Connect(timeout))
continue;
// Confirm the node reports itself as slave (sentinel data can be stale during failover).
var role = _redisClient.Role();
if (role.RoleName != "slave")
continue;
// Learn about any additional sentinels monitoring this master.
foreach (var remoteSentinel in sentinel.Sentinels(name))
Add(remoteSentinel.Ip, remoteSentinel.Port);
}
catch (Exception ex)
{
Trace.WriteLine(ex.Message);
Console.WriteLine(ex.Message);
continue;
}
return slave.Ip + ':' + slave.Port;
}
}
}
return null;
}
// Build a sentinel client for the entry currently at the head of the rotation list.
RedisSentinelClient Current()
{
    var endpoint = _sentinels.First.Value;
    return new RedisSentinelClient(endpoint.Item1, endpoint.Item2);
}
// Rotate the sentinel list: the head entry moves to the tail.
void Next()
{
    var head = _sentinels.First.Value;
    _sentinels.RemoveFirst();
    _sentinels.AddLast(head);
}
// Forwards the underlying client's Connected event to this manager's subscribers.
void OnConnectionConnected(object sender, EventArgs args)
{
    // Snapshot the delegate first: with the original check-then-invoke pattern a
    // concurrent unsubscribe between the null check and the call could raise a
    // NullReferenceException. Invoking the local copy is race-free.
    var handler = Connected;
    if (handler != null)
        handler(this, new EventArgs());
}
}
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 10,997 | src/transformers/models/deberta_v2/tokenization_deberta_v2_fast.py | # coding=utf-8
# Copyright 2020 Microsoft and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Tokenization class for model DeBERTa."""
import os
from shutil import copyfile
from typing import Optional, Tuple
from ...file_utils import is_sentencepiece_available
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
# The slow (sentencepiece-backed) tokenizer is only importable when the
# sentencepiece package is installed; otherwise expose None so this module
# still loads and `slow_tokenizer_class` simply stays unset.
if is_sentencepiece_available():
from .tokenization_deberta_v2 import DebertaV2Tokenizer
else:
DebertaV2Tokenizer = None
logger = logging.get_logger(__name__)
# File names the tokenizer looks for inside a model directory/repo.
VOCAB_FILES_NAMES = {"vocab_file": "spm.model", "tokenizer_file": "tokenizer.json"}
# Download locations of the sentencepiece model for each published checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/spm.model",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/spm.model",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/spm.model"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/spm.model"
),
}
}
# Maximum input length (in tokens) accepted by each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"microsoft/deberta-v2-xlarge": 512,
"microsoft/deberta-v2-xxlarge": 512,
"microsoft/deberta-v2-xlarge-mnli": 512,
"microsoft/deberta-v2-xxlarge-mnli": 512,
}
# Per-checkpoint constructor overrides applied by `from_pretrained`.
PRETRAINED_INIT_CONFIGURATION = {
"microsoft/deberta-v2-xlarge": {"do_lower_case": False},
"microsoft/deberta-v2-xxlarge": {"do_lower_case": False},
"microsoft/deberta-v2-xlarge-mnli": {"do_lower_case": False},
"microsoft/deberta-v2-xxlarge-mnli": {"do_lower_case": False},
}
class DebertaV2TokenizerFast(PreTrainedTokenizerFast):
    r"""
    Constructs a DeBERTa-v2 fast tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).

    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        tokenizer_file (`str`, *optional*):
            Path to a serialized fast-tokenizer (*tokenizer.json*) file to load instead of converting the
            sentencepiece model.
        do_lower_case (`bool`, *optional*, defaults to `False`):
            Whether or not to lowercase the input when tokenizing.
        split_by_punct (`bool`, *optional*, defaults to `False`):
            Whether or not to split tokens on punctuation (forwarded to the underlying tokenizer).
        bos_token (`string`, *optional*, defaults to `"[CLS]"`):
            The beginning of sequence token that was used during pre-training. Can be used a sequence classifier token.
            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.
        eos_token (`string`, *optional*, defaults to `"[SEP]"`):
            The end of sequence token. When building a sequence using special tokens, this is not the token that is
            used for the end of sequence. The token used is the `sep_token`.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = DebertaV2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        split_by_punct=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            split_by_punct=split_by_punct,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.split_by_punct = split_by_punct
        self.vocab_file = vocab_file
        # A slow tokenizer can only be re-created when the original sentencepiece
        # model file is available. `bool(...)` replaces the redundant
        # `False if not self.vocab_file else True` of the original.
        self.can_save_slow_tokenizer = bool(self.vocab_file)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A DeBERTa sequence has the following format:

        - single sequence: [CLS] X [SEP]
        - pair of sequences: [CLS] A [SEP] B [SEP]

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # Mask layout mirrors build_inputs_with_special_tokens: [CLS] ... [SEP] (... [SEP]).
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into `save_directory` and return its path.

        Requires `can_save_slow_tokenizer` (i.e. the original *.spm* file must be known).
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            # Returning None here mirrors the historical behaviour for an invalid directory.
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        # Skip the copy when source and destination are the same file.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
|
2881099/csredis | 1,566 | src/CSRedisCore/CSRedisCore.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFrameworks>netstandard2.0;net45;net40</TargetFrameworks>
<AssemblyName>CSRedisCore</AssemblyName>
<PackageId>CSRedisCore</PackageId>
<RootNamespace>CSRedisCore</RootNamespace>
<Version>3.8.806</Version>
<GeneratePackageOnBuild>true</GeneratePackageOnBuild>
<PackageProjectUrl>https://github.com/2881099/csredis</PackageProjectUrl>
<Description>CSRedis 是 redis.io 官方推荐库,支持 redis-trib集群、哨兵、私有分区与连接池管理技术,简易 RedisHelper 静态类。</Description>
<RepositoryUrl>https://github.com/2881099/csredis</RepositoryUrl>
<PackageTags>csredis redis-trib cluster redis c# 缓存壳 集群 哨兵 分区 负载</PackageTags>
<RepositoryType>git</RepositoryType>
<PackageLicenseExpression>MIT</PackageLicenseExpression>
<Title>$(AssemblyName)</Title>
<IsPackable>true</IsPackable>
<GenerateAssemblyInfo>true</GenerateAssemblyInfo>
<SignAssembly>true</SignAssembly>
<AssemblyOriginatorKeyFile>key.snk</AssemblyOriginatorKeyFile>
<DelaySign>false</DelaySign>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|AnyCPU'">
<DocumentationFile>bin\Debug\netstandard2.0\CSRedisCore.xml</DocumentationFile>
<WarningLevel>3</WarningLevel>
<NoWarn>1701;1702;1591</NoWarn>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Newtonsoft.Json" Version="13.0.1" />
<PackageReference Include="System.ValueTuple" Version="4.5.0" />
</ItemGroup>
<PropertyGroup Condition="'$(TargetFramework)' == 'net40'">
<DefineConstants>net40</DefineConstants>
</PropertyGroup>
</Project>
|
27182812/ChatGLM-LLaMA-chinese-insturct | 9,175 | src/transformers/models/deberta_v2/configuration_deberta_v2.py | # coding=utf-8
# Copyright 2020, Microsoft and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" DeBERTa-v2 model configuration"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
# Canonical config.json locations for the published DeBERTa-v2 checkpoints.
DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaV2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DebertaV2Model`]. It is used to instantiate a
    DeBERTa-v2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the DeBERTa
    [microsoft/deberta-v2-xlarge](https://huggingface.co/microsoft/deberta-v2-xlarge) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Arguments:
        vocab_size (`int`, *optional*, defaults to 128100):
            Vocabulary size of the DeBERTa-v2 model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`DebertaV2Model`].
        hidden_size (`int`, *optional*, defaults to 1536):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 24):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 24):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 6144):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"`, `"tanh"`, `"gelu_fast"`, `"mish"`, `"linear"`, `"sigmoid"` and `"gelu_new"` are
            supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 0):
            The vocabulary size of the `token_type_ids` passed when calling [`DebertaModel`] or [`TFDebertaModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-7):
            The epsilon used by the layer normalization layers.
        relative_attention (`bool`, *optional*, defaults to `False`):
            Whether use relative position encoding.
        max_relative_positions (`int`, *optional*, defaults to -1):
            The range of relative positions `[-max_position_embeddings, max_position_embeddings]`. Use the same value
            as `max_position_embeddings`.
        pad_token_id (`int`, *optional*, defaults to 0):
            The value used to pad input_ids.
        position_biased_input (`bool`, *optional*, defaults to `True`):
            Whether add absolute position embedding to content embedding.
        pos_att_type (`List[str]`, *optional*):
            The type of relative position attention, it can be a combination of `["p2c", "c2p"]`, e.g. `["p2c"]` or
            `["p2c", "c2p"]`. A single `"|"`-separated string such as `"p2c|c2p"` is also accepted for backwards
            compatibility.
        pooler_dropout (`float`, *optional*, defaults to 0):
            The dropout ratio used in the pooler.
        pooler_hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The activation function (function name) used in the pooler.

    Example:

    ```python
    >>> from transformers import DebertaV2Config, DebertaV2Model

    >>> # Initializing a DeBERTa-v2 microsoft/deberta-v2-xlarge style configuration
    >>> configuration = DebertaV2Config()

    >>> # Initializing a model (with random weights) from the microsoft/deberta-v2-xlarge style configuration
    >>> model = DebertaV2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility: pos_att_type used to be passed as a single
        # "|"-separated string, e.g. "p2c|c2p". `isinstance` replaces the
        # original `type(...) == str` comparison, which failed for str subclasses.
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
        self.pos_att_type = pos_att_type

        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        # pooler_hidden_size may be overridden through kwargs; defaults to hidden_size.
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    """ONNX export configuration for DeBERTa-v2 models."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Ordered mapping from model input name to its dynamic-axis labels."""
        if self.task == "multiple-choice":
            axes = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            axes = {0: "batch", 1: "sequence"}
        input_names = ["input_ids", "attention_mask"]
        # token_type_ids is only a model input when token types are actually embedded.
        if self._config.type_vocab_size > 0:
            input_names.append("token_type_ids")
        return OrderedDict((name, axes) for name in input_names)

    @property
    def default_onnx_opset(self) -> int:
        # Export with ONNX opset 12.
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        """Delegate to the parent implementation, then drop token_type_ids when the model has none."""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
|
27182812/ChatGLM-LLaMA-chinese-insturct | 2,434 | src/transformers/models/cvt/__init__.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Maps each submodule to the public names it exports; consumed by _LazyModule
# at the bottom of this file so heavy frameworks load only on first access.
_import_structure = {"configuration_cvt": ["CVT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CvtConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
# PyTorch absent: simply do not register the torch model symbols.
pass
else:
_import_structure["modeling_cvt"] = [
"CVT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CvtForImageClassification",
"CvtModel",
"CvtPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
# TensorFlow absent: simply do not register the TF model symbols.
pass
else:
_import_structure["modeling_tf_cvt"] = [
"TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCvtForImageClassification",
"TFCvtModel",
"TFCvtPreTrainedModel",
]
# Static imports for type checkers only; at runtime the lazy proxy below resolves them.
if TYPE_CHECKING:
from .configuration_cvt import CVT_PRETRAINED_CONFIG_ARCHIVE_MAP, CvtConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cvt import (
CVT_PRETRAINED_MODEL_ARCHIVE_LIST,
CvtForImageClassification,
CvtModel,
CvtPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_cvt import (
TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCvtForImageClassification,
TFCvtModel,
TFCvtPreTrainedModel,
)
else:
import sys
# Replace this module object with a lazy proxy that imports submodules on demand.
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
2881099/csredis | 115,477 | src/CSRedisCore/RedisClient.Sync.cs | using CSRedis.Internal.Commands;
using System;
using System.Collections.Generic;
using System.Linq;
namespace CSRedis
{
public partial class RedisClient
{
/// <summary>
/// Connect to the remote host
/// </summary>
/// <param name="timeout">Connection timeout in milliseconds</param>
/// <returns>True if connected</returns>
public bool Connect(int timeout)
{
// Synchronous connect delegated to the underlying connector.
return _connector.Connect(timeout);
}
/// <summary>
/// Issue an arbitrary Redis command by name
/// </summary>
/// <param name="command">Command name</param>
/// <param name="args">Command arguments</param>
/// <returns>Redis object</returns>
public object Call(string command, params string[] args)
{
    var redisCommand = RedisCommands.Call(command, args);
    return Write(redisCommand);
}
// Routes a command through the current client mode: transaction queueing,
// monitor suppression, pipelined streaming, or a plain request/response call.
internal T Write<T>(RedisCommand<T> command)
{
    // An active MULTI transaction takes the command instead of the connector.
    if (_transaction.Active)
        return _transaction.Write(command);

    // While the monitor is listening, no regular reply is produced.
    if (_monitor.Listening)
        return default(T);

    // Pipelined mode: send now, replies are collected later.
    if (_streaming)
    {
        _connector.Write(command);
        return default(T);
    }

    // Default: synchronous round trip.
    return _connector.Call(command);
}
// Sends a command via the connector's none-read path (no reply is consumed here).
internal void WriteNoneRead(RedisCommand command) => _connector.CallNoneRead(command);
#region Connection
/// <summary>
/// Authenticate to the server
/// </summary>
/// <param name="password">Redis server password</param>
/// <returns>Status message</returns>
public string Auth(string password)
{
return Write(RedisCommands.Auth(null, password));
}
/// <summary>
/// Authenticate to the server using the two-argument AUTH form (user + password)
/// </summary>
/// <param name="user">User name to authenticate as</param>
/// <param name="password">Password for the given user</param>
/// <returns>Status message</returns>
public string Auth(string user, string password)
{
return Write(RedisCommands.Auth(user, password));
}
/// <summary>
/// Echo the given string
/// </summary>
/// <param name="message">Message to echo</param>
/// <returns>Message</returns>
public string Echo(string message)
{
return Write(RedisCommands.Echo(message));
}
/// <summary>
/// Ping the server
/// </summary>
/// <returns>Status message</returns>
public string Ping()
{
return Write(RedisCommands.Ping());
}
/// <summary>
/// Close the connection
/// </summary>
/// <returns>Status message</returns>
public string Quit()
{
string response = Write(RedisCommands.Quit());
// Dispose the connector after QUIT so the underlying socket is released.
_connector.Dispose();
return response;
}
/// <summary>
/// Change the selected database for the current connection
/// </summary>
/// <param name="index">Zero-based database index</param>
/// <returns>Status message</returns>
public string Select(int index)
{
return Write(RedisCommands.Select(index));
}
#endregion
#region Keys
/// <summary>
/// [redis-server 3.2.1] Update the last-access time of the specified key(s); keys that do not exist are ignored
/// </summary>
/// <param name="keys">Keys</param>
/// <returns>Number of keys that were touched</returns>
public long Touch(params string[] keys)
{
return Write(RedisCommands.Touch(keys));
}
/// <summary>
/// [redis-server 4.0.0] Delete a key. Very similar to DEL: removes the specified key(s), skipping those that do
/// not exist. Unlike DEL, which can block, this command reclaims memory on another thread and is therefore
/// non-blocking — hence the name: the keys are only unlinked from the keyspace metadata and the actual
/// deletion happens asynchronously later.
/// </summary>
/// <param name="keys">Keys to delete</param>
/// <returns>Number of keys removed</returns>
public long UnLink(params string[] keys)
{
return Write(RedisCommands.UnLink(keys));
}
/// <summary>
/// Delete a key
/// </summary>
/// <param name="keys">Keys to delete</param>
/// <returns>Number of keys removed</returns>
public long Del(params string[] keys)
{
return Write(RedisCommands.Del(keys));
}
/// <summary>
/// Return a serialized version of the value stored at the specified key
/// </summary>
/// <param name="key">Key to dump</param>
/// <returns>Serialized value</returns>
public byte[] Dump(string key)
{
return Write(RedisCommands.Dump(key));
}
/// <summary>
/// Determine if a key exists
/// </summary>
/// <param name="key">Key to check</param>
/// <returns>True if key exists</returns>
public bool Exists(string key)
{
return Write(RedisCommands.Exists(key));
}
/// <summary>
/// Determine how many of the given keys exist (a key repeated in the input is counted each time)
/// </summary>
/// <param name="keys">Keys to check</param>
/// <returns>Number of existing keys among the arguments</returns>
public long Exists(string[] keys)
{
return Write(RedisCommands.Exists(keys));
}
/// <summary>
/// Set a key's time to live in seconds
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="expiration">Expiration (nearest second)</param>
/// <returns>True if timeout was set; false if key does not exist or timeout could not be set</returns>
public bool Expire(string key, TimeSpan expiration)
{
return Write(RedisCommands.Expire(key, expiration));
}
/// <summary>
/// Set a key's time to live in seconds
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="seconds">Expiration in seconds</param>
/// <returns>True if timeout was set; false if key does not exist or timeout could not be set</returns>
public bool Expire(string key, int seconds)
{
return Write(RedisCommands.Expire(key, seconds));
}
/// <summary>
/// Set the expiration for a key (nearest second)
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="expirationDate">Date of expiration, to nearest second</param>
/// <returns>True if timeout was set; false if key does not exist or timeout could not be set</returns>
public bool ExpireAt(string key, DateTime expirationDate)
{
return Write(RedisCommands.ExpireAt(key, expirationDate));
}
/// <summary>
/// Set the expiration for a key as a UNIX timestamp
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="timestamp">UNIX timestamp</param>
/// <returns>True if timeout was set; false if key does not exist or timeout could not be set</returns>
public bool ExpireAt(string key, int timestamp)
{
return Write(RedisCommands.ExpireAt(key, timestamp));
}
/// <summary>
/// Find all keys matching the given pattern (O(N) over the whole keyspace; avoid on large production databases)
/// </summary>
/// <param name="pattern">Pattern to match</param>
/// <returns>Array of keys matching pattern</returns>
public string[] Keys(string pattern)
{
return Write(RedisCommands.Keys(pattern));
}
/// <summary>
/// Atomically transfer a key from a Redis instance to another one
/// </summary>
/// <param name="host">Remote Redis host</param>
/// <param name="port">Remote Redis port</param>
/// <param name="key">Key to migrate</param>
/// <param name="destinationDb">Remote database ID</param>
/// <param name="timeoutMilliseconds">Timeout in milliseconds</param>
/// <returns>Status message</returns>
public string Migrate(string host, int port, string key, int destinationDb, int timeoutMilliseconds)
{
return Write(RedisCommands.Migrate(host, port, key, destinationDb, timeoutMilliseconds));
}
/// <summary>
/// Atomically transfer a key from a Redis instance to another one
/// </summary>
/// <param name="host">Remote Redis host</param>
/// <param name="port">Remote Redis port</param>
/// <param name="key">Key to migrate</param>
/// <param name="destinationDb">Remote database ID</param>
/// <param name="timeout">Timeout in milliseconds</param>
/// <returns>Status message</returns>
public string Migrate(string host, int port, string key, int destinationDb, TimeSpan timeout)
{
return Write(RedisCommands.Migrate(host, port, key, destinationDb, timeout));
}
/// <summary>
/// Move a key to another database
/// </summary>
/// <param name="key">Key to move</param>
/// <param name="database">Database destination ID</param>
/// <returns>True if key was moved</returns>
public bool Move(string key, int database)
{
return Write(RedisCommands.Move(key, database));
}
/// <summary>
/// Get the type of internal representation (encoding) used to store the value at the specified key
/// (OBJECT ENCODING; the previous summary wrongly described OBJECT REFCOUNT)
/// </summary>
/// <param name="arguments">Subcommand arguments</param>
/// <returns>The type of internal representation used to store the value at the specified key</returns>
public string ObjectEncoding(params string[] arguments)
{
return Write(RedisCommands.ObjectEncoding(arguments));
}
/// <summary>
/// Inspect the internals of Redis objects
/// </summary>
/// <param name="subCommand">Type of Object command to send</param>
/// <param name="arguments">Subcommand arguments</param>
/// <returns>Varies depending on subCommand</returns>
public long? Object(RedisObjectSubCommand subCommand, params string[] arguments)
{
return Write(RedisCommands.Object(subCommand, arguments));
}
/// <summary>
/// Remove the expiration from a key
/// </summary>
/// <param name="key">Key to modify</param>
/// <returns>True if timeout was removed</returns>
public bool Persist(string key)
{
return Write(RedisCommands.Persist(key));
}
/// <summary>
/// Set a key's time to live in milliseconds
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="expiration">Expiration (nearest millisecond)</param>
/// <returns>True if timeout was set</returns>
public bool PExpire(string key, TimeSpan expiration)
{
return Write(RedisCommands.PExpire(key, expiration));
}
/// <summary>
/// Set a key's time to live in milliseconds
/// </summary>
/// <param name="key">Key</param>
/// <param name="milliseconds">Expiration in milliseconds</param>
/// <returns>True if timeout was set</returns>
public bool PExpire(string key, long milliseconds)
{
return Write(RedisCommands.PExpire(key, milliseconds));
}
/// <summary>
/// Set the expiration for a key (nearest millisecond)
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="date">Expiration date</param>
/// <returns>True if timeout was set</returns>
public bool PExpireAt(string key, DateTime date)
{
return Write(RedisCommands.PExpireAt(key, date));
}
/// <summary>
/// Set the expiration for a key as a UNIX timestamp specified in milliseconds
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="timestamp">Expiration timestamp (milliseconds)</param>
/// <returns>True if timeout was set</returns>
public bool PExpireAt(string key, long timestamp)
{
return Write(RedisCommands.PExpireAt(key, timestamp));
}
/// <summary>
/// Get the time to live for a key in milliseconds
/// </summary>
/// <param name="key">Key to check</param>
/// <returns>Time-to-live in milliseconds (negative values follow the PTTL convention for missing/non-expiring keys)</returns>
public long PTtl(string key)
{
return Write(RedisCommands.PTtl(key));
}
/// <summary>
/// Return a random key from the keyspace
/// </summary>
/// <returns>A random key</returns>
public string RandomKey()
{
return Write(RedisCommands.RandomKey());
}
/// <summary>
/// Rename a key
/// </summary>
/// <param name="key">Key to rename</param>
/// <param name="newKey">New key name</param>
/// <returns>Status code</returns>
public string Rename(string key, string newKey)
{
return Write(RedisCommands.Rename(key, newKey));
}
/// <summary>
/// Rename a key, only if the new key does not exist
/// </summary>
/// <param name="key">Key to rename</param>
/// <param name="newKey">New key name</param>
/// <returns>True if key was renamed</returns>
public bool RenameNx(string key, string newKey)
{
return Write(RedisCommands.RenameNx(key, newKey));
}
/// <summary>
/// Create a key using the provided serialized value, previously obtained using dump
/// </summary>
/// <param name="key">Key to restore</param>
/// <param name="ttlMilliseconds">Time-to-live in milliseconds</param>
/// <param name="serializedValue">Serialized value from DUMP</param>
/// <returns>Status code</returns>
public string Restore(string key, long ttlMilliseconds, byte[] serializedValue)
{
return Write(RedisCommands.Restore(key, ttlMilliseconds, serializedValue));
}
/// <summary>
/// Sort the elements in a list, set or sorted set
/// </summary>
/// <param name="key">Key to sort</param>
/// <param name="offset">Number of elements to skip</param>
/// <param name="count">Number of elements to return</param>
/// <param name="by">Sort by external key</param>
/// <param name="dir">Sort direction</param>
/// <param name="isAlpha">Sort lexicographically</param>
/// <param name="get">Retrieve external keys</param>
/// <returns>The sorted list</returns>
public string[] Sort(string key, long? offset = null, long? count = 0, string by = null, RedisSortDir? dir = null, bool? isAlpha = null, params string[] get)
{
return Write(RedisCommands.Sort(key, offset, count, by, dir, isAlpha, get));
}
/// <summary>
/// Sort the elements in a list, set or sorted set, then store the result in a new list
/// </summary>
/// <param name="key">Key to sort</param>
/// <param name="destination">Destination key name of stored sort</param>
/// <param name="offset">Number of elements to skip</param>
/// <param name="count">Number of elements to return</param>
/// <param name="by">Sort by external key</param>
/// <param name="dir">Sort direction</param>
/// <param name="isAlpha">Sort lexicographically</param>
/// <param name="get">Retrieve external keys</param>
/// <returns>Number of elements stored</returns>
public long SortAndStore(string key, string destination, long? offset = null, long? count = 0, string by = null, RedisSortDir? dir = null, bool? isAlpha = false, params string[] get)
{
return Write(RedisCommands.SortAndStore(key, destination, offset, count, by, dir, isAlpha, get));
}
/// <summary>
/// Get the time to live for a key
/// </summary>
/// <param name="key">Key to check</param>
/// <returns>Time-to-live in seconds</returns>
public long Ttl(string key)
{
return Write(RedisCommands.Ttl(key));
}
/// <summary>
/// Determine the type stored at key
/// </summary>
/// <param name="key">Key to check</param>
/// <returns>Type of key</returns>
public string Type(string key)
{
return Write(RedisCommands.Type(key));
}
/// <summary>
/// Iterate the set of keys in the currently selected Redis database
/// </summary>
/// <param name="cursor">The cursor returned by the server in the previous call, or 0 if this is the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Set the maximum number of elements to return</param>
/// <returns>Updated cursor and result set</returns>
public RedisScan<string> Scan(long cursor, string pattern = null, long? count = null)
{
return Write(RedisCommands.Scan(cursor, pattern, count));
}
public RedisScan<byte[]> ScanBytes(long cursor, string pattern = null, long? count = null)
{
return Write(RedisCommands.ScanBytes(cursor, pattern, count));
}
#endregion
#region Hashes
/// <summary>
/// [redis-server 3.2.0] Get the string length of the value stored in the given hash field
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field</param>
/// <returns>Length of the field value; 0 if the hash or the field does not exist</returns>
public long HStrLen(string key, string field)
{
return Write(RedisCommands.HStrLen(key, field));
}
/// <summary>
/// Delete one or more hash fields
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="fields">Fields to delete</param>
/// <returns>Number of fields removed from hash</returns>
public long HDel(string key, params string[] fields)
{
return Write(RedisCommands.HDel(key, fields));
}
/// <summary>
/// Determine if a hash field exists
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field to check</param>
/// <returns>True if hash field exists</returns>
public bool HExists(string key, string field)
{
return Write(RedisCommands.HExists(key, field));
}
/// <summary>
/// Get the value of a hash field
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field to get</param>
/// <returns>Value of hash field</returns>
public string HGet(string key, string field)
{
return Write(RedisCommands.HGet(key, field));
}
/// <summary>Binary-safe variant of HGet: returns the field value as raw bytes</summary>
public byte[] HGetBytes(string key, string field)
{
return Write(RedisCommands.HGetBytes(key, field));
}
/// <summary>
/// Get all the fields and values in a hash
/// </summary>
/// <typeparam name="T">Object to map hash</typeparam>
/// <param name="key">Hash key</param>
/// <returns>Strongly typed object mapped from hash</returns>
public T HGetAll<T>(string key)
where T : class
{
return Write(RedisCommands.HGetAll<T>(key));
}
/// <summary>
/// Get all the fields and values in a hash
/// </summary>
/// <param name="key">Hash key</param>
/// <returns>Dictionary mapped from string</returns>
public Dictionary<string, string> HGetAll(string key)
{
return Write(RedisCommands.HGetAll(key));
}
/// <summary>Binary-safe variant of HGetAll: returns field values as raw bytes</summary>
public Dictionary<string, byte[]> HGetAllBytes(string key)
{
return Write(RedisCommands.HGetAllBytes(key));
}
/// <summary>
/// Increment the integer value of a hash field by the given number
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field to increment</param>
/// <param name="increment">Increment value</param>
/// <returns>Value of field after increment</returns>
public long HIncrBy(string key, string field, long increment)
{
return Write(RedisCommands.HIncrBy(key, field, increment));
}
/// <summary>
/// Increment the float value of a hash field by the given number
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field to increment</param>
/// <param name="increment">Increment value</param>
/// <returns>Value of field after increment</returns>
public decimal HIncrByFloat(string key, string field, decimal increment)
{
return Write(RedisCommands.HIncrByFloat(key, field, increment));
}
/// <summary>
/// Get all the fields in a hash
/// </summary>
/// <param name="key">Hash key</param>
/// <returns>All hash field names</returns>
public string[] HKeys(string key)
{
return Write(RedisCommands.HKeys(key));
}
/// <summary>
/// Get the number of fields in a hash
/// </summary>
/// <param name="key">Hash key</param>
/// <returns>Number of fields in hash</returns>
public long HLen(string key)
{
return Write(RedisCommands.HLen(key));
}
/// <summary>
/// Get the values of all the given hash fields
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="fields">Fields to return</param>
/// <returns>Values of given fields</returns>
public string[] HMGet(string key, params string[] fields)
{
return Write(RedisCommands.HMGet(key, fields));
}
/// <summary>Binary-safe variant of HMGet: returns field values as raw bytes</summary>
public byte[][] HMGetBytes(string key, params string[] fields)
{
return Write(RedisCommands.HMGetBytes(key, fields));
}
/// <summary>
/// Set multiple hash fields to multiple values
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="dict">Dictionary mapping of hash</param>
/// <returns>Status code</returns>
public string HMSet(string key, Dictionary<string, object> dict)
{
return Write(RedisCommands.HMSet(key, dict));
}
/// <summary>
/// Set multiple hash fields to multiple values
/// </summary>
/// <typeparam name="T">Type of object to map hash</typeparam>
/// <param name="key">Hash key</param>
/// <param name="obj">Object mapping of hash</param>
/// <returns>Status code</returns>
public string HMSet<T>(string key, T obj)
where T : class
{
return Write(RedisCommands.HMSet<T>(key, obj));
}
/// <summary>
/// Set multiple hash fields to multiple values
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="keyValues">Array of [key,value,key,value,..]</param>
/// <returns>Status code</returns>
public string HMSet(string key, params object[] keyValues)
{
return Write(RedisCommands.HMSet(key, keyValues));
}
/// <summary>
/// Set the value of a hash field
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Hash field to set</param>
/// <param name="value">Value to set</param>
/// <returns>True if field is new</returns>
public bool HSet(string key, string field, object value)
{
return Write(RedisCommands.HSet(key, field, value));
}
/// <summary>
/// Set the value of a hash field, only if the field does not exist
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Hash field to set</param>
/// <param name="value">Value to set</param>
/// <returns>True if field was set to value</returns>
public bool HSetNx(string key, string field, object value)
{
return Write(RedisCommands.HSetNx(key, field, value));
}
/// <summary>
/// Get all the values in a hash
/// </summary>
/// <param name="key">Hash key</param>
/// <returns>Array of all values in hash</returns>
public string[] HVals(string key)
{
return Write(RedisCommands.HVals(key));
}
/// <summary>Binary-safe variant of HVals: returns values as raw bytes</summary>
public byte[][] HValsBytes(string key)
{
return Write(RedisCommands.HValsBytes(key));
}
/// <summary>
/// Iterate the keys and values of a hash field
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="cursor">The cursor returned by the server in the previous call, or 0 if this is the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Maximum number of elements to return</param>
/// <returns>Updated cursor and result set</returns>
public RedisScan<Tuple<string, string>> HScan(string key, long cursor, string pattern = null, long? count = null)
{
return Write(RedisCommands.HScan(key, cursor, pattern, count));
}
/// <summary>Binary-safe variant of HScan: returns field values as raw bytes</summary>
public RedisScan<Tuple<string, byte[]>> HScanBytes(string key, long cursor, string pattern = null, long? count = null)
{
return Write(RedisCommands.HScanBytes(key, cursor, pattern, count));
}
#endregion
#region Lists
/// <summary>
/// Remove and get the first element and key in a list, or block until one is available
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">List keys</param>
/// <returns>List key and list value</returns>
public Tuple<string, string> BLPopWithKey(int timeout, params string[] keys)
{
return Write(RedisCommands.BLPopWithKey(timeout, keys));
}
/// <summary>Binary-safe variant of BLPopWithKey: returns the value as raw bytes</summary>
public Tuple<string, byte[]> BLPopBytesWithKey(int timeout, params string[] keys)
{
return Write(RedisCommands.BLPopBytesWithKey(timeout, keys));
}
/// <summary>
/// Remove and get the first element and key in a list, or block until one is available
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">List keys</param>
/// <returns>List key and list value</returns>
public Tuple<string, string> BLPopWithKey(TimeSpan timeout, params string[] keys)
{
return Write(RedisCommands.BLPopWithKey(timeout, keys));
}
/// <summary>Binary-safe variant of BLPopWithKey: returns the value as raw bytes</summary>
public Tuple<string, byte[]> BLPopBytesWithKey(TimeSpan timeout, params string[] keys)
{
return Write(RedisCommands.BLPopBytesWithKey(timeout, keys));
}
/// <summary>
/// Remove and get the first element value in a list, or block until one is available
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">List keys</param>
/// <returns>List value</returns>
public string BLPop(int timeout, params string[] keys)
{
return Write(RedisCommands.BLPop(timeout, keys));
}
/// <summary>Binary-safe variant of BLPop: returns the value as raw bytes</summary>
public byte[] BLPopBytes(int timeout, params string[] keys)
{
return Write(RedisCommands.BLPopBytes(timeout, keys));
}
/// <summary>
/// Remove and get the first element value in a list, or block until one is available
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">List keys</param>
/// <returns>List value</returns>
public string BLPop(TimeSpan timeout, params string[] keys)
{
return Write(RedisCommands.BLPop(timeout, keys));
}
/// <summary>Binary-safe variant of BLPop: returns the value as raw bytes</summary>
public byte[] BLPopBytes(TimeSpan timeout, params string[] keys)
{
return Write(RedisCommands.BLPopBytes(timeout, keys));
}
/// <summary>
/// Remove and get the last element and key in a list, or block until one is available
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">List keys</param>
/// <returns>List key and list value</returns>
public Tuple<string, string> BRPopWithKey(int timeout, params string[] keys)
{
return Write(RedisCommands.BRPopWithKey(timeout, keys));
}
/// <summary>Binary-safe variant of BRPopWithKey: returns the value as raw bytes</summary>
public Tuple<string, byte[]> BRPopBytesWithKey(int timeout, params string[] keys)
{
return Write(RedisCommands.BRPopBytesWithKey(timeout, keys));
}
/// <summary>
/// Remove and get the last element and key in a list, or block until one is available
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">List keys</param>
/// <returns>List key and list value</returns>
public Tuple<string, string> BRPopWithKey(TimeSpan timeout, params string[] keys)
{
return Write(RedisCommands.BRPopWithKey(timeout, keys));
}
/// <summary>Binary-safe variant of BRPopWithKey: returns the value as raw bytes</summary>
public Tuple<string, byte[]> BRPopBytesWithKey(TimeSpan timeout, params string[] keys)
{
return Write(RedisCommands.BRPopBytesWithKey(timeout, keys));
}
/// <summary>
/// Remove and get the last element value in a list, or block until one is available
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">List keys</param>
/// <returns>List value</returns>
public string BRPop(int timeout, params string[] keys)
{
return Write(RedisCommands.BRPop(timeout, keys));
}
/// <summary>Binary-safe variant of BRPop: returns the value as raw bytes</summary>
public byte[] BRPopBytes(int timeout, params string[] keys)
{
return Write(RedisCommands.BRPopBytes(timeout, keys));
}
/// <summary>
/// Remove and get the last element value in a list, or block until one is available
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">List keys</param>
/// <returns>List value</returns>
public string BRPop(TimeSpan timeout, params string[] keys)
{
return Write(RedisCommands.BRPop(timeout, keys));
}
/// <summary>Binary-safe variant of BRPop: returns the value as raw bytes</summary>
public byte[] BRPopBytes(TimeSpan timeout, params string[] keys)
{
return Write(RedisCommands.BRPopBytes(timeout, keys));
}
/// <summary>
/// Pop a value from a list, push it to another list and return it; or block until one is available
/// </summary>
/// <param name="source">Source list key</param>
/// <param name="destination">Destination key</param>
/// <param name="timeout">Timeout in seconds</param>
/// <returns>Element popped</returns>
public string BRPopLPush(string source, string destination, int timeout)
{
return Write(RedisCommands.BRPopLPush(source, destination, timeout));
}
/// <summary>Binary-safe variant of BRPopLPush: returns the element as raw bytes</summary>
public byte[] BRPopBytesLPush(string source, string destination, int timeout)
{
return Write(RedisCommands.BRPopBytesLPush(source, destination, timeout));
}
/// <summary>
/// Pop a value from a list, push it to another list and return it; or block until one is available
/// </summary>
/// <param name="source">Source list key</param>
/// <param name="destination">Destination key</param>
/// <param name="timeout">Timeout in seconds</param>
/// <returns>Element popped</returns>
public string BRPopLPush(string source, string destination, TimeSpan timeout)
{
return Write(RedisCommands.BRPopLPush(source, destination, timeout));
}
/// <summary>Binary-safe variant of BRPopLPush: returns the element as raw bytes</summary>
public byte[] BRPopBytesLPush(string source, string destination, TimeSpan timeout)
{
return Write(RedisCommands.BRPopBytesLPush(source, destination, timeout));
}
/// <summary>
/// Get an element from a list by its index
/// </summary>
/// <param name="key">List key</param>
/// <param name="index">Zero-based index of item to return</param>
/// <returns>Element at index</returns>
public string LIndex(string key, long index)
{
return Write(RedisCommands.LIndex(key, index));
}
/// <summary>Binary-safe variant of LIndex: returns the element as raw bytes</summary>
public byte[] LIndexBytes(string key, long index)
{
return Write(RedisCommands.LIndexBytes(key, index));
}
/// <summary>
/// Insert an element before or after another element in a list
/// </summary>
/// <param name="key">List key</param>
/// <param name="insertType">Relative position</param>
/// <param name="pivot">Relative element</param>
/// <param name="value">Element to insert</param>
/// <returns>Length of list after insert or -1 if pivot not found</returns>
public long LInsert(string key, RedisInsert insertType, object pivot, object value)
{
return Write(RedisCommands.LInsert(key, insertType, pivot, value));
}
/// <summary>
/// Get the length of a list
/// </summary>
/// <param name="key">List key</param>
/// <returns>Length of list at key</returns>
public long LLen(string key)
{
return Write(RedisCommands.LLen(key));
}
/// <summary>
/// Remove and get the first element in a list
/// </summary>
/// <param name="key">List key</param>
/// <returns>First element in list</returns>
public string LPop(string key)
{
return Write(RedisCommands.LPop(key));
}
/// <summary>Binary-safe variant of LPop: returns the element as raw bytes</summary>
public byte[] LPopBytes(string key)
{
return Write(RedisCommands.LPopBytes(key));
}
/// <summary>
/// Prepend one or multiple values to a list
/// </summary>
/// <param name="key">List key</param>
/// <param name="values">Values to push</param>
/// <returns>Length of list after push</returns>
public long LPush(string key, params object[] values)
{
return Write(RedisCommands.LPush(key, values));
}
/// <summary>
/// Prepend a value to a list, only if the list exists
/// </summary>
/// <param name="key">List key</param>
/// <param name="value">Value to push</param>
/// <returns>Length of list after push</returns>
public long LPushX(string key, object value)
{
return Write(RedisCommands.LPushX(key, value));
}
/// <summary>
/// Get a range of elements from a list
/// </summary>
/// <param name="key">List key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>List of elements in range</returns>
public string[] LRange(string key, long start, long stop)
{
return Write(RedisCommands.LRange(key, start, stop));
}
/// <summary>Binary-safe variant of LRange: returns elements as raw bytes</summary>
public byte[][] LRangeBytes(string key, long start, long stop)
{
return Write(RedisCommands.LRangeBytes(key, start, stop));
}
/// <summary>
/// Remove elements from a list
/// </summary>
/// <param name="count">&gt;0: remove N elements from head to tail; &lt;0: remove N elements from tail to head; =0: remove all elements</param>
/// <param name="key">List key</param>
/// <param name="value">Remove elements equal to value</param>
/// <returns>Number of removed elements</returns>
public long LRem(string key, long count, object value)
{
return Write(RedisCommands.LRem(key, count, value));
}
/// <summary>
/// Set the value of an element in a list by its index
/// </summary>
/// <param name="key">List key</param>
/// <param name="index">List index to modify</param>
/// <param name="value">New element value</param>
/// <returns>Status code</returns>
public string LSet(string key, long index, object value)
{
return Write(RedisCommands.LSet(key, index, value));
}
/// <summary>
/// Trim a list to the specified range
/// </summary>
/// <param name="key">List key</param>
/// <param name="start">Zero-based start index</param>
/// <param name="stop">Zero-based stop index</param>
/// <returns>Status code</returns>
public string LTrim(string key, long start, long stop)
{
return Write(RedisCommands.LTrim(key, start, stop));
}
/// <summary>
/// Remove and get the last element in a list
/// </summary>
/// <param name="key">List key</param>
/// <returns>Value of last list element</returns>
public string RPop(string key)
{
return Write(RedisCommands.RPop(key));
}
/// <summary>Binary-safe variant of RPop: returns the element as raw bytes</summary>
public byte[] RPopBytes(string key)
{
return Write(RedisCommands.RPopBytes(key));
}
/// <summary>
/// Remove the last element in a list, append it to another list and return it
/// </summary>
/// <param name="source">List source key</param>
/// <param name="destination">Destination key</param>
/// <returns>Element being popped and pushed</returns>
public string RPopLPush(string source, string destination)
{
return Write(RedisCommands.RPopLPush(source, destination));
}
/// <summary>Binary-safe variant of RPopLPush: returns the element as raw bytes</summary>
public byte[] RPopBytesLPush(string source, string destination)
{
return Write(RedisCommands.RPopBytesLPush(source, destination));
}
/// <summary>
/// Append one or multiple values to a list
/// </summary>
/// <param name="key">List key</param>
/// <param name="values">Values to push</param>
/// <returns>Length of list after push</returns>
public long RPush(string key, params object[] values)
{
return Write(RedisCommands.RPush(key, values));
}
/// <summary>
/// Append a value to a list, only if the list exists
/// </summary>
/// <param name="key">List key</param>
/// <param name="value">Value to push</param>
/// <returns>Length of list after push</returns>
public long RPushX(string key, object value)
{
return Write(RedisCommands.RPushX(key, value));
}
#endregion
#region Sets
/// <summary>
/// Add one or more members to a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="members">Members to add to set</param>
/// <returns>Number of elements added to set</returns>
public long SAdd(string key, params object[] members)
{
return Write(RedisCommands.SAdd(key, members));
}
/// <summary>
/// Get the number of members in a set
/// </summary>
/// <param name="key">Set key</param>
/// <returns>Number of elements in set</returns>
public long SCard(string key)
{
return Write(RedisCommands.SCard(key));
}
/// <summary>
/// Subtract multiple sets
/// </summary>
/// <param name="keys">Set keys to subtract</param>
/// <returns>Array of elements in resulting set</returns>
public string[] SDiff(params string[] keys)
{
return Write(RedisCommands.SDiff(keys));
}
/// <summary>Binary-safe variant of SDiff: returns elements as raw bytes</summary>
public byte[][] SDiffBytes(params string[] keys)
{
return Write(RedisCommands.SDiffBytes(keys));
}
/// <summary>
/// Subtract multiple sets and store the resulting set in a key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="keys">Set keys to subtract</param>
/// <returns>Number of elements in the resulting set</returns>
public long SDiffStore(string destination, params string[] keys)
{
return Write(RedisCommands.SDiffStore(destination, keys));
}
/// <summary>
/// Intersect multiple sets
/// </summary>
/// <param name="keys">Set keys to intersect</param>
/// <returns>Array of elements in resulting set</returns>
public string[] SInter(params string[] keys)
{
return Write(RedisCommands.SInter(keys));
}
/// <summary>Binary-safe variant of SInter: returns elements as raw bytes</summary>
public byte[][] SInterBytes(params string[] keys)
{
return Write(RedisCommands.SInterBytes(keys));
}
/// <summary>
/// Intersect multiple sets and store the resulting set in a key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="keys">Set keys to intersect</param>
/// <returns>Number of elements in resulting set</returns>
public long SInterStore(string destination, params string[] keys)
{
return Write(RedisCommands.SInterStore(destination, keys));
}
/// <summary>
/// Determine if a given value is a member of a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="member">Member to lookup</param>
/// <returns>True if member exists in set</returns>
public bool SIsMember(string key, object member)
{
return Write(RedisCommands.SIsMember(key, member));
}
/// <summary>
/// Get all the members in a set
/// </summary>
/// <param name="key">Set key</param>
/// <returns>All elements in the set</returns>
public string[] SMembers(string key)
{
return Write(RedisCommands.SMembers(key));
}
/// <summary>Binary-safe variant of SMembers: returns elements as raw bytes</summary>
public byte[][] SMembersBytes(string key)
{
return Write(RedisCommands.SMembersBytes(key));
}
/// <summary>
/// Move a member from one set to another
/// </summary>
/// <param name="source">Source key</param>
/// <param name="destination">Destination key</param>
/// <param name="member">Member to move</param>
/// <returns>True if element was moved</returns>
public bool SMove(string source, string destination, object member)
{
return Write(RedisCommands.SMove(source, destination, member));
}
/// <summary>
/// Remove and return a random member from a set
/// </summary>
/// <param name="key">Set key</param>
/// <returns>The removed element</returns>
public string SPop(string key)
{
return Write(RedisCommands.SPop(key));
}
/// <summary>Binary-safe variant of SPop: returns the element as raw bytes</summary>
public byte[] SPopBytes(string key)
{
return Write(RedisCommands.SPopBytes(key));
}
/// <summary>
/// Remove and return one or more random member from a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="count">Number of elements to remove and return</param>
/// <returns>The removed elements</returns>
public string[] SPop(string key, long count)
{
return Write(RedisCommands.SPop(key, count));
}
/// <summary>Binary-safe variant of SPop: returns elements as raw bytes</summary>
public byte[][] SPopBytes(string key, long count)
{
return Write(RedisCommands.SPopBytes(key, count));
}
/// <summary>
/// Get a random member from a set
/// </summary>
/// <param name="key">Set key</param>
/// <returns>One random element from set</returns>
public string SRandMember(string key)
{
return Write(RedisCommands.SRandMember(key));
}
/// <summary>Binary-safe variant of SRandMember: returns the element as raw bytes</summary>
public byte[] SRandMemberBytes(string key)
{
return Write(RedisCommands.SRandMemberBytes(key));
}
/// <summary>
/// Get one or more random members from a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="count">Number of elements to return</param>
/// <returns>One or more random elements from set</returns>
public string[] SRandMembers(string key, long count)
{
return Write(RedisCommands.SRandMembers(key, count));
}
/// <summary>Binary-safe variant of SRandMembers: returns elements as raw bytes</summary>
public byte[][] SRandMembersBytes(string key, long count)
{
return Write(RedisCommands.SRandMembersBytes(key, count));
}
/// <summary>
/// Remove one or more members from a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="members">Set members to remove</param>
/// <returns>Number of elements removed from set</returns>
public long SRem(string key, params object[] members)
{
return Write(RedisCommands.SRem(key, members));
}
/// <summary>
/// Add multiple sets
/// </summary>
/// <param name="keys">Set keys to union</param>
/// <returns>Array of elements in resulting set</returns>
public string[] SUnion(params string[] keys)
{
return Write(RedisCommands.SUnion(keys));
}
/// <summary>Binary-safe variant of SUnion: returns elements as raw bytes</summary>
public byte[][] SUnionBytes(params string[] keys)
{
return Write(RedisCommands.SUnionBytes(keys));
}
/// <summary>
/// Add multiple sets and store the resulting set in a key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="keys">Set keys to union</param>
/// <returns>Number of elements in resulting set</returns>
public long SUnionStore(string destination, params string[] keys)
{
return Write(RedisCommands.SUnionStore(destination, keys));
}
/// <summary>
/// Iterate the elements of a set field
/// </summary>
/// <param name="key">Set key</param>
/// <param name="cursor">The cursor returned by the server in the previous call, or 0 if this is the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Maximum number of elements to return</param>
/// <returns>Updated cursor and result set</returns>
public RedisScan<string> SScan(string key, long cursor, string pattern = null, long? count = null)
{
return Write(RedisCommands.SScan(key, cursor, pattern, count));
}
/// <summary>Binary-safe variant of SScan: returns elements as raw bytes</summary>
public RedisScan<byte[]> SScanBytes(string key, long cursor, string pattern = null, long? count = null)
{
return Write(RedisCommands.SScanBytes(key, cursor, pattern, count));
}
#endregion
#region Sorted Sets
public Tuple<string, decimal>[] ZPopMax(string key, long count)
{
return Write(RedisCommands.ZPopMax(key, count));
}
public Tuple<byte[], decimal>[] ZPopMaxBytes(string key, long count)
{
return Write(RedisCommands.ZPopMaxBytes(key, count));
}
public Tuple<string, decimal>[] ZPopMin(string key, long count)
{
return Write(RedisCommands.ZPopMin(key, count));
}
public Tuple<byte[], decimal>[] ZPopMinBytes(string key, long count)
{
return Write(RedisCommands.ZPopMinBytes(key, count));
}
/// <summary>
/// Add one or more members to a sorted set, or update its score if it already exists
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="scoreMembers">Array of member scores to add to sorted set</param>
/// <returns>Number of elements added to the sorted set (not including member updates)</returns>
public long ZAdd<TScore, TMember>(string key, params Tuple<TScore, TMember>[] scoreMembers)
{
return Write(RedisCommands.ZAdd(key, scoreMembers));
}
/// <summary>
/// Add one or more members to a sorted set, or update its score if it already exists
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="scoreMembers">Array of member scores [s1, m1, s2, m2, ..]</param>
/// <returns>Number of elements added to the sorted set (not including member updates)</returns>
public long ZAdd(string key, params object[] scoreMembers)
{
return Write(RedisCommands.ZAdd(key, scoreMembers));
}
/// <summary>
/// Get the number of members in a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <returns>Number of elements in the sorted set</returns>
public long ZCard(string key)
{
return Write(RedisCommands.ZCard(key));
}
/// <summary>
/// Count the members in a sorted set with scores within the given values
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <returns>Number of elements in the specified score range</returns>
public long ZCount(string key, decimal min, decimal max, bool exclusiveMin = false, bool exclusiveMax = false)
{
return Write(RedisCommands.ZCount(key, min, max, exclusiveMin, exclusiveMax));
}
/// <summary>
/// Count the members in a sorted set with scores within the given values
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <returns>Number of elements in the specified score range</returns>
public long ZCount(string key, string min, string max)
{
return Write(RedisCommands.ZCount(key, min, max));
}
/// <summary>
/// Increment the score of a member in a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="increment">Amount to add to the member's score</param>
/// <param name="member">Sorted set member to increment</param>
/// <returns>New score of the member</returns>
public decimal ZIncrBy(string key, decimal increment, object member)
{
    var cmd = RedisCommands.ZIncrBy(key, increment, member);
    return Write(cmd);
}
/// <summary>
/// Intersect multiple sorted sets and store the result in a new key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="weights">Multiplication factor applied to each input set</param>
/// <param name="aggregate">Aggregation function for the resulting set</param>
/// <param name="keys">Sorted set keys to intersect</param>
/// <returns>Number of elements in the resulting sorted set</returns>
public long ZInterStore(string destination, decimal[] weights = null, RedisAggregate? aggregate = null, params string[] keys)
{
    var cmd = RedisCommands.ZInterStore(destination, weights, aggregate, keys);
    return Write(cmd);
}
/// <summary>
/// Intersect multiple sorted sets and store the result in a new key,
/// using default weights and aggregation
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="keys">Sorted set keys to intersect</param>
/// <returns>Number of elements in the resulting sorted set</returns>
public long ZInterStore(string destination, params string[] keys)
{
    // Delegate to the full overload with no weights and no aggregate function.
    decimal[] noWeights = null;
    RedisAggregate? noAggregate = null;
    return ZInterStore(destination, noWeights, noAggregate, keys);
}
/// <summary>
/// Return a range of members in a sorted set, by index
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <param name="withScores">Interleave scores with members in the result</param>
/// <returns>Array of elements in the specified range (optionally with scores)</returns>
public string[] ZRange(string key, long start, long stop, bool withScores = false)
{
    var cmd = RedisCommands.ZRange(key, start, stop, withScores);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by index, as raw byte arrays
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <param name="withScores">Interleave scores with members in the result</param>
/// <returns>Array of elements in the specified range (optionally with scores)</returns>
public byte[][] ZRangeBytes(string key, long start, long stop, bool withScores = false)
{
    var cmd = RedisCommands.ZRangeBytes(key, start, stop, withScores);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by index, paired with their scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>Array of (member, score) tuples in the specified range</returns>
public Tuple<string, decimal>[] ZRangeWithScores(string key, long start, long stop)
{
    var cmd = RedisCommands.ZRangeWithScores(key, start, stop);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by index, as raw byte arrays paired with their scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>Array of (member, score) tuples in the specified range</returns>
public Tuple<byte[], decimal>[] ZRangeBytesWithScores(string key, long start, long stop)
{
    var cmd = RedisCommands.ZRangeBytesWithScores(key, start, stop);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by score
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="withScores">Interleave scores with members in the result</param>
/// <param name="exclusiveMin">Treat the minimum score as exclusive</param>
/// <param name="exclusiveMax">Treat the maximum score as exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified range (optionally with scores)</returns>
public string[] ZRangeByScore(string key, decimal min, decimal max, bool withScores = false, bool exclusiveMin = false, bool exclusiveMax = false, long? offset = null, long? count = null)
{
    var cmd = RedisCommands.ZRangeByScore(key, min, max, withScores, exclusiveMin, exclusiveMax, offset, count);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by score, as raw byte arrays
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="withScores">Interleave scores with members in the result</param>
/// <param name="exclusiveMin">Treat the minimum score as exclusive</param>
/// <param name="exclusiveMax">Treat the maximum score as exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified range (optionally with scores)</returns>
public byte[][] ZRangeBytesByScore(string key, decimal min, decimal max, bool withScores = false, bool exclusiveMin = false, bool exclusiveMax = false, long? offset = null, long? count = null)
{
    var cmd = RedisCommands.ZRangeBytesByScore(key, min, max, withScores, exclusiveMin, exclusiveMax, offset, count);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by score expressed as Redis score strings
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="withScores">Interleave scores with members in the result</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified range (optionally with scores)</returns>
public string[] ZRangeByScore(string key, string min, string max, bool withScores = false, long? offset = null, long? count = null)
{
    var cmd = RedisCommands.ZRangeByScore(key, min, max, withScores, offset, count);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by score expressed as Redis score strings, as raw byte arrays
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="withScores">Interleave scores with members in the result</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified range (optionally with scores)</returns>
public byte[][] ZRangeBytesByScore(string key, string min, string max, bool withScores = false, long? offset = null, long? count = null)
{
    var cmd = RedisCommands.ZRangeBytesByScore(key, min, max, withScores, offset, count);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by score, paired with their scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="exclusiveMin">Treat the minimum score as exclusive</param>
/// <param name="exclusiveMax">Treat the maximum score as exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of (member, score) tuples in the specified range</returns>
public Tuple<string, decimal>[] ZRangeByScoreWithScores(string key, decimal min, decimal max, bool exclusiveMin = false, bool exclusiveMax = false, long? offset = null, long? count = null)
{
    var cmd = RedisCommands.ZRangeByScoreWithScores(key, min, max, exclusiveMin, exclusiveMax, offset, count);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by score, as raw byte arrays paired with their scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="exclusiveMin">Treat the minimum score as exclusive</param>
/// <param name="exclusiveMax">Treat the maximum score as exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of (member, score) tuples in the specified range</returns>
public Tuple<byte[], decimal>[] ZRangeBytesByScoreWithScores(string key, decimal min, decimal max, bool exclusiveMin = false, bool exclusiveMax = false, long? offset = null, long? count = null)
{
    var cmd = RedisCommands.ZRangeBytesByScoreWithScores(key, min, max, exclusiveMin, exclusiveMax, offset, count);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by score expressed as Redis score strings, paired with their scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of (member, score) tuples in the specified range</returns>
public Tuple<string, decimal>[] ZRangeByScoreWithScores(string key, string min, string max, long? offset = null, long? count = null)
{
    var cmd = RedisCommands.ZRangeByScoreWithScores(key, min, max, offset, count);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by score expressed as Redis score strings,
/// as raw byte arrays paired with their scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of (member, score) tuples in the specified range</returns>
public Tuple<byte[], decimal>[] ZRangeBytesByScoreWithScores(string key, string min, string max, long? offset = null, long? count = null)
{
    var cmd = RedisCommands.ZRangeBytesByScoreWithScores(key, min, max, offset, count);
    return Write(cmd);
}
/// <summary>
/// Determine the index (rank) of a member in a sorted set, ordered low to high
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="member">Member to look up</param>
/// <returns>Rank of the member, or null if the member does not exist</returns>
public long? ZRank(string key, object member)
{
    var cmd = RedisCommands.ZRank(key, member);
    return Write(cmd);
}
/// <summary>
/// Remove one or more members from a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="members">Members to remove</param>
/// <returns>Number of elements removed</returns>
public long ZRem(string key, params object[] members)
{
    var cmd = RedisCommands.ZRem(key, members);
    return Write(cmd);
}
/// <summary>
/// Remove all members in a sorted set within the given index range
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>Number of elements removed</returns>
public long ZRemRangeByRank(string key, long start, long stop)
{
    var cmd = RedisCommands.ZRemRangeByRank(key, start, stop);
    return Write(cmd);
}
/// <summary>
/// Remove all members in a sorted set within the given score range
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="exclusiveMin">Treat the minimum score as exclusive</param>
/// <param name="exclusiveMax">Treat the maximum score as exclusive</param>
/// <returns>Number of elements removed</returns>
public long ZRemRangeByScore(string key, decimal min, decimal max, bool exclusiveMin = false, bool exclusiveMax = false)
{
    var cmd = RedisCommands.ZRemRangeByScore(key, min, max, exclusiveMin, exclusiveMax);
    return Write(cmd);
}
/// <summary>
/// Remove all members in a sorted set within the given score range,
/// expressed as Redis score strings (e.g. "(1.5", "+inf")
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <returns>Number of elements removed</returns>
public long ZRemRangeByScore(string key, string min, string max)
{
    var cmd = RedisCommands.ZRemRangeByScore(key, min, max);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by index, ordered from high to low score
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <param name="withScores">Interleave scores with members in the result</param>
/// <returns>List of elements in the specified range (optionally with scores)</returns>
public string[] ZRevRange(string key, long start, long stop, bool withScores = false)
{
    var cmd = RedisCommands.ZRevRange(key, start, stop, withScores);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by index, ordered from high to low score, as raw byte arrays
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <param name="withScores">Interleave scores with members in the result</param>
/// <returns>List of elements in the specified range (optionally with scores)</returns>
public byte[][] ZRevRangeBytes(string key, long start, long stop, bool withScores = false)
{
    var cmd = RedisCommands.ZRevRangeBytes(key, start, stop, withScores);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by index, ordered from high to low score,
/// paired with their scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>List of (member, score) tuples in the specified range</returns>
public Tuple<string, decimal>[] ZRevRangeWithScores(string key, long start, long stop)
{
    var cmd = RedisCommands.ZRevRangeWithScores(key, start, stop);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by index, ordered from high to low score,
/// as raw byte arrays paired with their scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>List of (member, score) tuples in the specified range</returns>
public Tuple<byte[], decimal>[] ZRevRangeBytesWithScores(string key, long start, long stop)
{
    var cmd = RedisCommands.ZRevRangeBytesWithScores(key, start, stop);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by score, ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="withScores">Interleave scores with members in the result</param>
/// <param name="exclusiveMax">Treat the maximum score as exclusive</param>
/// <param name="exclusiveMin">Treat the minimum score as exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified score range (optionally with scores)</returns>
public string[] ZRevRangeByScore(string key, decimal max, decimal min, bool withScores = false, bool exclusiveMax = false, bool exclusiveMin = false, long? offset = null, long? count = null)
{
    var cmd = RedisCommands.ZRevRangeByScore(key, max, min, withScores, exclusiveMax, exclusiveMin, offset, count);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by score, ordered from high to low, as raw byte arrays
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="withScores">Interleave scores with members in the result</param>
/// <param name="exclusiveMax">Treat the maximum score as exclusive</param>
/// <param name="exclusiveMin">Treat the minimum score as exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified score range (optionally with scores)</returns>
public byte[][] ZRevRangeBytesByScore(string key, decimal max, decimal min, bool withScores = false, bool exclusiveMax = false, bool exclusiveMin = false, long? offset = null, long? count = null)
{
    var cmd = RedisCommands.ZRevRangeBytesByScore(key, max, min, withScores, exclusiveMax, exclusiveMin, offset, count);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by score expressed as Redis score strings,
/// ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="withScores">Interleave scores with members in the result</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified score range (optionally with scores)</returns>
public string[] ZRevRangeByScore(string key, string max, string min, bool withScores = false, long? offset = null, long? count = null)
{
    var cmd = RedisCommands.ZRevRangeByScore(key, max, min, withScores, offset, count);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by score expressed as Redis score strings,
/// ordered from high to low, as raw byte arrays
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="withScores">Interleave scores with members in the result</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified score range (optionally with scores)</returns>
public byte[][] ZRevRangeBytesByScore(string key, string max, string min, bool withScores = false, long? offset = null, long? count = null)
{
    var cmd = RedisCommands.ZRevRangeBytesByScore(key, max, min, withScores, offset, count);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by score, ordered from high to low,
/// paired with their scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="exclusiveMax">Treat the maximum score as exclusive</param>
/// <param name="exclusiveMin">Treat the minimum score as exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of (member, score) tuples in the specified score range</returns>
public Tuple<string, decimal>[] ZRevRangeByScoreWithScores(string key, decimal max, decimal min, bool exclusiveMax = false, bool exclusiveMin = false, long? offset = null, long? count = null)
{
    var cmd = RedisCommands.ZRevRangeByScoreWithScores(key, max, min, exclusiveMax, exclusiveMin, offset, count);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by score, ordered from high to low,
/// as raw byte arrays paired with their scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="exclusiveMax">Treat the maximum score as exclusive</param>
/// <param name="exclusiveMin">Treat the minimum score as exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of (member, score) tuples in the specified score range</returns>
public Tuple<byte[], decimal>[] ZRevRangeBytesByScoreWithScores(string key, decimal max, decimal min, bool exclusiveMax = false, bool exclusiveMin = false, long? offset = null, long? count = null)
{
    var cmd = RedisCommands.ZRevRangeBytesByScoreWithScores(key, max, min, exclusiveMax, exclusiveMin, offset, count);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by score expressed as Redis score strings,
/// ordered from high to low, paired with their scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of (member, score) tuples in the specified score range</returns>
public Tuple<string, decimal>[] ZRevRangeByScoreWithScores(string key, string max, string min, long? offset = null, long? count = null)
{
    var cmd = RedisCommands.ZRevRangeByScoreWithScores(key, max, min, offset, count);
    return Write(cmd);
}
/// <summary>
/// Return a range of members in a sorted set, by score expressed as Redis score strings,
/// ordered from high to low, as raw byte arrays paired with their scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of (member, score) tuples in the specified score range</returns>
public Tuple<byte[], decimal>[] ZRevRangeBytesByScoreWithScores(string key, string max, string min, long? offset = null, long? count = null)
{
    var cmd = RedisCommands.ZRevRangeBytesByScoreWithScores(key, max, min, offset, count);
    return Write(cmd);
}
/// <summary>
/// Determine the index (rank) of a member in a sorted set, ordered from high to low score
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="member">Member to look up</param>
/// <returns>Rank of the member, or null if the member does not exist</returns>
public long? ZRevRank(string key, object member)
{
    var cmd = RedisCommands.ZRevRank(key, member);
    return Write(cmd);
}
/// <summary>
/// Get the score associated with the given member in a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="member">Member to look up</param>
/// <returns>Score of the member, or null if the member does not exist</returns>
public decimal? ZScore(string key, object member)
{
    var cmd = RedisCommands.ZScore(key, member);
    return Write(cmd);
}
/// <summary>
/// Union multiple sorted sets and store the result in a new key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="weights">Multiplication factor applied to each input set</param>
/// <param name="aggregate">Aggregation function for the resulting set</param>
/// <param name="keys">Sorted set keys to union</param>
/// <returns>Number of elements in the resulting sorted set</returns>
public long ZUnionStore(string destination, decimal[] weights = null, RedisAggregate? aggregate = null, params string[] keys)
{
    var cmd = RedisCommands.ZUnionStore(destination, weights, aggregate, keys);
    return Write(cmd);
}
/// <summary>
/// Union multiple sorted sets and store the result in a new key,
/// using default weights and aggregation
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="keys">Sorted set keys to union</param>
/// <returns>Number of elements in the resulting sorted set</returns>
public long ZUnionStore(string destination, params string[] keys)
{
    // Delegate to the full overload (no weights, no aggregate) for a single
    // code path, consistent with the ZInterStore(destination, keys) overload.
    return ZUnionStore(destination, null, null, keys);
}
/// <summary>
/// Incrementally iterate the members and scores of a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="cursor">Cursor returned by the server on the previous call, or 0 for the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Maximum number of elements to return per iteration</param>
/// <returns>Updated cursor and result set</returns>
public RedisScan<Tuple<string, decimal>> ZScan(string key, long cursor, string pattern = null, long? count = null)
{
    var cmd = RedisCommands.ZScan(key, cursor, pattern, count);
    return Write(cmd);
}
/// <summary>
/// Incrementally iterate the members and scores of a sorted set, returning members as raw byte arrays
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="cursor">Cursor returned by the server on the previous call, or 0 for the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Maximum number of elements to return per iteration</param>
/// <returns>Updated cursor and result set</returns>
public RedisScan<Tuple<byte[], decimal>> ZScanBytes(string key, long cursor, string pattern = null, long? count = null)
{
    var cmd = RedisCommands.ZScanBytes(key, cursor, pattern, count);
    return Write(cmd);
}
/// <summary>
/// Retrieve all elements in a sorted set with a value between min and max (lexicographical range)
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Lexicographical start value. Prefix with '(' for exclusive or '[' for inclusive; use '-' or '+' for infinity.</param>
/// <param name="max">Lexicographical stop value. Prefix with '(' for exclusive or '[' for inclusive; use '-' or '+' for infinity.</param>
/// <param name="offset">Limit result set by offset</param>
/// <param name="count">Limit result set by size</param>
/// <returns>List of elements in the specified range</returns>
public string[] ZRangeByLex(string key, string min, string max, long? offset = null, long? count = null)
{
    var cmd = RedisCommands.ZRangeByLex(key, min, max, offset, count);
    return Write(cmd);
}
/// <summary>
/// Retrieve all elements in a sorted set with a value between min and max
/// (lexicographical range), as raw byte arrays
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Lexicographical start value. Prefix with '(' for exclusive or '[' for inclusive; use '-' or '+' for infinity.</param>
/// <param name="max">Lexicographical stop value. Prefix with '(' for exclusive or '[' for inclusive; use '-' or '+' for infinity.</param>
/// <param name="offset">Limit result set by offset</param>
/// <param name="count">Limit result set by size</param>
/// <returns>List of elements in the specified range</returns>
public byte[][] ZRangeBytesByLex(string key, string min, string max, long? offset = null, long? count = null)
{
    var cmd = RedisCommands.ZRangeBytesByLex(key, min, max, offset, count);
    return Write(cmd);
}
/// <summary>
/// Remove all elements in a sorted set with a value between min and max (lexicographical range)
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Lexicographical start value. Prefix with '(' for exclusive or '[' for inclusive; use '-' or '+' for infinity.</param>
/// <param name="max">Lexicographical stop value. Prefix with '(' for exclusive or '[' for inclusive; use '-' or '+' for infinity.</param>
/// <returns>Number of elements removed</returns>
public long ZRemRangeByLex(string key, string min, string max)
{
    var cmd = RedisCommands.ZRemRangeByLex(key, min, max);
    return Write(cmd);
}
/// <summary>
/// Count the elements in a sorted set with a value between min and max (lexicographical range)
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Lexicographical start value. Prefix with '(' for exclusive or '[' for inclusive; use '-' or '+' for infinity.</param>
/// <param name="max">Lexicographical stop value. Prefix with '(' for exclusive or '[' for inclusive; use '-' or '+' for infinity.</param>
/// <returns>Number of elements in the specified range</returns>
public long ZLexCount(string key, string min, string max)
{
    var cmd = RedisCommands.ZLexCount(key, min, max);
    return Write(cmd);
}
#endregion
#region Pub/Sub
/// <summary>
/// Listen for messages published to channels matching the given patterns.
/// Sent over the dedicated subscription connection.
/// </summary>
/// <param name="channelPatterns">Patterns to subscribe to</param>
public void PSubscribe(params string[] channelPatterns)
{
    var cmd = RedisCommands.PSubscribe(channelPatterns);
    _subscription.Send(cmd);
}
/// <summary>
/// Post a message to a channel
/// </summary>
/// <param name="channel">Channel to post the message to</param>
/// <param name="message">Message to send</param>
/// <returns>Number of clients that received the message</returns>
public long Publish(string channel, string message)
{
    var cmd = RedisCommands.Publish(channel, message);
    return Write(cmd);
}
/// <summary>
/// List the currently active channels
/// </summary>
/// <param name="pattern">Return only channels matching this pattern; null returns all</param>
/// <returns>Array of active channel names</returns>
public string[] PubSubChannels(string pattern = null)
{
    var cmd = RedisCommands.PubSubChannels(pattern);
    return Write(cmd);
}
/// <summary>
/// Return the number of subscribers for each of the specified channels
/// </summary>
/// <param name="channels">Channel names</param>
/// <returns>Array of (channel, subscriber count) tuples</returns>
public Tuple<string, long>[] PubSubNumSub(params string[] channels)
{
    var cmd = RedisCommands.PubSubNumSub(channels);
    return Write(cmd);
}
/// <summary>
/// Return the number of pattern subscriptions
/// </summary>
/// <returns>Number of patterns all clients are subscribed to</returns>
public long PubSubNumPat()
{
    var cmd = RedisCommands.PubSubNumPat();
    return Write(cmd);
}
/// <summary>
/// Stop listening for messages posted to channels matching the given patterns.
/// Sent over the dedicated subscription connection.
/// </summary>
/// <param name="channelPatterns">Patterns to unsubscribe from</param>
public void PUnsubscribe(params string[] channelPatterns)
{
    var cmd = RedisCommands.PUnsubscribe(channelPatterns);
    _subscription.Send(cmd);
}
/// <summary>
/// Listen for messages published to the given channels.
/// Sent over the dedicated subscription connection.
/// </summary>
/// <param name="channels">Channels to subscribe to</param>
public void Subscribe(params string[] channels)
{
    var cmd = RedisCommands.Subscribe(channels);
    _subscription.Send(cmd);
}
/// <summary>
/// Stop listening for messages posted to the given channels.
/// Sent over the dedicated subscription connection.
/// </summary>
/// <param name="channels">Channels to unsubscribe from</param>
public void Unsubscribe(params string[] channels)
{
    var cmd = RedisCommands.Unsubscribe(channels);
    _subscription.Send(cmd);
}
#endregion
#region Scripting
/// <summary>
/// Execute a Lua script server side
/// </summary>
/// <param name="script">Script body to run on the server</param>
/// <param name="keys">Keys accessed by the script</param>
/// <param name="arguments">Arguments passed to the script</param>
/// <returns>Raw Redis reply object</returns>
public object Eval(string script, string[] keys, params object[] arguments)
{
    var cmd = RedisCommands.Eval(script, keys, arguments);
    return Write(cmd);
}
/// <summary>
/// Execute a cached Lua script server side by its SHA1 hash
/// </summary>
/// <param name="sha1">SHA1 hash of a script previously loaded into the script cache</param>
/// <param name="keys">Keys accessed by the script</param>
/// <param name="arguments">Arguments passed to the script</param>
/// <returns>Raw Redis reply object</returns>
public object EvalSHA(string sha1, string[] keys, params object[] arguments)
{
    var cmd = RedisCommands.EvalSHA(sha1, keys, arguments);
    return Write(cmd);
}
/// <summary>
/// Check whether the given script SHA1 hashes exist in the script cache
/// </summary>
/// <param name="sha1s">SHA1 script hashes to check</param>
/// <returns>Array of booleans, one per hash, indicating existence on the server</returns>
public bool[] ScriptExists(params string[] sha1s)
{
    var cmd = RedisCommands.ScriptExists(sha1s);
    return Write(cmd);
}
/// <summary>
/// Remove all scripts from the script cache
/// </summary>
/// <returns>Status code</returns>
public string ScriptFlush()
{
    var cmd = RedisCommands.ScriptFlush();
    return Write(cmd);
}
/// <summary>
/// Kill the script currently being executed by the server
/// </summary>
/// <returns>Status code</returns>
public string ScriptKill()
{
    var cmd = RedisCommands.ScriptKill();
    return Write(cmd);
}
/// <summary>
/// Load the specified Lua script into the script cache without executing it
/// </summary>
/// <param name="script">Lua script body to load</param>
/// <returns>SHA1 hash of the script, for later use with EvalSHA</returns>
public string ScriptLoad(string script)
{
    var cmd = RedisCommands.ScriptLoad(script);
    return Write(cmd);
}
#endregion
#region Strings
/// <summary>
/// Append a value to the string stored at a key
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to append</param>
/// <returns>Length of the string after the append</returns>
public long Append(string key, object value)
{
    var cmd = RedisCommands.Append(key, value);
    return Write(cmd);
}
/// <summary>
/// Count the set bits in a string
/// </summary>
/// <param name="key">Key to check</param>
/// <param name="start">Start byte offset (optional)</param>
/// <param name="end">Stop byte offset (optional)</param>
/// <returns>Number of bits set to 1</returns>
public long BitCount(string key, long? start = null, long? end = null)
{
    var cmd = RedisCommands.BitCount(key, start, end);
    return Write(cmd);
}
/// <summary>
/// Perform a bitwise operation between strings and store the result
/// </summary>
/// <param name="operation">Bitwise operation to execute</param>
/// <param name="destKey">Destination key for the result</param>
/// <param name="keys">Source keys to operate on</param>
/// <returns>Size of the string stored in the destination key</returns>
public long BitOp(RedisBitOp operation, string destKey, params string[] keys)
{
    var cmd = RedisCommands.BitOp(operation, destKey, keys);
    return Write(cmd);
}
/// <summary>
/// Find the first bit set or clear in a string
/// </summary>
/// <param name="key">Key to examine</param>
/// <param name="bit">Bit value to search for (true = 1, false = 0)</param>
/// <param name="start">Start searching at this byte offset (optional)</param>
/// <param name="end">Stop searching at this byte offset (optional)</param>
/// <returns>Position of the first bit with the specified value</returns>
public long BitPos(string key, bool bit, long? start = null, long? end = null)
{
    var cmd = RedisCommands.BitPos(key, bit, start, end);
    return Write(cmd);
}
/// <summary>
/// Decrement the integer value of a key by one
/// </summary>
/// <param name="key">Key to modify</param>
/// <returns>Value of the key after the decrement</returns>
public long Decr(string key)
{
    var cmd = RedisCommands.Decr(key);
    return Write(cmd);
}
/// <summary>
/// Decrement the integer value of a key by the given amount
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="decrement">Amount to subtract</param>
/// <returns>Value of the key after the decrement</returns>
public long DecrBy(string key, long decrement)
{
    var cmd = RedisCommands.DecrBy(key, decrement);
    return Write(cmd);
}
/// <summary>
/// Get the string value of a key
/// </summary>
/// <param name="key">Key to look up</param>
/// <returns>Value of the key</returns>
public string Get(string key)
{
    var cmd = RedisCommands.Get(key);
    return Write(cmd);
}
/// <summary>
/// Get the value of a key as a raw byte array
/// </summary>
/// <param name="key">Key to look up</param>
/// <returns>Value of the key as bytes</returns>
public byte[] GetBytes(string key)
{
    var cmd = RedisCommands.GetBytes(key);
    return Write(cmd);
}
/// <summary>
/// Return the bit value at the given offset in the string stored at a key
/// </summary>
/// <param name="key">Key to look up</param>
/// <param name="offset">Bit offset to check</param>
/// <returns>Bit value stored at the offset</returns>
public bool GetBit(string key, uint offset)
{
    var cmd = RedisCommands.GetBit(key, offset);
    return Write(cmd);
}
/// <summary>
/// Get a substring of the string stored at a key
/// </summary>
/// <param name="key">Key to look up</param>
/// <param name="start">Start offset</param>
/// <param name="end">End offset</param>
/// <returns>Substring in the specified range</returns>
public string GetRange(string key, long start, long end)
{
    var cmd = RedisCommands.GetRange(key, start, end);
    return Write(cmd);
}
/// <summary>
/// Get a substring of the string stored at a key, as a raw byte array
/// </summary>
/// <param name="key">Key to look up</param>
/// <param name="start">Start offset</param>
/// <param name="end">End offset</param>
/// <returns>Substring in the specified range as bytes</returns>
public byte[] GetRangeBytes(string key, long start, long end)
{
    var cmd = RedisCommands.GetRangeBytes(key, start, end);
    return Write(cmd);
}
/// <summary>
/// Set the string value of a key and return its previous value
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <returns>Old value stored at the key, or null if the key did not exist</returns>
public string GetSet(string key, object value)
{
    var cmd = RedisCommands.GetSet(key, value);
    return Write(cmd);
}
/// <summary>
/// Set the string value of a key and return its previous value as a raw byte array
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <returns>Old value stored at the key as bytes, or null if the key did not exist</returns>
public byte[] GetSetBytes(string key, object value)
{
    var cmd = RedisCommands.GetSetBytes(key, value);
    return Write(cmd);
}
/// <summary>
/// Increment the integer value of a key by one
/// </summary>
/// <param name="key">Key to modify</param>
/// <returns>Value of the key after the increment</returns>
public long Incr(string key)
{
    var cmd = RedisCommands.Incr(key);
    return Write(cmd);
}
/// <summary>
/// Increment the integer value of a key by the given amount
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="increment">Amount to add</param>
/// <returns>Value of the key after the increment</returns>
public long IncrBy(string key, long increment)
{
    var cmd = RedisCommands.IncrBy(key, increment);
    return Write(cmd);
}
/// <summary>
/// Increment the floating-point value of a key by the given amount
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="increment">Amount to add</param>
/// <returns>Value of the key after the increment</returns>
public decimal IncrByFloat(string key, decimal increment)
{
    var cmd = RedisCommands.IncrByFloat(key, increment);
    return Write(cmd);
}
/// <summary>
/// Get the values of all the given keys
/// </summary>
/// <param name="keys">Keys to look up</param>
/// <returns>Array of values at the specified keys</returns>
public string[] MGet(params string[] keys)
{
    var cmd = RedisCommands.MGet(keys);
    return Write(cmd);
}
/// <summary>
/// Get the values of all the given keys as raw byte arrays
/// </summary>
/// <param name="keys">Keys to look up</param>
/// <returns>Array of values at the specified keys as bytes</returns>
public byte[][] MGetBytes(params string[] keys)
{
    var cmd = RedisCommands.MGetBytes(keys);
    return Write(cmd);
}
/// <summary>
/// Set multiple keys to multiple values
/// </summary>
/// <param name="keyValues">Key/value pairs to set</param>
/// <returns>Status code</returns>
public string MSet(params Tuple<string, object>[] keyValues)
{
    var cmd = RedisCommands.MSet(keyValues);
    return Write(cmd);
}
/// <summary>
/// Set multiple keys to multiple values
/// </summary>
/// <param name="keyValues">Alternating keys and values [k1, v1, k2, v2, ..]</param>
/// <returns>Status code</returns>
public string MSet(params object[] keyValues)
{
    var cmd = RedisCommands.MSet(keyValues);
    return Write(cmd);
}
/// <summary>
/// Set multiple keys to multiple values, only if none of the keys already exist
/// </summary>
/// <param name="keyValues">Key/value pairs to set</param>
/// <returns>True if all keys were set</returns>
public bool MSetNx(params Tuple<string, object>[] keyValues)
{
    var cmd = RedisCommands.MSetNx(keyValues);
    return Write(cmd);
}
/// <summary>
/// Set multiple keys to multiple values, only if none of the keys already exist
/// </summary>
/// <param name="keyValues">Alternating keys and values [k1, v1, k2, v2, ..]</param>
/// <returns>True if all keys were set</returns>
public bool MSetNx(params object[] keyValues)
{
    var cmd = RedisCommands.MSetNx(keyValues);
    return Write(cmd);
}
/// <summary>
/// Set the value of a key together with an expiration in milliseconds
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="milliseconds">Expiration in milliseconds</param>
/// <param name="value">Value to set</param>
/// <returns>Status code</returns>
public string PSetEx(string key, long milliseconds, object value)
{
    var cmd = RedisCommands.PSetEx(key, milliseconds, value);
    return Write(cmd);
}
/// <summary>
/// Set the string value of a key
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <returns>Status code</returns>
public string Set(string key, object value)
{
    var cmd = RedisCommands.Set(key, value);
    return Write(cmd);
}
/// <summary>
/// Set the string value of a key with an atomic expiration and an existence condition
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <param name="expiration">Expiration, applied to the nearest millisecond</param>
/// <param name="condition">Existence condition required for the key to be set</param>
/// <returns>Status code, or null if the condition was not met</returns>
public string Set(string key, object value, TimeSpan expiration, RedisExistence? condition = null)
{
    var cmd = RedisCommands.Set(key, value, expiration, condition);
    return Write(cmd);
}
/// <summary>
/// Set the string value of a key with an atomic expiration and an existence condition
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <param name="expirationSeconds">Expiration in whole seconds</param>
/// <param name="condition">Existence condition required for the key to be set</param>
/// <returns>Status code, or null if the condition was not met</returns>
public string Set(string key, object value, int? expirationSeconds = null, RedisExistence? condition = null)
{
    var cmd = RedisCommands.Set(key, value, expirationSeconds, condition);
    return Write(cmd);
}
/// <summary>
/// Set the string value of a key with an atomic expiration and an existence condition
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <param name="expirationMilliseconds">Expiration in milliseconds</param>
/// <param name="condition">Existence condition required for the key to be set</param>
/// <returns>Status code, or null if the condition was not met</returns>
public string Set(string key, object value, long? expirationMilliseconds = null, RedisExistence? condition = null)
{
    var cmd = RedisCommands.Set(key, value, expirationMilliseconds, condition);
    return Write(cmd);
}
/// <summary>
/// Sets or clears the bit at offset in the string value stored at key
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="offset">Modify key at offset</param>
/// <param name="value">Value to set (on or off)</param>
/// <returns>Original bit stored at offset</returns>
public bool SetBit(string key, uint offset, bool value)
{
return Write(RedisCommands.SetBit(key, offset, value));
}
/// <summary>
/// Set the value and expiration of a key
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="seconds">Expiration in seconds</param>
/// <param name="value">Value to set</param>
/// <returns>Status code</returns>
public string SetEx(string key, long seconds, object value)
{
return Write(RedisCommands.SetEx(key, seconds, value));
}
/// <summary>
/// Set the value of a key, only if the key does not exist
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <returns>True if key was set</returns>
public bool SetNx(string key, object value)
{
return Write(RedisCommands.SetNx(key, value));
}
/// <summary>
/// Overwrite part of a string at key starting at the specified offset
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="offset">Start offset</param>
/// <param name="value">Value to write at offset</param>
/// <returns>Length of string after operation</returns>
public long SetRange(string key, uint offset, object value)
{
return Write(RedisCommands.SetRange(key, offset, value));
}
/// <summary>
/// Get the length of the value stored in a key
/// </summary>
/// <param name="key">Key to lookup</param>
/// <returns>Length of string at key</returns>
public long StrLen(string key)
{
return Write(RedisCommands.StrLen(key));
}
#endregion
#region Server
/// <summary>Asynchronously rewrite the append-only file (BGREWRITEAOF)</summary>
/// <returns>Status code</returns>
public string BgRewriteAof() => Write(RedisCommands.BgRewriteAof());

/// <summary>Asynchronously save the dataset to disk (BGSAVE)</summary>
/// <returns>Status code</returns>
public string BgSave() => Write(RedisCommands.BgSave());

/// <summary>Kill the connection of a client</summary>
/// <param name="ip">Client IP returned from CLIENT LIST</param>
/// <param name="port">Client port returned from CLIENT LIST</param>
/// <returns>Status code</returns>
public string ClientKill(string ip, int port) => Write(RedisCommands.ClientKill(ip, port));

/// <summary>Kill the connection of a client matched by filters</summary>
/// <param name="addr">client's ip:port</param>
/// <param name="id">client's unique ID</param>
/// <param name="type">client type (normal|slave|pubsub)</param>
/// <param name="skipMe">do not kill the calling client</param>
/// <returns>Number of clients killed</returns>
public long ClientKill(string addr = null, string id = null, string type = null, bool? skipMe = null)
    => Write(RedisCommands.ClientKill(addr, id, type, skipMe));

/// <summary>Get the list of client connections (CLIENT LIST)</summary>
/// <returns>Formatted string of clients</returns>
public string ClientList() => Write(RedisCommands.ClientList());

/// <summary>Suspend all Redis clients for the specified amount of time</summary>
/// <param name="milliseconds">Time to pause in milliseconds</param>
/// <returns>Status code</returns>
public string ClientPause(int milliseconds) => Write(RedisCommands.ClientPause(milliseconds));

/// <summary>Suspend all Redis clients for the specified amount of time</summary>
/// <param name="timeout">Time to pause</param>
/// <returns>Status code</returns>
public string ClientPause(TimeSpan timeout) => Write(RedisCommands.ClientPause(timeout));

/// <summary>Get the current connection name (CLIENT GETNAME)</summary>
/// <returns>Connection name</returns>
public string ClientGetName() => Write(RedisCommands.ClientGetName());

/// <summary>Set the current connection name (CLIENT SETNAME)</summary>
/// <param name="connectionName">Name of connection (no spaces)</param>
/// <returns>Status code</returns>
public string ClientSetName(string connectionName) => Write(RedisCommands.ClientSetName(connectionName));

/// <summary>Get the value of a configuration parameter (CONFIG GET)</summary>
/// <param name="parameter">Configuration parameter to lookup</param>
/// <returns>Parameter/value pairs</returns>
public Tuple<string, string>[] ConfigGet(string parameter) => Write(RedisCommands.ConfigGet(parameter));

/// <summary>Reset the stats returned by INFO (CONFIG RESETSTAT)</summary>
/// <returns>Status code</returns>
public string ConfigResetStat() => Write(RedisCommands.ConfigResetStat());

/// <summary>Rewrite redis.conf to reflect the current configuration (CONFIG REWRITE)</summary>
/// <returns>Status code</returns>
public string ConfigRewrite() => Write(RedisCommands.ConfigRewrite());

/// <summary>Set a configuration parameter to the given value (CONFIG SET)</summary>
/// <param name="parameter">Parameter to set</param>
/// <param name="value">Value to set</param>
/// <returns>Status code</returns>
public string ConfigSet(string parameter, string value) => Write(RedisCommands.ConfigSet(parameter, value));

/// <summary>Return the number of keys in the selected database (DBSIZE)</summary>
/// <returns>Number of keys</returns>
public long DbSize() => Write(RedisCommands.DbSize());

/// <summary>Make the server crash (DEBUG SEGFAULT)</summary>
/// <returns>Status code</returns>
public string DebugSegFault() => Write(RedisCommands.DebugSegFault());

/// <summary>Remove all keys from all databases (FLUSHALL)</summary>
/// <returns>Status code</returns>
public string FlushAll() => Write(RedisCommands.FlushAll());

/// <summary>Remove all keys from the current database (FLUSHDB)</summary>
/// <returns>Status code</returns>
public string FlushDb() => Write(RedisCommands.FlushDb());

/// <summary>Get information and statistics about the server (INFO)</summary>
/// <param name="section">all|default|server|clients|memory|persistence|stats|replication|cpu|commandstats|cluster|keyspace</param>
/// <returns>Formatted string</returns>
public string Info(string section = null) => Write(RedisCommands.Info(section));

/// <summary>Get the timestamp of the last successful save to disk (LASTSAVE)</summary>
/// <returns>Date of last save</returns>
public DateTime LastSave() => Write(RedisCommands.LastSave());

/// <summary>Listen for all requests received by the server in real time (MONITOR)</summary>
/// <returns>Status code</returns>
public string Monitor() => _monitor.Start();

/// <summary>Get role information for the current Redis instance (ROLE)</summary>
/// <returns>RedisMasterRole|RedisSlaveRole|RedisSentinelRole</returns>
public RedisRole Role() => Write(RedisCommands.Role());

/// <summary>Synchronously save the dataset to disk (SAVE)</summary>
/// <returns>Status code</returns>
public string Save() => Write(RedisCommands.Save());

/// <summary>Synchronously save the dataset to disk and then shut down the server (SHUTDOWN)</summary>
/// <param name="save">Force a DB saving operation even if no save points are configured</param>
/// <returns>Status code</returns>
public string Shutdown(bool? save = null) => Write(RedisCommands.Shutdown(save));

/// <summary>Make the server a slave of another instance or promote it as master (SLAVEOF)</summary>
/// <param name="host">Master host</param>
/// <param name="port">Master port</param>
/// <returns>Status code</returns>
public string SlaveOf(string host, int port) => Write(RedisCommands.SlaveOf(host, port));

/// <summary>Turn off replication, turning the Redis server into a master (SLAVEOF NO ONE)</summary>
/// <returns>Status code</returns>
public string SlaveOfNoOne() => Write(RedisCommands.SlaveOfNoOne());

/// <summary>Get latest entries from the slow log (SLOWLOG GET)</summary>
/// <param name="count">Limit entries returned</param>
/// <returns>Slow log entries</returns>
public RedisSlowLogEntry[] SlowLogGet(long? count = null) => Write(RedisCommands.SlowLogGet(count));

/// <summary>Get the length of the slow log (SLOWLOG LEN)</summary>
/// <returns>Slow log length</returns>
public long SlowLogLen() => Write(RedisCommands.SlowLogLen());

/// <summary>Reset the slow log (SLOWLOG RESET)</summary>
/// <returns>Status code</returns>
public string SlowLogReset() => Write(RedisCommands.SlowLogReset());

/// <summary>Internal command used for replication (SYNC)</summary>
/// <returns>Byte array of Redis sync data</returns>
public byte[] Sync() => Write(RedisCommands.Sync());

/// <summary>Return the current server time (TIME)</summary>
/// <returns>Server time</returns>
public DateTime Time() => Write(RedisCommands.Time());
#endregion
#region Transactions
/// <summary>
/// Discard all commands issued after MULTI
/// </summary>
/// <returns>Status code</returns>
public string Discard()
{
    string response = _transaction.Abort();
    // NOTE(review): in pipelined mode the DISCARD reply has not been read yet, so the
    // first reply of the drained pipe is returned instead. This assumes the pipe holds
    // at least one reply and that it is the DISCARD status — confirm against EndPipe().
    if (_connector.IsPipelined)
        return _connector.EndPipe()[0].ToString();
    return response;
}
/// <summary>
/// Execute all commands issued after MULTI
/// </summary>
/// <returns>Array of output from all transaction commands</returns>
public object[] Exec()
{
    return _transaction.Execute();
}
/// <summary>
/// Mark the start of a transaction block
/// </summary>
/// <returns>Status code</returns>
public string Multi()
{
    return _transaction.Start();
}
/// <summary>
/// Forget about all watched keys
/// </summary>
/// <returns>Status code</returns>
public string Unwatch()
{
    return Write(RedisCommands.Unwatch());
}
/// <summary>
/// Watch the given keys to determine execution of the MULTI/EXEC block
/// </summary>
/// <param name="keys">Keys to watch</param>
/// <returns>Status code</returns>
public string Watch(params string[] keys)
{
    return Write(RedisCommands.Watch(keys));
}
#endregion
#region HyperLogLog
/// <summary>Adds the specified elements to the specified HyperLogLog (PFADD)</summary>
/// <param name="key">Key to update</param>
/// <param name="elements">Elements to add</param>
/// <returns>True if at least one HyperLogLog internal register was altered</returns>
public bool PfAdd(string key, params object[] elements)
    => Write(RedisCommands.PfAdd(key, elements));

/// <summary>Return the approximated cardinality of the set(s) observed by the HyperLogLog at key(s) (PFCOUNT)</summary>
/// <param name="keys">One or more HyperLogLog keys to examine</param>
/// <returns>Approximated number of unique elements observed via PFADD</returns>
public long PfCount(params string[] keys)
    => Write(RedisCommands.PfCount(keys));

/// <summary>Merge N different HyperLogLogs into a single key (PFMERGE)</summary>
/// <param name="destKey">Where to store the merged HyperLogLogs</param>
/// <param name="sourceKeys">The HyperLogLog keys that will be combined</param>
/// <returns>Status code</returns>
public string PfMerge(string destKey, params string[] sourceKeys)
    => Write(RedisCommands.PfMerge(destKey, sourceKeys));
#endregion
#region Geo redis-server 3.2
/// <summary>Add one or more geospatial members to a key (GEOADD)</summary>
/// <param name="key">Sorted-set key to update</param>
/// <param name="values">Tuples of (longitude, latitude, member)</param>
/// <returns>Number of new elements added</returns>
public long GeoAdd(string key, params (decimal longitude, decimal latitude, object member)[] values)
{
    // ArgumentException (a subclass of Exception) keeps existing catch blocks working
    // while identifying which argument was invalid.
    if (values == null || values.Length == 0) throw new ArgumentException("values 参数不能为空", nameof(values));
    var args = new List<object> { key };
    // GEOADD expects repeating longitude/latitude/member triples after the key.
    foreach (var v in values) args.AddRange(new object[] { v.longitude, v.latitude, v.member });
    return Write(new RedisInt("GEOADD", args.ToArray()));
}
/// <summary>Return the distance between two members of a geospatial key (GEODIST)</summary>
/// <param name="key">Geospatial key</param>
/// <param name="member1">First member</param>
/// <param name="member2">Second member</param>
/// <param name="unit">Unit of the returned distance</param>
/// <returns>Distance, or null if one of the members is missing</returns>
public decimal? GeoDist(string key, object member1, object member2, GeoUnit unit = GeoUnit.m)
{
    // Meters is the server-side default, so the unit argument is omitted in that case.
    return unit == GeoUnit.m
        ? Write(new RedisFloat.Nullable("GEODIST", key, member1, member2))
        : Write(new RedisFloat.Nullable("GEODIST", key, member1, member2, unit));
}
/// <summary>Return Geohash strings for the given members of a geospatial key (GEOHASH)</summary>
/// <param name="key">Geospatial key</param>
/// <param name="members">Members to look up</param>
/// <returns>Geohash strings, one per member (null entries for missing members)</returns>
public string[] GeoHash(string key, object[] members)
{
    // Bug fix: the original message referred to "values", but the parameter being
    // validated is "members". Also use ArgumentException (subclass of Exception,
    // so existing catch blocks still work).
    if (members == null || members.Length == 0) throw new ArgumentException("members 参数不能为空", nameof(members));
    var args = new List<object> { key };
    args.AddRange(members);
    return Write(new RedisArray.Strings("GEOHASH", args.ToArray()));
}
/// <summary>Return the longitude/latitude of the given members of a geospatial key (GEOPOS)</summary>
/// <param name="key">Geospatial key</param>
/// <param name="members">Members to look up</param>
/// <returns>One coordinate tuple per member, or null for missing members</returns>
public (decimal longitude, decimal latitude)?[] GeoPos(string key, object[] members)
{
    // Bug fix: the original message referred to "values", but the parameter being
    // validated is "members". Also use ArgumentException (subclass of Exception,
    // so existing catch blocks still work).
    if (members == null || members.Length == 0) throw new ArgumentException("members 参数不能为空", nameof(members));
    var args = new List<object> { key };
    args.AddRange(members);
    // Each reply element is either nil (missing member) or a [longitude, latitude] pair.
    var ret = Write(new RedisArray.Generic<decimal[]>(new RedisArray.Generic<decimal>(new RedisFloat("GEOPOS", args.ToArray()))));
    return ret.Select(a => a != null && a.Length == 2 ? new (decimal, decimal)?((a[0], a[1])) : null).ToArray();
}
/// <summary>Query members within a radius of a coordinate (GEORADIUS), parsed as strings.</summary>
/// <remarks>
/// When a WITH* flag is off, its field in the returned tuple is the type default
/// (0 for dist/hash/coordinates). Coordinates come back as a 2-element array
/// [longitude, latitude] in Item4.
/// </remarks>
public (string member, decimal dist, decimal longitude, decimal latitude, long hash)[] GeoRadius(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null, bool withCoord = false, bool withDist = false, bool withHash = false)
{
    var args = new List<object>(new object[] { key, longitude, latitude, radius, unit });
    // Optional flags must follow the positional arguments in this exact order.
    if (withCoord) args.Add("WITHCOORD");
    if (withDist) args.Add("WITHDIST");
    if (withHash) args.Add("WITHHASH");
    if (count.HasValue) args.AddRange(new object[] { "COUNT", count });
    if (sorting.HasValue) args.Add(sorting);
    // Passing null for a sub-parser tells the tuple reader that the corresponding
    // reply field is absent (flag not requested).
    var cmd = new RedisTuple.Generic<string, decimal, long, decimal[]>.Single(
        new RedisString(null),
        withDist == false ? null : new RedisFloat(null),
        withHash == false ? null : new RedisInt(null),
        withCoord == false ? null : new RedisArray.Generic<decimal>(new RedisFloat(null)), "GEORADIUS", args.ToArray());
    var ret = Write(new RedisArray.Generic<Tuple<string, decimal, long, decimal[]>>(cmd));
    // Flatten Tuple<member, dist, hash, coords[]> into the public named tuple.
    return ret.Select(a => (a.Item1, a.Item2, a.Item4 == null ? default(decimal) : a.Item4[0], a.Item4 == null ? default(decimal) : a.Item4[1], a.Item3)).ToArray();
}
/// <summary>GEORADIUS variant returning members as raw byte arrays instead of strings.</summary>
public (byte[] member, decimal dist, decimal longitude, decimal latitude, long hash)[] GeoRadiusBytes(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null, bool withCoord = false, bool withDist = false, bool withHash = false)
{
    var args = new List<object>(new object[] { key, longitude, latitude, radius, unit });
    if (withCoord) args.Add("WITHCOORD");
    if (withDist) args.Add("WITHDIST");
    if (withHash) args.Add("WITHHASH");
    if (count.HasValue) args.AddRange(new object[] { "COUNT", count });
    if (sorting.HasValue) args.Add(sorting);
    var cmd = new RedisTuple.Generic<byte[], decimal, long, decimal[]>.Single(
        new RedisBytes(null),
        withDist == false ? null : new RedisFloat(null),
        withHash == false ? null : new RedisInt(null),
        withCoord == false ? null : new RedisArray.Generic<decimal>(new RedisFloat(null)), "GEORADIUS", args.ToArray());
    var ret = Write(new RedisArray.Generic<Tuple<byte[], decimal, long, decimal[]>>(cmd));
    return ret.Select(a => (a.Item1, a.Item2, a.Item4 == null ? default(decimal) : a.Item4[0], a.Item4 == null ? default(decimal) : a.Item4[1], a.Item3)).ToArray();
}
/// <summary>Query members within a radius of an existing member (GEORADIUSBYMEMBER), parsed as strings.</summary>
public (string member, decimal dist, decimal longitude, decimal latitude, long hash)[] GeoRadiusByMember(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null, bool withCoord = false, bool withDist = false, bool withHash = false)
{
    // Same flag/parse wiring as GeoRadius, but centered on an existing member.
    var args = new List<object>(new object[] { key, member, radius, unit });
    if (withCoord) args.Add("WITHCOORD");
    if (withDist) args.Add("WITHDIST");
    if (withHash) args.Add("WITHHASH");
    if (count.HasValue) args.AddRange(new object[] { "COUNT", count });
    if (sorting.HasValue) args.Add(sorting);
    var cmd = new RedisTuple.Generic<string, decimal, long, decimal[]>.Single(
        new RedisString(null),
        withDist == false ? null : new RedisFloat(null),
        withHash == false ? null : new RedisInt(null),
        withCoord == false ? null : new RedisArray.Generic<decimal>(new RedisFloat(null)), "GEORADIUSBYMEMBER", args.ToArray());
    var ret = Write(new RedisArray.Generic<Tuple<string, decimal, long, decimal[]>>(cmd));
    return ret.Select(a => (a.Item1, a.Item2, a.Item4 == null ? default(decimal) : a.Item4[0], a.Item4 == null ? default(decimal) : a.Item4[1], a.Item3)).ToArray();
}
/// <summary>GEORADIUSBYMEMBER variant returning members as raw byte arrays instead of strings.</summary>
public (byte[] member, decimal dist, decimal longitude, decimal latitude, long hash)[] GeoRadiusBytesByMember(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null, bool withCoord = false, bool withDist = false, bool withHash = false)
{
    var args = new List<object>(new object[] { key, member, radius, unit });
    if (withCoord) args.Add("WITHCOORD");
    if (withDist) args.Add("WITHDIST");
    if (withHash) args.Add("WITHHASH");
    if (count.HasValue) args.AddRange(new object[] { "COUNT", count });
    if (sorting.HasValue) args.Add(sorting);
    var cmd = new RedisTuple.Generic<byte[], decimal, long, decimal[]>.Single(
        new RedisBytes(null),
        withDist == false ? null : new RedisFloat(null),
        withHash == false ? null : new RedisInt(null),
        withCoord == false ? null : new RedisArray.Generic<decimal>(new RedisFloat(null)), "GEORADIUSBYMEMBER", args.ToArray());
    var ret = Write(new RedisArray.Generic<Tuple<byte[], decimal, long, decimal[]>>(cmd));
    return ret.Select(a => (a.Item1, a.Item2, a.Item4 == null ? default(decimal) : a.Item4[0], a.Item4 == null ? default(decimal) : a.Item4[1], a.Item3)).ToArray();
}
#endregion
#region Streams 5.0
/// <summary>Acknowledge one or more pending entries for a consumer group (XACK)</summary>
public long XAck(string key, string group, params string[] id) => Write(RedisCommands.XAck(key, group, id));
/// <summary>Append an entry to a stream, trimming to maxLen (XADD); id "*" lets the server generate it</summary>
public string XAdd(string key, long maxLen, string id = "*", params (string, string)[] fieldValues) => Write(RedisCommands.XAdd(key, maxLen, id, fieldValues));
/// <summary>Change ownership of pending entries idle longer than minIdleTime (XCLAIM)</summary>
public (string id, string[] items)[] XClaim(string key, string group, string consumer, long minIdleTime, params string[] id) =>
    Write(RedisCommands.XClaim(key, group, consumer, minIdleTime, id));
/// <summary>XCLAIM with explicit IDLE, RETRYCOUNT and FORCE options</summary>
public (string id, string[] items)[] XClaim(string key, string group, string consumer, long minIdleTime, string[] id, long idle, long retryCount, bool force) =>
    Write(RedisCommands.XClaim(key, group, consumer, minIdleTime, id, idle, retryCount, force));
/// <summary>XCLAIM JUSTID: claim entries but return only their IDs</summary>
public string[] XClaimJustId(string key, string group, string consumer, long minIdleTime, params string[] id) =>
    Write(RedisCommands.XClaimJustId(key, group, consumer, minIdleTime, id));
/// <summary>XCLAIM JUSTID with explicit IDLE, RETRYCOUNT and FORCE options</summary>
public string[] XClaimJustId(string key, string group, string consumer, long minIdleTime, string[] id, long idle, long retryCount, bool force) =>
    Write(RedisCommands.XClaimJustId(key, group, consumer, minIdleTime, id, idle, retryCount, force));
/// <summary>Delete entries from a stream by ID (XDEL)</summary>
public long XDel(string key, params string[] id) => Write(RedisCommands.XDel(key, id));
/// <summary>Create a consumer group (XGROUP CREATE); id "$" starts at the stream tail</summary>
public string XGroupCreate(string key, string group, string id = "$", bool MkStream = false) => Write(RedisCommands.XGroup.Create(key, group, id, MkStream))
/// <summary>Set a consumer group's last-delivered ID (XGROUP SETID)</summary>
public string XGroupSetId(string key, string group, string id = "$") => Write(RedisCommands.XGroup.SetId(key, group, id));
/// <summary>Destroy a consumer group (XGROUP DESTROY)</summary>
public bool XGroupDestroy(string key, string group) => Write(RedisCommands.XGroup.Destroy(key, group));
/// <summary>Remove a consumer from a group (XGROUP DELCONSUMER)</summary>
public bool XGroupDelConsumer(string key, string group, string consumer) => Write(RedisCommands.XGroup.DelConsumer(key, group, consumer));
/// <summary>Introspect a stream (XINFO STREAM)</summary>
public (long length, long radixTreeKeys, long radixTreeNodes, long groups, string lastGeneratedId, (string id, string[] items) firstEntry, (string id, string[] items) lastEntry) XInfoStream(string key) => Write(RedisCommands.XInfoStream(key));
/// <summary>List a stream's consumer groups (XINFO GROUPS)</summary>
public (string name, long consumers, long pending, string lastDeliveredId)[] XInfoGroups(string key) => Write(RedisCommands.XInfoGroups(key));
/// <summary>List a group's consumers (XINFO CONSUMERS)</summary>
public (string name, long pending, long idle)[] XInfoConsumers(string key, string group) => Write(RedisCommands.XInfoConsumers(key, group));
/// <summary>Number of entries in a stream (XLEN)</summary>
public long XLen(string key) => Write(RedisCommands.XLen(key));
/// <summary>Summary form of the pending entries list (XPENDING)</summary>
public (long count, string minId, string maxId, (string consumer, long count)[] pendings) XPending(string key, string group) => Write(RedisCommands.XPending(key, group));
/// <summary>Extended form of XPENDING over an ID range, optionally filtered by consumer</summary>
public (string id, string consumer, long millisecond, long transferTimes)[] XPending(string key, string group, string start, string end, long count, string consumer = null) => Write(RedisCommands.XPending(key, group, start, end, count, consumer));
/// <summary>Range of entries in ascending ID order (XRANGE)</summary>
public (string id, string[] items)[] XRange(string key, string start, string end, long count = 1) => Write(RedisCommands.XRange(key, start, end, count));
/// <summary>Range of entries in descending ID order (XREVRANGE)</summary>
public (string id, string[] items)[] XRevRange(string key, string end, string start, long count = 1) => Write(RedisCommands.XRevRange(key, end, start, count));
/// <summary>Read new entries from one or more streams, optionally blocking (XREAD)</summary>
public (string key, (string id, string[] items)[] data)[] XRead(long count, long block, params (string key, string id)[] streams) =>
    Write(RedisCommands.XRead(count, block, streams));
/// <summary>Read entries on behalf of a consumer group (XREADGROUP)</summary>
public (string key, (string id, string[] items)[] data)[] XReadGroup(string group, string consumer, long count, long block, params (string key, string id)[] streams) =>
    Write(RedisCommands.XReadGroup(group, consumer, count, block, streams));
/// <summary>Trim a stream to at most maxLen entries (XTRIM)</summary>
public long XTrim(string key, long maxLen) => Write(RedisCommands.XTrim(key, maxLen));
#endregion
}
}
|
2881099/csredis | 13,003 | src/CSRedisCore/RedisClient.cs | using System.Globalization;
using System.Threading;
using CSRedis.Internal;
using System;
using System.Text;
using System.Threading.Tasks;
using System.IO;
using CSRedis.Internal.IO;
using System.Net;
using System.Net.Sockets;
namespace CSRedis
{
/// <summary>
/// Represents a client connection to a Redis server instance
/// </summary>
public partial class RedisClient : IRedisClientSync, IRedisClientAsync
{
    const int DefaultPort = 6379;
    const bool DefaultSSL = false;
    const int DefaultConcurrency = 1000;
    const int DefaultBufferSize = 10240;
    // Core plumbing shared by the command wrappers defined in the other partial files.
    readonly RedisConnector _connector;
    readonly RedisTransaction _transaction;
    readonly SubscriptionListener _subscription;
    readonly MonitorListener _monitor;
    // Toggled around StreamTo(); presumably consulted by Write overloads elsewhere
    // in this partial class to defer reading the reply — confirm in the other files.
    bool _streaming;
    // Internal escape hatches exposing the underlying reader/socket to the library.
    internal RedisReader _reader => _connector?._io?.Reader;
    internal Socket Socket => (_connector?._redisSocket as RedisSocket)?._socket;
    /// <summary>
    /// Occurs when a subscription message is received
    /// </summary>
    public event EventHandler<RedisSubscriptionReceivedEventArgs> SubscriptionReceived;
    /// <summary>
    /// Occurs when a subscription channel is added or removed
    /// </summary>
    public event EventHandler<RedisSubscriptionChangedEventArgs> SubscriptionChanged;
    /// <summary>
    /// Occurs when a transaction command is acknowledged by the server
    /// </summary>
    public event EventHandler<RedisTransactionQueuedEventArgs> TransactionQueued;
    /// <summary>
    /// Occurs when a monitor message is received
    /// </summary>
    public event EventHandler<RedisMonitorEventArgs> MonitorReceived;
    /// <summary>
    /// Occurs when the connection has successfully reconnected
    /// </summary>
    public event EventHandler Connected;
    /// <summary>
    /// Get the Redis server hostname
    /// </summary>
    public string Host { get { return GetHost(); } }
    /// <summary>
    /// Get the Redis server port
    /// </summary>
    public int Port { get { return GetPort(); } }
    /// <summary>
    /// Get a value indicating whether the Redis client is connected to the server
    /// </summary>
    public bool IsConnected { get { return _connector.IsConnected; } }
    /// <summary>
    /// Get or set the string encoding used to communicate with the server
    /// </summary>
    public Encoding Encoding
    {
        get { return _connector.Encoding; }
        set { _connector.Encoding = value; }
    }
    /// <summary>
    /// Get or set the connection read timeout (milliseconds)
    /// </summary>
    public int ReceiveTimeout
    {
        get { return _connector.ReceiveTimeout; }
        set { _connector.ReceiveTimeout = value; }
    }
    /// <summary>
    /// Get or set the connection send timeout (milliseconds)
    /// </summary>
    public int SendTimeout
    {
        get { return _connector.SendTimeout; }
        set { _connector.SendTimeout = value; }
    }
    /// <summary>
    /// Get or set the number of times to attempt a reconnect after a connection fails
    /// </summary>
    public int ReconnectAttempts
    {
        get { return _connector.ReconnectAttempts; }
        set { _connector.ReconnectAttempts = value; }
    }
    /// <summary>
    /// Get or set the amount of time (milliseconds) to wait between reconnect attempts
    /// </summary>
    public int ReconnectWait
    {
        get { return _connector.ReconnectWait; }
        set { _connector.ReconnectWait = value; }
    }
    /// <summary>
    /// Create a new RedisClient using default port and encoding
    /// </summary>
    /// <param name="host">Redis server hostname</param>
    public RedisClient(string host)
        : this(host, DefaultPort)
    { }
    /// <summary>
    /// Create a new RedisClient
    /// </summary>
    /// <param name="host">Redis server hostname</param>
    /// <param name="port">Redis server port</param>
    public RedisClient(string host, int port)
        : this(host, port, DefaultSSL)
    { }
    /// <summary>
    /// Create a new RedisClient
    /// </summary>
    /// <param name="host">Redis server hostname</param>
    /// <param name="port">Redis server port</param>
    /// <param name="ssl">Set to true if remote Redis server expects SSL</param>
    public RedisClient(string host, int port, bool ssl)
        : this(host, port, ssl, DefaultConcurrency, DefaultBufferSize)
    { }
    /// <summary>
    /// Create a new RedisClient
    /// </summary>
    /// <param name="endpoint">Redis server</param>
    public RedisClient(EndPoint endpoint)
        : this(endpoint, DefaultSSL)
    { }
    /// <summary>
    /// Create a new RedisClient
    /// </summary>
    /// <param name="endpoint">Redis server</param>
    /// <param name="ssl">Set to true if remote Redis server expects SSL</param>
    public RedisClient(EndPoint endpoint, bool ssl)
        : this(endpoint, ssl, DefaultConcurrency, DefaultBufferSize)
    { }
    /// <summary>
    /// Create a new RedisClient with specific async concurrency settings
    /// </summary>
    /// <param name="host">Redis server hostname</param>
    /// <param name="port">Redis server port</param>
    /// <param name="asyncConcurrency">Max concurrent threads (default 1000)</param>
    /// <param name="asyncBufferSize">Async thread buffer size (default 10240 bytes)</param>
    public RedisClient(string host, int port, int asyncConcurrency, int asyncBufferSize)
        : this(host, port, DefaultSSL, asyncConcurrency, asyncBufferSize)
    { }
    /// <summary>
    /// Create a new RedisClient with specific async concurrency settings
    /// </summary>
    /// <param name="host">Redis server hostname</param>
    /// <param name="port">Redis server port</param>
    /// <param name="ssl">Set to true if remote Redis server expects SSL</param>
    /// <param name="asyncConcurrency">Max concurrent threads (default 1000)</param>
    /// <param name="asyncBufferSize">Async thread buffer size (default 10240 bytes)</param>
    public RedisClient(string host, int port, bool ssl, int asyncConcurrency, int asyncBufferSize)
        : this(new DnsEndPoint(host, port), ssl, asyncConcurrency, asyncBufferSize)
    { }
    /// <summary>
    /// Create a new RedisClient with specific async concurrency settings
    /// </summary>
    /// <param name="endpoint">Redis server</param>
    /// <param name="asyncConcurrency">Max concurrent threads (default 1000)</param>
    /// <param name="asyncBufferSize">Async thread buffer size (default 10240 bytes)</param>
    public RedisClient(EndPoint endpoint, int asyncConcurrency, int asyncBufferSize)
        : this(endpoint, DefaultSSL, asyncConcurrency, asyncBufferSize)
    { }
    /// <summary>
    /// Create a new RedisClient with specific async concurrency settings
    /// </summary>
    /// <param name="endpoint">Redis server</param>
    /// <param name="ssl">Set to true if remote Redis server expects SSL</param>
    /// <param name="asyncConcurrency">Max concurrent threads (default 1000)</param>
    /// <param name="asyncBufferSize">Async thread buffer size (default 10240 bytes)</param>
    public RedisClient(EndPoint endpoint, bool ssl, int asyncConcurrency, int asyncBufferSize)
        : this(new RedisSocket(ssl), endpoint, asyncConcurrency, asyncBufferSize)
    { }
    internal RedisClient(IRedisSocket socket, EndPoint endpoint)
        : this(socket, endpoint, DefaultConcurrency, DefaultBufferSize)
    { }
    // Designated constructor: all public constructors funnel here.
    internal RedisClient(IRedisSocket socket, EndPoint endpoint, int asyncConcurrency, int asyncBufferSize)
    {
        // use invariant culture - we have to set it explicitly for every thread we create to
        // prevent any floating-point problems (mostly because of number formats in non en-US cultures).
        //CultureInfo.DefaultThreadCurrentCulture = CultureInfo.InvariantCulture; 这行会影响 string.Compare 结果
        // (translation of the note above: the commented-out line affects string.Compare results)
        _connector = new RedisConnector(endpoint, socket, asyncConcurrency, asyncBufferSize);
        _transaction = new RedisTransaction(_connector);
        _subscription = new SubscriptionListener(_connector);
        _monitor = new MonitorListener(_connector);
        _subscription.MessageReceived += OnSubscriptionReceived;
        _subscription.Changed += OnSubscriptionChanged;
        _monitor.MonitorReceived += OnMonitorReceived;
        _connector.Connected += OnConnectionConnected;
        _transaction.TransactionQueued += OnTransactionQueued;
    }
    /// <summary>
    /// Begin buffered pipeline mode (calls return immediately; use EndPipe() to execute batch)
    /// </summary>
    public void StartPipe()
    {
        _connector.BeginPipe();
    }
    /// <summary>
    /// Begin buffered pipeline mode within the context of a transaction (calls return immediately; use EndPipe() to execute batch)
    /// </summary>
    public void StartPipeTransaction()
    {
        _connector.BeginPipe();
        Multi();
    }
    /// <summary>
    /// Execute pipeline commands
    /// </summary>
    /// <returns>Array of batched command results</returns>
    public object[] EndPipe()
    {
        // If a MULTI is active (StartPipeTransaction), the transaction owns the pipe.
        if (_transaction.Active)
            return _transaction.Execute();
        else
            return _connector.EndPipe();
    }
    /// <summary>
    /// Stream a BULK reply from the server using default buffer size
    /// </summary>
    /// <typeparam name="T">Response type</typeparam>
    /// <param name="destination">Destination stream</param>
    /// <param name="func">Client command to execute (BULK reply only)</param>
    public void StreamTo<T>(Stream destination, Func<IRedisClientSync, T> func)
    {
        StreamTo(destination, DefaultBufferSize, func);
    }
    /// <summary>
    /// Stream a BULK reply from the server
    /// </summary>
    /// <typeparam name="T">Response type</typeparam>
    /// <param name="destination">Destination stream</param>
    /// <param name="bufferSize">Size of buffer used to write server response</param>
    /// <param name="func">Client command to execute (BULK reply only)</param>
    public void StreamTo<T>(Stream destination, int bufferSize, Func<IRedisClientSync, T> func)
    {
        // _streaming suppresses normal reply parsing so the raw reply can be
        // copied into the destination stream below.
        _streaming = true;
        func(this);
        _streaming = false;
        _connector.Read(destination, bufferSize);
    }
    /// <summary>
    /// Dispose all resources used by the current RedisClient
    /// </summary>
    public void Dispose()
    {
        _connector.Dispose();
    }
    // The following handlers re-raise internal listener events on this client's
    // public events, with the client itself as the sender.
    void OnMonitorReceived(object sender, RedisMonitorEventArgs obj)
    {
        if (MonitorReceived != null)
            MonitorReceived(this, obj);
    }
    void OnSubscriptionReceived(object sender, RedisSubscriptionReceivedEventArgs args)
    {
        if (SubscriptionReceived != null)
            SubscriptionReceived(this, args);
    }
    void OnSubscriptionChanged(object sender, RedisSubscriptionChangedEventArgs args)
    {
        if (SubscriptionChanged != null)
            SubscriptionChanged(this, args);
    }
    void OnConnectionConnected(object sender, EventArgs args)
    {
        if (Connected != null)
            Connected(this, args);
    }
    void OnTransactionQueued(object sender, RedisTransactionQueuedEventArgs args)
    {
        if (TransactionQueued != null)
            TransactionQueued(this, args);
    }
    // Resolve the host string from whichever EndPoint subtype the connector holds.
    string GetHost()
    {
        if (_connector.EndPoint is IPEndPoint)
            return (_connector.EndPoint as IPEndPoint).Address.ToString();
        else if (_connector.EndPoint is DnsEndPoint)
            return (_connector.EndPoint as DnsEndPoint).Host;
        else
            return null;
    }
    // Resolve the port from whichever EndPoint subtype the connector holds; -1 if unknown.
    int GetPort()
    {
        if (_connector.EndPoint is IPEndPoint)
            return (_connector.EndPoint as IPEndPoint).Port;
        else if (_connector.EndPoint is DnsEndPoint)
            return (_connector.EndPoint as DnsEndPoint).Port;
        else
            return -1;
    }
}
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 28,913 | src/transformers/models/cvt/modeling_cvt.py | # coding=utf-8
# Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch CvT model."""
import collections.abc
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import ImageClassifierOutputWithNoAttention, ModelOutput
from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import logging
from .configuration_cvt import CvtConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "CvtConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/cvt-13"
_EXPECTED_OUTPUT_SHAPE = [1, 384, 14, 14]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/cvt-13"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
CVT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/cvt-13",
"microsoft/cvt-13-384",
"microsoft/cvt-13-384-22k",
"microsoft/cvt-21",
"microsoft/cvt-21-384",
"microsoft/cvt-21-384-22k",
# See all Cvt models at https://huggingface.co/models?filter=cvt
]
@dataclass
class BaseModelOutputWithCLSToken(ModelOutput):
    """
    Base class for model's outputs, with potential hidden states and attentions.
    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        cls_token_value (`torch.FloatTensor` of shape `(batch_size, 1, hidden_size)`):
            Classification token at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
    """

    # Final hidden states of the last stage.
    last_hidden_state: torch.FloatTensor = None
    # Cls token from the last stage; None when the config disables it.
    cls_token_value: torch.FloatTensor = None
    # Per-stage hidden states; only populated when output_hidden_states is requested.
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
# Adapted from transformers.models.beit.modeling_beit.drop_path
def drop_path(input, drop_prob: float = 0.0, training: bool = False):
    """
    Stochastic depth: randomly zeroes entire samples in the main path of residual
    blocks, scaling the survivors by 1/keep_prob so the expected value is unchanged.

    A no-op at evaluation time or when `drop_prob` is 0. Note this differs from
    "DropConnect" despite the frequent naming confusion (see
    https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956).
    """
    if drop_prob == 0.0 or not training:
        return input
    survival_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dimensions.
    mask_shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    mask = torch.rand(mask_shape, dtype=input.dtype, device=input.device).add_(survival_prob).floor_()
    return input.div(survival_prob) * mask
# Adapted from transformers.models.beit.modeling_beit.BeitDropPath
class CvtDropPath(nn.Module):
    """Module wrapper around `drop_path` (per-sample stochastic depth)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Delegates to the functional form; active only while self.training is True.
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return f"p={self.drop_prob}"
class CvtEmbeddings(nn.Module):
    """
    CvT patch embedding stage: a strided convolutional projection followed by dropout.
    """

    def __init__(self, patch_size, num_channels, embed_dim, stride, padding, dropout_rate):
        super().__init__()
        self.convolution_embeddings = CvtConvEmbeddings(
            patch_size=patch_size,
            num_channels=num_channels,
            embed_dim=embed_dim,
            stride=stride,
            padding=padding,
        )
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, pixel_values):
        embeddings = self.convolution_embeddings(pixel_values)
        return self.dropout(embeddings)
class CvtConvEmbeddings(nn.Module):
    """
    Embeds an image (or feature map) via a strided convolution, layer-normalizing the
    resulting tokens over the channel dimension before restoring the 2D layout.
    """

    def __init__(self, patch_size, num_channels, embed_dim, stride, padding):
        super().__init__()
        if not isinstance(patch_size, collections.abc.Iterable):
            patch_size = (patch_size, patch_size)
        self.patch_size = patch_size
        self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=stride, padding=padding)
        self.normalization = nn.LayerNorm(embed_dim)

    def forward(self, pixel_values):
        pixel_values = self.projection(pixel_values)
        batch_size, num_channels, height, width = pixel_values.shape
        # "b c h w" -> "b (h w) c" so LayerNorm acts over the channel dim
        tokens = pixel_values.flatten(2).transpose(1, 2)
        if self.normalization:
            tokens = self.normalization(tokens)
        # "b (h w) c" -> "b c h w": restore the spatial layout for the next stage
        return tokens.transpose(1, 2).reshape(batch_size, num_channels, height, width)
class CvtSelfAttentionConvProjection(nn.Module):
    """
    Depthwise convolution (groups == channels, no bias) followed by batch norm; used to
    project the 2D token map before attention.
    """

    def __init__(self, embed_dim, kernel_size, padding, stride):
        super().__init__()
        self.convolution = nn.Conv2d(
            embed_dim,
            embed_dim,
            kernel_size=kernel_size,
            padding=padding,
            stride=stride,
            bias=False,
            groups=embed_dim,
        )
        self.normalization = nn.BatchNorm2d(embed_dim)

    def forward(self, hidden_state):
        return self.normalization(self.convolution(hidden_state))
class CvtSelfAttentionLinearProjection(nn.Module):
    """Flattens a (batch, channels, height, width) map into (batch, height*width, channels) tokens."""

    def forward(self, hidden_state):
        # "b c h w" -> "b (h w) c"
        return hidden_state.flatten(2).transpose(1, 2)
class CvtSelfAttentionProjection(nn.Module):
    """
    Token projection for attention: an optional depthwise-conv + batchnorm stage
    ("dw_bn") followed by flattening to a (batch, tokens, channels) sequence.
    """

    def __init__(self, embed_dim, kernel_size, padding, stride, projection_method="dw_bn"):
        super().__init__()
        # NOTE(review): only "dw_bn" assigns self.convolution_projection. For any other
        # projection_method (e.g. "linear", which CvtSelfAttention selects for queries
        # when qkv_projection_method is "avg"), forward() would raise AttributeError —
        # verify against the upstream CvT reference implementation.
        if projection_method == "dw_bn":
            self.convolution_projection = CvtSelfAttentionConvProjection(embed_dim, kernel_size, padding, stride)
        self.linear_projection = CvtSelfAttentionLinearProjection()

    def forward(self, hidden_state):
        hidden_state = self.convolution_projection(hidden_state)
        hidden_state = self.linear_projection(hidden_state)
        return hidden_state
class CvtSelfAttention(nn.Module):
    """
    Convolutional multi-head self-attention (CvT). Queries, keys and values are first
    produced by convolutional token projections over the 2D feature map, then by linear
    projections, before standard scaled dot-product attention. When `with_cls_token` is
    True, the classification token bypasses the convolutional projections and is
    re-attached before the linear projections.
    """

    def __init__(
        self,
        num_heads,
        embed_dim,
        kernel_size,
        padding_q,
        padding_kv,
        stride_q,
        stride_kv,
        qkv_projection_method,
        qkv_bias,
        attention_drop_rate,
        with_cls_token=True,
        **kwargs,
    ):
        super().__init__()
        # Scales scores by embed_dim**-0.5 (full embedding dim, not per-head dim).
        self.scale = embed_dim**-0.5
        self.with_cls_token = with_cls_token
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        # Queries fall back to a flatten-only "linear" projection when the configured
        # method is "avg"; keys/values keep the configured method (and stride_kv may
        # downsample them relative to the queries).
        self.convolution_projection_query = CvtSelfAttentionProjection(
            embed_dim,
            kernel_size,
            padding_q,
            stride_q,
            projection_method="linear" if qkv_projection_method == "avg" else qkv_projection_method,
        )
        self.convolution_projection_key = CvtSelfAttentionProjection(
            embed_dim, kernel_size, padding_kv, stride_kv, projection_method=qkv_projection_method
        )
        self.convolution_projection_value = CvtSelfAttentionProjection(
            embed_dim, kernel_size, padding_kv, stride_kv, projection_method=qkv_projection_method
        )
        self.projection_query = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
        self.projection_key = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
        self.projection_value = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
        self.dropout = nn.Dropout(attention_drop_rate)

    def rearrange_for_multi_head_attention(self, hidden_state):
        """Reshape (batch, tokens, embed_dim) to (batch, heads, tokens, head_dim)."""
        batch_size, hidden_size, _ = hidden_state.shape
        head_dim = self.embed_dim // self.num_heads
        # rearrange 'b t (h d) -> b h t d'
        return hidden_state.view(batch_size, hidden_size, self.num_heads, head_dim).permute(0, 2, 1, 3)

    def forward(self, hidden_state, height, width):
        if self.with_cls_token:
            # Detach the cls token: it must not pass through the conv projections.
            cls_token, hidden_state = torch.split(hidden_state, [1, height * width], 1)
        batch_size, hidden_size, num_channels = hidden_state.shape
        # rearrange "b (h w) c -> b c h w"
        hidden_state = hidden_state.permute(0, 2, 1).view(batch_size, num_channels, height, width)
        key = self.convolution_projection_key(hidden_state)
        query = self.convolution_projection_query(hidden_state)
        value = self.convolution_projection_value(hidden_state)
        if self.with_cls_token:
            # Re-attach the cls token as the first position of each sequence.
            query = torch.cat((cls_token, query), dim=1)
            key = torch.cat((cls_token, key), dim=1)
            value = torch.cat((cls_token, value), dim=1)
        head_dim = self.embed_dim // self.num_heads
        query = self.rearrange_for_multi_head_attention(self.projection_query(query))
        key = self.rearrange_for_multi_head_attention(self.projection_key(key))
        value = self.rearrange_for_multi_head_attention(self.projection_value(value))
        # Scores of shape (batch, heads, query_tokens, key_tokens).
        attention_score = torch.einsum("bhlk,bhtk->bhlt", [query, key]) * self.scale
        attention_probs = torch.nn.functional.softmax(attention_score, dim=-1)
        attention_probs = self.dropout(attention_probs)
        context = torch.einsum("bhlt,bhtv->bhlv", [attention_probs, value])
        # rearrange "b h t d -> b t (h d)": merge heads back into the embedding dim.
        _, _, hidden_size, _ = context.shape
        context = context.permute(0, 2, 1, 3).contiguous().view(batch_size, hidden_size, self.num_heads * head_dim)
        return context
class CvtSelfOutput(nn.Module):
    """
    Output projection after self-attention. The residual connection is defined in
    CvtLayer instead of here (as is the case with other models), due to the layernorm
    applied before each block.
    """

    def __init__(self, embed_dim, drop_rate):
        super().__init__()
        self.dense = nn.Linear(embed_dim, embed_dim)
        self.dropout = nn.Dropout(drop_rate)

    def forward(self, hidden_state, input_tensor):
        # input_tensor is intentionally unused; the caller performs the residual add.
        projected = self.dense(hidden_state)
        return self.dropout(projected)
class CvtAttention(nn.Module):
    """
    Full attention block: convolutional self-attention followed by an output
    projection. The residual connection is applied by the caller (CvtLayer).
    """

    def __init__(
        self,
        num_heads,
        embed_dim,
        kernel_size,
        padding_q,
        padding_kv,
        stride_q,
        stride_kv,
        qkv_projection_method,
        qkv_bias,
        attention_drop_rate,
        drop_rate,
        with_cls_token=True,
    ):
        super().__init__()
        self.attention = CvtSelfAttention(
            num_heads,
            embed_dim,
            kernel_size,
            padding_q,
            padding_kv,
            stride_q,
            stride_kv,
            qkv_projection_method,
            qkv_bias,
            attention_drop_rate,
            with_cls_token,
        )
        self.output = CvtSelfOutput(embed_dim, drop_rate)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        # NOTE(review): this looks copied from a BERT-style attention module. It reads
        # self.attention.num_attention_heads / attention_head_size / query / key / value,
        # none of which CvtSelfAttention defines (it has num_heads, projection_query, ...),
        # so calling it would raise AttributeError — confirm before relying on pruning.
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )
        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, hidden_state, height, width):
        self_output = self.attention(hidden_state, height, width)
        attention_output = self.output(self_output, hidden_state)
        return attention_output
class CvtIntermediate(nn.Module):
    """Feed-forward expansion: linear layer to int(mlp_ratio * embed_dim) followed by GELU."""

    def __init__(self, embed_dim, mlp_ratio):
        super().__init__()
        self.dense = nn.Linear(embed_dim, int(embed_dim * mlp_ratio))
        self.activation = nn.GELU()

    def forward(self, hidden_state):
        return self.activation(self.dense(hidden_state))
class CvtOutput(nn.Module):
    """Feed-forward contraction back to embed_dim, with dropout and a residual add."""

    def __init__(self, embed_dim, mlp_ratio, drop_rate):
        super().__init__()
        self.dense = nn.Linear(int(embed_dim * mlp_ratio), embed_dim)
        self.dropout = nn.Dropout(drop_rate)

    def forward(self, hidden_state, input_tensor):
        projected = self.dropout(self.dense(hidden_state))
        # Second residual connection of the transformer block.
        return projected + input_tensor
class CvtLayer(nn.Module):
    """
    CvtLayer composed by attention layers, normalization and multi-layer perceptrons (mlps).

    Pre-norm transformer block: layernorm -> conv attention -> drop-path -> residual,
    then layernorm -> MLP (CvtIntermediate + CvtOutput, which adds the second
    residual) -> drop-path.
    """

    def __init__(
        self,
        num_heads,
        embed_dim,
        kernel_size,
        padding_q,
        padding_kv,
        stride_q,
        stride_kv,
        qkv_projection_method,
        qkv_bias,
        attention_drop_rate,
        drop_rate,
        mlp_ratio,
        drop_path_rate,
        with_cls_token=True,
    ):
        super().__init__()
        self.attention = CvtAttention(
            num_heads,
            embed_dim,
            kernel_size,
            padding_q,
            padding_kv,
            stride_q,
            stride_kv,
            qkv_projection_method,
            qkv_bias,
            attention_drop_rate,
            drop_rate,
            with_cls_token,
        )
        self.intermediate = CvtIntermediate(embed_dim, mlp_ratio)
        self.output = CvtOutput(embed_dim, mlp_ratio, drop_rate)
        # Stochastic depth on both sub-blocks; identity when the rate is zero.
        self.drop_path = CvtDropPath(drop_prob=drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
        self.layernorm_before = nn.LayerNorm(embed_dim)
        self.layernorm_after = nn.LayerNorm(embed_dim)

    def forward(self, hidden_state, height, width):
        self_attention_output = self.attention(
            self.layernorm_before(hidden_state),  # in Cvt, layernorm is applied before self-attention
            height,
            width,
        )
        attention_output = self_attention_output
        attention_output = self.drop_path(attention_output)
        # first residual connection
        hidden_state = attention_output + hidden_state
        # in Cvt, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_state)
        layer_output = self.intermediate(layer_output)
        # second residual connection is done here (inside CvtOutput)
        layer_output = self.output(layer_output, hidden_state)
        layer_output = self.drop_path(layer_output)
        return layer_output
class CvtStage(nn.Module):
    """
    One CvT stage: a convolutional patch embedding that downsamples the feature map,
    followed by a stack of CvtLayer transformer blocks. A stage may prepend a learnable
    classification token (by default only the last stage does).
    """

    def __init__(self, config, stage):
        super().__init__()
        self.config = config
        self.stage = stage  # index of this stage, used to select per-stage config values
        if self.config.cls_token[self.stage]:
            # NOTE(review): sized with embed_dim[-1]; correct as long as cls_token is only
            # enabled for the last stage (the default config) — confirm for custom configs.
            self.cls_token = nn.Parameter(torch.randn(1, 1, self.config.embed_dim[-1]))
        self.embedding = CvtEmbeddings(
            patch_size=config.patch_sizes[self.stage],
            stride=config.patch_stride[self.stage],
            num_channels=config.num_channels if self.stage == 0 else config.embed_dim[self.stage - 1],
            embed_dim=config.embed_dim[self.stage],
            padding=config.patch_padding[self.stage],
            dropout_rate=config.drop_rate[self.stage],
        )
        # Linearly increasing stochastic-depth schedule across the layers of this stage.
        drop_path_rates = [x.item() for x in torch.linspace(0, config.drop_path_rate[self.stage], config.depth[stage])]
        self.layers = nn.Sequential(
            *[
                CvtLayer(
                    num_heads=config.num_heads[self.stage],
                    embed_dim=config.embed_dim[self.stage],
                    kernel_size=config.kernel_qkv[self.stage],
                    padding_q=config.padding_q[self.stage],
                    padding_kv=config.padding_kv[self.stage],
                    stride_kv=config.stride_kv[self.stage],
                    stride_q=config.stride_q[self.stage],
                    qkv_projection_method=config.qkv_projection_method[self.stage],
                    qkv_bias=config.qkv_bias[self.stage],
                    attention_drop_rate=config.attention_drop_rate[self.stage],
                    drop_rate=config.drop_rate[self.stage],
                    # NOTE(review): every layer gets drop_path_rates[self.stage], not a
                    # per-layer rate from the schedule computed above — looks like it
                    # should index the layer number; verify against the reference impl.
                    drop_path_rate=drop_path_rates[self.stage],
                    mlp_ratio=config.mlp_ratio[self.stage],
                    with_cls_token=config.cls_token[self.stage],
                )
                for _ in range(config.depth[self.stage])
            ]
        )

    def forward(self, hidden_state):
        cls_token = None
        hidden_state = self.embedding(hidden_state)
        batch_size, num_channels, height, width = hidden_state.shape
        # rearrange "b c h w -> b (h w) c"
        hidden_state = hidden_state.view(batch_size, num_channels, height * width).permute(0, 2, 1)
        if self.config.cls_token[self.stage]:
            # Prepend the (broadcast) cls token as the first sequence position.
            cls_token = self.cls_token.expand(batch_size, -1, -1)
            hidden_state = torch.cat((cls_token, hidden_state), dim=1)
        for layer in self.layers:
            layer_outputs = layer(hidden_state, height, width)
            hidden_state = layer_outputs
        if self.config.cls_token[self.stage]:
            # Separate the cls token again before restoring the 2D layout.
            cls_token, hidden_state = torch.split(hidden_state, [1, height * width], 1)
        hidden_state = hidden_state.permute(0, 2, 1).view(batch_size, num_channels, height, width)
        return hidden_state, cls_token
class CvtEncoder(nn.Module):
    """Stacks the CvtStage modules and optionally collects per-stage hidden states."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.stages = nn.ModuleList(CvtStage(config, stage_idx) for stage_idx in range(len(config.depth)))

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None
        hidden_state = pixel_values
        cls_token = None
        for stage_module in self.stages:
            # Each stage downsamples the map; only the last stage may emit a cls token.
            hidden_state, cls_token = stage_module(hidden_state)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, cls_token, all_hidden_states] if v is not None)
        return BaseModelOutputWithCLSToken(
            last_hidden_state=hidden_state,
            cls_token_value=cls_token,
            hidden_states=all_hidden_states,
        )
class CvtPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = CvtConfig
    base_model_prefix = "cvt"
    main_input_name = "pixel_values"

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Truncated-normal init for dense/conv weights; biases start at zero.
            module.weight.data = nn.init.trunc_normal_(module.weight.data, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, CvtStage):
            # Re-initialize the learnable cls token (replacing the randn used in
            # CvtStage.__init__) with a truncated normal of the configured std.
            if self.config.cls_token[module.stage]:
                module.cls_token.data = nn.init.trunc_normal_(
                    torch.zeros(1, 1, self.config.embed_dim[-1]), mean=0.0, std=self.config.initializer_range
                )
CVT_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`CvtConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
CVT_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CvtImageProcessor.__call__`]
for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare Cvt Model transformer outputting raw hidden-states without any specific head on top.",
    CVT_START_DOCSTRING,
)
class CvtModel(CvtPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        # NOTE(review): add_pooling_layer is accepted for API symmetry with other HF
        # models but is never used here — confirm this is intentional.
        super().__init__(config)
        self.config = config
        self.encoder = CvtEncoder(config)
        # Initialize weights and apply final processing.
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        # NOTE(review): CvtEncoder exposes `.stages`, not `.layer`; as written this
        # would raise AttributeError if called — verify before relying on pruning.
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(CVT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithCLSToken,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithCLSToken]:
        # Fall back to config-level defaults when the caller does not specify.
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]
        return BaseModelOutputWithCLSToken(
            last_hidden_state=sequence_output,
            cls_token_value=encoder_outputs.cls_token_value,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    Cvt Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
    the [CLS] token) e.g. for ImageNet.
    """,
    CVT_START_DOCSTRING,
)
class CvtForImageClassification(CvtPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.cvt = CvtModel(config, add_pooling_layer=False)
        self.layernorm = nn.LayerNorm(config.embed_dim[-1])
        # Classifier head (identity when num_labels == 0, i.e. feature extraction).
        self.classifier = (
            nn.Linear(config.embed_dim[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CVT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.cvt(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        cls_token = outputs[1]
        if self.config.cls_token[-1]:
            # Classify from the layernormed cls token when the last stage produces one.
            sequence_output = self.layernorm(cls_token)
        else:
            # Otherwise mean-pool the final feature map over spatial positions below.
            batch_size, num_channels, height, width = sequence_output.shape
            # rearrange "b c h w -> b (h w) c"
            sequence_output = sequence_output.view(batch_size, num_channels, height * width).permute(0, 2, 1)
            sequence_output = self.layernorm(sequence_output)
        sequence_output_mean = sequence_output.mean(dim=1)
        logits = self.classifier(sequence_output_mean)
        loss = None
        if labels is not None:
            # Infer and cache the problem type from num_labels and label dtype
            # (standard HF classification-head convention).
            if self.config.problem_type is None:
                if self.config.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.config.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 6,860 | src/transformers/models/cvt/configuration_cvt.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" CvT model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`CvtModel`]. It is used to instantiate a CvT model
    according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the CvT
    [microsoft/cvt-13](https://huggingface.co/microsoft/cvt-13) architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        patch_sizes (`List[int]`, *optional*, defaults to `[7, 3, 3]`):
            The kernel size of each encoder's patch embedding.
        patch_stride (`List[int]`, *optional*, defaults to `[4, 2, 2]`):
            The stride size of each encoder's patch embedding.
        patch_padding (`List[int]`, *optional*, defaults to `[2, 1, 1]`):
            The padding size of each encoder's patch embedding.
        embed_dim (`List[int]`, *optional*, defaults to `[64, 192, 384]`):
            Dimension of each of the encoder blocks.
        num_heads (`List[int]`, *optional*, defaults to `[1, 3, 6]`):
            Number of attention heads for each attention layer in each block of the Transformer encoder.
        depth (`List[int]`, *optional*, defaults to `[1, 2, 10]`):
            The number of layers in each encoder block.
        mlp_ratio (`List[float]`, *optional*, defaults to `[4.0, 4.0, 4.0]`):
            Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
            encoder blocks.
        attention_drop_rate (`List[float]`, *optional*, defaults to `[0.0, 0.0, 0.0]`):
            The dropout ratio for the attention probabilities.
        drop_rate (`List[float]`, *optional*, defaults to `[0.0, 0.0, 0.0]`):
            The dropout ratio for the patch embeddings probabilities.
        drop_path_rate (`List[float]`, *optional*, defaults to `[0.0, 0.0, 0.1]`):
            The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
        qkv_bias (`List[bool]`, *optional*, defaults to `[True, True, True]`):
            The bias bool for query, key and value in attentions
        cls_token (`List[bool]`, *optional*, defaults to `[False, False, True]`):
            Whether or not to add a classification token to the output of each of the last 3 stages.
        qkv_projection_method (`List[string]`, *optional*, defaults to `["dw_bn", "dw_bn", "dw_bn"]`):
            The projection method for query, key and value Default is depth-wise convolutions with batch norm. For
            Linear projection use "avg".
        kernel_qkv (`List[int]`, *optional*, defaults to `[3, 3, 3]`):
            The kernel size for query, key and value in attention layer
        padding_kv (`List[int]`, *optional*, defaults to `[1, 1, 1]`):
            The padding size for key and value in attention layer
        stride_kv (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
            The stride size for key and value in attention layer
        padding_q (`List[int]`, *optional*, defaults to `[1, 1, 1]`):
            The padding size for query in attention layer
        stride_q (`List[int]`, *optional*, defaults to `[1, 1, 1]`):
            The stride size for query in attention layer
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
    Example:
    ```python
    >>> from transformers import CvtConfig, CvtModel

    >>> # Initializing a Cvt msft/cvt style configuration
    >>> configuration = CvtConfig()

    >>> # Initializing a model (with random weights) from the msft/cvt style configuration
    >>> model = CvtModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "cvt"

    # NOTE(review): the list defaults below are shared across calls (Python
    # mutable-default pitfall). This is safe only as long as the lists are never
    # mutated in place by callers or by config serialization — verify.
    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
|
2881099/csredis | 91,001 | src/CSRedisCore/RedisHelper.cs | using CSRedis;
using System;
using System.Collections.Generic;
using System.Collections.Concurrent;
using System.Diagnostics;
using System.Linq;
using System.Threading;
using System.IO;
/// <summary>
/// Non-generic convenience facade over RedisHelper&lt;TMark&gt;.
/// </summary>
public abstract class RedisHelper : RedisHelper<RedisHelper> { }
public abstract partial class RedisHelper<TMark>
{
/// <summary>
/// Sentinel TTL meaning "never expires" (-1).
/// </summary>
public static readonly int NeverExpired = -1;
// Thread-local Random: System.Random is not thread-safe, so each thread gets its own.
internal static ThreadLocal<Random> rnd = new ThreadLocal<Random>(()=> new Random());//fix
/// <summary>
/// Returns a random TTL in seconds within the given bounds; used to stagger key
/// expirations and avoid a cache avalanche (many keys expiring at the same instant).
/// </summary>
/// <param name="minTimeoutSeconds">Minimum seconds (inclusive)</param>
/// <param name="maxTimeoutSeconds">Maximum seconds (exclusive)</param>
/// <returns>Random number of seconds in [min, max)</returns>
public static int RandomExpired(int minTimeoutSeconds, int maxTimeoutSeconds) => rnd.Value.Next(minTimeoutSeconds, maxTimeoutSeconds);
private static CSRedisClient _instance;
/// <summary>
/// Static CSRedisClient instance; must be assigned via Initialization before first use:
/// RedisHelper.Initialization(new CSRedis.CSRedisClient("127.0.0.1:6379,password=123,defaultDatabase=13,poolsize=50,ssl=false,writeBuffer=10240,prefix=keyPrefix"))
/// </summary>
public static CSRedisClient Instance
{
    get
    {
        // Throws when Initialization has not been called yet (message kept in original language).
        if (_instance == null) throw new Exception("使用前请初始化 RedisHelper.Initialization(new CSRedis.CSRedisClient(\"127.0.0.1:6379,password=123,defaultDatabase=13,poolsize=50,ssl=false,writeBuffer=10240,prefix=key前辍\"));");
        return _instance;
    }
}
/// <summary>
/// All connection-pool nodes of the underlying client, keyed by node identifier.
/// </summary>
public static ConcurrentDictionary<string, RedisClientPool> Nodes => Instance.Nodes;
/// <summary>
/// The key prefix configured in the connection string.
/// </summary>
public static string Prefix => Nodes.First().Value.Prefix;
/// <summary>
/// Initializes the static facade with a concrete client instance.
/// RedisHelper.Initialization(new CSRedis.CSRedisClient("127.0.0.1:6379,password=123,defaultDatabase=13,poolsize=50,ssl=false,writeBuffer=10240,prefix=keyPrefix"))
/// </summary>
/// <param name="csredis">Client that all static calls delegate to</param>
public static void Initialization(CSRedisClient csredis)
{
    _instance = csredis;
}
#region 缓存壳
/// <summary>
/// Cache-aside shell: returns the cached value for the key, or invokes
/// <paramref name="getData"/> on a miss, stores the result with the given TTL, and returns it.
/// </summary>
/// <typeparam name="T">Cached value type</typeparam>
/// <param name="key">Key without the configured prefix</param>
/// <param name="timeoutSeconds">Cache TTL in seconds</param>
/// <param name="getData">Factory invoked on cache miss</param>
/// <returns></returns>
public static T CacheShell<T>(string key, int timeoutSeconds, Func<T> getData) => Instance.CacheShell(key, timeoutSeconds, getData);
/// <summary>
/// Cache-aside shell over a hash field: same as above but the value lives in
/// hash <paramref name="key"/> under <paramref name="field"/>.
/// </summary>
/// <typeparam name="T">Cached value type</typeparam>
/// <param name="key">Key without the configured prefix</param>
/// <param name="field">Hash field</param>
/// <param name="timeoutSeconds">Cache TTL in seconds</param>
/// <param name="getData">Factory invoked on cache miss</param>
/// <returns></returns>
public static T CacheShell<T>(string key, string field, int timeoutSeconds, Func<T> getData) => Instance.CacheShell(key, field, timeoutSeconds, getData);
/// <summary>
/// Cache-aside shell over a hash: each element of <paramref name="fields"/> is cached
/// as its own slice for maximum reuse; only missing fields are fetched.
/// </summary>
/// <typeparam name="T">Cached value type</typeparam>
/// <param name="key">Key without the configured prefix</param>
/// <param name="fields">Hash fields</param>
/// <param name="timeoutSeconds">Cache TTL in seconds</param>
/// <param name="getData">Factory invoked with the uncached fields; must return (field, value)[]</param>
/// <returns></returns>
public static (string key, T value)[] CacheShell<T>(string key, string[] fields, int timeoutSeconds, Func<string[], (string, T)[]> getData) => Instance.CacheShell(key, fields, timeoutSeconds, getData);
#endregion
/// <summary>
/// Starts a pipeline and submits the commands queued by the supplied handler.
/// </summary>
/// <param name="handler">Callback that queues commands on the pipeline</param>
/// <returns>Results of the piped commands, in order</returns>
public static object[] StartPipe(Action<CSRedisClientPipe<string>> handler) => Instance.StartPipe(handler);
/// <summary>
/// Starts a pipeline; batch and submit like: RedisHelper.StartPipe().Set("a", "1").HSet("b", "f", "2").EndPipe();
/// </summary>
/// <returns></returns>
public static CSRedisClientPipe<string> StartPipe() => Instance.StartPipe();
#region 服务器命令
/// <summary>
/// Runs server commands on every partition node.
/// </summary>
public static CSRedisClient.NodesServerManagerProvider NodesServerManager => Instance.NodesServerManager;
/// <summary>
/// Runs server commands on the specified partition node.
/// </summary>
/// <param name="node">Node identifier</param>
/// <returns></returns>
public static CSRedisClient.NodeServerManagerProvider NodeServerManager(string node) => Instance.NodeServerManager(node);
#endregion
#region 连接命令
/// <summary>
/// Echoes a string on the specified partition node.
/// </summary>
/// <param name="nodeKey">Partition key</param>
/// <param name="message">Message</param>
/// <returns></returns>
public static string Echo(string nodeKey, string message) => Instance.Echo(nodeKey, message);
/// <summary>
/// Echoes a string.
/// </summary>
/// <param name="message">Message</param>
/// <returns></returns>
public static string Echo(string message) => Instance.Echo(message);
/// <summary>
/// Checks whether the service on the specified partition node is running.
/// </summary>
/// <param name="nodeKey">Partition key</param>
/// <returns></returns>
public static bool Ping(string nodeKey) => Instance.Ping(nodeKey);
/// <summary>
/// Checks whether the service is running.
/// </summary>
/// <returns></returns>
public static bool Ping() => Instance.Ping();
#endregion
#region Script
/// <summary>
/// Executes a Lua script.
/// </summary>
/// <param name="script">Lua script</param>
/// <param name="key">Used to locate the partition node; without the configured prefix</param>
/// <param name="args">Script arguments</param>
/// <returns></returns>
public static object Eval(string script, string key, params object[] args) => Instance.Eval(script, key, args);
/// <summary>
/// Executes a cached Lua script by its sha1.
/// </summary>
/// <param name="sha1">sha1 of the cached script</param>
/// <param name="key">Used to locate the partition node; without the configured prefix</param>
/// <param name="args">Script arguments</param>
/// <returns></returns>
public static object EvalSHA(string sha1, string key, params object[] args) => Instance.EvalSHA(sha1, key, args);
/// <summary>
/// Checks whether the scripts are cached on every partition node; any node missing a sha1 yields false for it.
/// </summary>
/// <param name="sha1">sha1 of the cached script</param>
/// <returns></returns>
public static bool[] ScriptExists(params string[] sha1) => Instance.ScriptExists(sha1);
/// <summary>
/// Clears all cached Lua scripts on every partition node.
/// </summary>
public static void ScriptFlush() => Instance.ScriptFlush();
/// <summary>
/// Kills the currently running Lua script on every partition node.
/// </summary>
public static void ScriptKill() => Instance.ScriptKill();
/// <summary>
/// Caches the script on every partition node and returns its sha1 (the same script hashes to the same sha1 on any server).
/// </summary>
/// <param name="script">Lua script</param>
/// <returns></returns>
public static string ScriptLoad(string script) => Instance.ScriptLoad(script);
#endregion
#region Pub/Sub
/// <summary>
/// Publishes a message to the channel on the resolved partition node; the published payload is formatted as: 1|message (message-id header + body).
/// </summary>
/// <param name="channel">Channel name</param>
/// <param name="message">Message text</param>
/// <returns></returns>
public static long Publish(string channel, string message) => Instance.Publish(channel, message);
/// <summary>
/// Publishes a message to the channel on the resolved partition node; unlike Publish, no message-id header (the "1|" prefix) is included.
/// </summary>
/// <param name="channel">Channel name</param>
/// <param name="message">Message text</param>
/// <returns></returns>
public static long PublishNoneMessageId(string channel, string message) => Instance.PublishNoneMessageId(channel, message);
/// <summary>
/// Lists all subscribed channels matching the pattern.
/// </summary>
/// <param name="pattern">Match pattern</param>
/// <returns></returns>
public static string[] PubSubChannels(string pattern) => Instance.PubSubChannels(pattern);
/// <summary>
/// Returns the number of pattern subscribers.<para></para>
/// Note: in partition mode, subscriptions held by other clients may not be reported.
/// </summary>
/// <returns></returns>
public static long PubSubNumPat() => Instance.PubSubNumPat();
/// <summary>
/// Returns the number of subscribers per channel.<para></para>
/// Note: in partition mode, subscriptions held by other clients may not be reported.
/// </summary>
/// <param name="channels">Channels</param>
/// <returns></returns>
public static Dictionary<string, long> PubSubNumSub(params string[] channels) => Instance.PubSubNumSub(channels);
/// <summary>
/// Subscribes per partition rules and returns a SubscribeObject, e.g.
/// Subscribe(("chan1", msg => Console.WriteLine(msg.Body)), ("chan2", msg => Console.WriteLine(msg.Body)))
/// </summary>
/// <param name="channels">Channel/handler pairs</param>
/// <returns>Object that can stop the subscription</returns>
public static CSRedisClient.SubscribeObject Subscribe(params (string, Action<CSRedisClient.SubscribeMessageEventArgs>)[] channels) => Instance.Subscribe(channels);
/// <summary>
/// Pattern-subscribes across all partition nodes (each message handled only once) and returns a PSubscribeObject, e.g.
/// PSubscribe(new [] { "chan1*", "chan2*" }, msg => Console.WriteLine(msg.Body))
/// </summary>
/// <param name="channelPatterns">Channel patterns</param>
/// <param name="pmessage">Message handler</param>
/// <returns>Object that can stop the pattern subscription</returns>
public static CSRedisClient.PSubscribeObject PSubscribe(string[] channelPatterns, Action<CSRedisClient.PSubscribePMessageEventArgs> pmessage) => Instance.PSubscribe(channelPatterns, pmessage);
#endregion
#region 使用列表实现订阅发布 lpush + blpop
/// <summary>
/// lpush + blpop based subscription (broadcast, non-competing mode): every subscriber receives the message.
/// </summary>
/// <param name="listKey">List key, without the configured prefix</param>
/// <param name="clientId">Subscriber identity; duplicate ids compete for messages, a unique id is guaranteed delivery</param>
/// <param name="onMessage">Message handler</param>
/// <returns></returns>
public static CSRedisClient.SubscribeListBroadcastObject SubscribeListBroadcast(string listKey, string clientId, Action<string> onMessage) => Instance.SubscribeListBroadcast(listKey, clientId, onMessage);
/// <summary>
/// lpush + blpop based subscription (competing-consumer mode): only one subscriber receives each message.
/// </summary>
/// <param name="listKey">List key, without the configured prefix</param>
/// <param name="onMessage">Message handler</param>
/// <returns></returns>
public static CSRedisClient.SubscribeListObject SubscribeList(string listKey, Action<string> onMessage) => Instance.SubscribeList(listKey, onMessage);
/// <summary>
/// lpush + blpop based subscription (competing-consumer mode): only one subscriber receives each message.
/// </summary>
/// <param name="listKeys">One or more list keys, without the configured prefix</param>
/// <param name="onMessage">Message handler; arg1: key, arg2: message body</param>
/// <returns></returns>
public static CSRedisClient.SubscribeListObject SubscribeList(string[] listKeys, Action<string, string> onMessage) => Instance.SubscribeList(listKeys, onMessage);
#endregion
#region HyperLogLog
/// <summary>
/// Adds the specified elements to a HyperLogLog.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="elements">Elements to add</param>
/// <returns></returns>
public static bool PfAdd<T>(string key, params T[] elements) => Instance.PfAdd(key, elements);
/// <summary>
/// Returns the approximate cardinality of the given HyperLogLogs.<para></para>
/// Note: in partition mode, an error is raised if the keys span multiple partition nodes.
/// </summary>
/// <param name="keys">Keys, without the configured prefix</param>
/// <returns></returns>
public static long PfCount(params string[] keys) => Instance.PfCount(keys);
/// <summary>
/// Merges multiple HyperLogLogs into one.<para></para>
/// Note: in partition mode, an error is raised if the keys span multiple partition nodes.
/// </summary>
/// <param name="destKey">Destination HyperLogLog, without the configured prefix</param>
/// <param name="sourceKeys">Source HyperLogLogs, without the configured prefix</param>
/// <returns></returns>
public static bool PfMerge(string destKey, params string[] sourceKeys) => Instance.PfMerge(destKey, sourceKeys);
#endregion
#region Sorted Set
/// <summary>
/// Adds one or more members to a sorted set, or updates the score of existing members.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="scoreMembers">One or more (score, member) pairs</param>
/// <returns></returns>
public static long ZAdd(string key, params (decimal, object)[] scoreMembers) => Instance.ZAdd(key, scoreMembers);
/// <summary>
/// Returns the number of members in the sorted set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public static long ZCard(string key) => Instance.ZCard(key);
/// <summary>
/// Counts the members of the sorted set whose score falls within the given range.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score (e.g. decimal.MinValue, 1)</param>
/// <param name="max">Maximum score (e.g. decimal.MaxValue, 10)</param>
/// <returns></returns>
public static long ZCount(string key, decimal min, decimal max) => Instance.ZCount(key, min, max);
/// <summary>
/// Counts the members of the sorted set whose score falls within the given range.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score as a string (e.g. -inf, (1, 1)</param>
/// <param name="max">Maximum score as a string (e.g. +inf, (10, 10)</param>
/// <returns></returns>
public static long ZCount(string key, string min, string max) => Instance.ZCount(key, min, max);
/// <summary>
/// Increments the score of the given member in the sorted set by increment.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="member">Member</param>
/// <param name="increment">Increment (default 1)</param>
/// <returns></returns>
public static decimal ZIncrBy(string key, string member, decimal increment = 1) => Instance.ZIncrBy(key, member, increment);
/// <summary>
/// Computes the intersection of one or more sorted sets and stores the result in the new sorted set destination.
/// </summary>
/// <param name="destination">Destination sorted set, without the configured prefix</param>
/// <param name="weights">WEIGHTS option: one multiplication factor per input set; when omitted the factor defaults to 1</param>
/// <param name="aggregate">Sum | Min | Max</param>
/// <param name="keys">One or more sorted sets, without the configured prefix</param>
/// <returns></returns>
public static long ZInterStore(string destination, decimal[] weights, RedisAggregate aggregate, params string[] keys) => Instance.ZInterStore(destination, weights, aggregate, keys);
/// <summary>
/// Returns the members in the given index range of the sorted set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start index; 0 is the first element, -1 the last</param>
/// <param name="stop">Stop index; 0 is the first element, -1 the last</param>
/// <returns></returns>
public static string[] ZRange(string key, long start, long stop) => Instance.ZRange(key, start, stop);
/// <summary>
/// Returns the members in the given index range of the sorted set.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start index; 0 is the first element, -1 the last</param>
/// <param name="stop">Stop index; 0 is the first element, -1 the last</param>
/// <returns></returns>
public static T[] ZRange<T>(string key, long start, long stop) => Instance.ZRange<T>(key, start, stop);
/// <summary>
/// Returns the members and their scores in the given index range of the sorted set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start index; 0 is the first element, -1 the last</param>
/// <param name="stop">Stop index; 0 is the first element, -1 the last</param>
/// <returns></returns>
public static (string member, decimal score)[] ZRangeWithScores(string key, long start, long stop) => Instance.ZRangeWithScores(key, start, stop);
/// <summary>
/// Returns the members and their scores in the given index range of the sorted set.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start index; 0 is the first element, -1 the last</param>
/// <param name="stop">Stop index; 0 is the first element, -1 the last</param>
/// <returns></returns>
public static (T member, decimal score)[] ZRangeWithScores<T>(string key, long start, long stop) => Instance.ZRangeWithScores<T>(key, start, stop);
/// <summary>
/// Returns the members of the sorted set within the given score range.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score (e.g. decimal.MinValue, 1)</param>
/// <param name="max">Maximum score (e.g. decimal.MaxValue, 10)</param>
/// <param name="limit">Maximum number of members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public static string[] ZRangeByScore(string key, decimal min, decimal max, long? limit = null, long offset = 0) =>
Instance.ZRangeByScore(key, min, max, limit, offset);
/// <summary>
/// Returns the members of the sorted set within the given score range.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score (e.g. decimal.MinValue, 1)</param>
/// <param name="max">Maximum score (e.g. decimal.MaxValue, 10)</param>
/// <param name="limit">Maximum number of members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public static T[] ZRangeByScore<T>(string key, decimal min, decimal max, long? limit = null, long offset = 0) =>
Instance.ZRangeByScore<T>(key, min, max, limit, offset);
/// <summary>
/// Returns the members of the sorted set within the given score range.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score as a string (e.g. -inf, (1, 1)</param>
/// <param name="max">Maximum score as a string (e.g. +inf, (10, 10)</param>
/// <param name="limit">Maximum number of members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public static string[] ZRangeByScore(string key, string min, string max, long? limit = null, long offset = 0) =>
Instance.ZRangeByScore(key, min, max, limit, offset);
/// <summary>
/// Returns the members of the sorted set within the given score range.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score as a string (e.g. -inf, (1, 1)</param>
/// <param name="max">Maximum score as a string (e.g. +inf, (10, 10)</param>
/// <param name="limit">Maximum number of members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public static T[] ZRangeByScore<T>(string key, string min, string max, long? limit = null, long offset = 0) =>
Instance.ZRangeByScore<T>(key, min, max, limit, offset);
/// <summary>
/// Returns the members and their scores of the sorted set within the given score range.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score (e.g. decimal.MinValue, 1)</param>
/// <param name="max">Maximum score (e.g. decimal.MaxValue, 10)</param>
/// <param name="limit">Maximum number of members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public static (string member, decimal score)[] ZRangeByScoreWithScores(string key, decimal min, decimal max, long? limit = null, long offset = 0) =>
Instance.ZRangeByScoreWithScores(key, min, max, limit, offset);
/// <summary>
/// Returns the members and their scores of the sorted set within the given score range.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score (e.g. decimal.MinValue, 1)</param>
/// <param name="max">Maximum score (e.g. decimal.MaxValue, 10)</param>
/// <param name="limit">Maximum number of members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public static (T member, decimal score)[] ZRangeByScoreWithScores<T>(string key, decimal min, decimal max, long? limit = null, long offset = 0) =>
Instance.ZRangeByScoreWithScores<T>(key, min, max, limit, offset);
/// <summary>
/// Returns the members and their scores of the sorted set within the given score range.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score as a string (e.g. -inf, (1, 1)</param>
/// <param name="max">Maximum score as a string (e.g. +inf, (10, 10)</param>
/// <param name="limit">Maximum number of members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public static (string member, decimal score)[] ZRangeByScoreWithScores(string key, string min, string max, long? limit = null, long offset = 0) =>
Instance.ZRangeByScoreWithScores(key, min, max, limit, offset);
/// <summary>
/// Returns the members and their scores of the sorted set within the given score range.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score as a string (e.g. -inf, (1, 1)</param>
/// <param name="max">Maximum score as a string (e.g. +inf, (10, 10)</param>
/// <param name="limit">Maximum number of members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public static (T member, decimal score)[] ZRangeByScoreWithScores<T>(string key, string min, string max, long? limit = null, long offset = 0) =>
Instance.ZRangeByScoreWithScores<T>(key, min, max, limit, offset);
/// <summary>
/// Returns the index (rank) of the specified member in the sorted set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="member">Member</param>
/// <returns></returns>
public static long? ZRank(string key, object member) => Instance.ZRank(key, member);
/// <summary>
/// Removes one or more members from the sorted set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="member">One or more members</param>
/// <returns></returns>
public static long ZRem<T>(string key, params T[] member) => Instance.ZRem(key, member);
/// <summary>
/// Removes all members of the sorted set within the given rank range.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start index; 0 is the first element, -1 the last</param>
/// <param name="stop">Stop index; 0 is the first element, -1 the last</param>
/// <returns></returns>
public static long ZRemRangeByRank(string key, long start, long stop) => Instance.ZRemRangeByRank(key, start, stop);
/// <summary>
/// Removes all members of the sorted set within the given score range.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score (e.g. decimal.MinValue, 1)</param>
/// <param name="max">Maximum score (e.g. decimal.MaxValue, 10)</param>
/// <returns></returns>
public static long ZRemRangeByScore(string key, decimal min, decimal max) => Instance.ZRemRangeByScore(key, min, max);
/// <summary>
/// Removes all members of the sorted set within the given score range.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">Minimum score as a string (e.g. -inf, (1, 1)</param>
/// <param name="max">Maximum score as a string (e.g. +inf, (10, 10)</param>
/// <returns></returns>
public static long ZRemRangeByScore(string key, string min, string max) => Instance.ZRemRangeByScore(key, min, max);
/// <summary>
/// Returns the members in the given index range of the sorted set, with scores ordered from high to low.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start index; 0 is the first element, -1 the last</param>
/// <param name="stop">Stop index; 0 is the first element, -1 the last</param>
/// <returns></returns>
public static string[] ZRevRange(string key, long start, long stop) => Instance.ZRevRange(key, start, stop);
/// <summary>
/// Returns the members in the given index range of the sorted set, with scores ordered from high to low.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start index; 0 is the first element, -1 the last</param>
/// <param name="stop">Stop index; 0 is the first element, -1 the last</param>
/// <returns></returns>
public static T[] ZRevRange<T>(string key, long start, long stop) => Instance.ZRevRange<T>(key, start, stop);
/// <summary>
/// Returns the members and their scores in the given index range of the sorted set, with scores ordered from high to low.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start index; 0 is the first element, -1 the last</param>
/// <param name="stop">Stop index; 0 is the first element, -1 the last</param>
/// <returns></returns>
public static (string member, decimal score)[] ZRevRangeWithScores(string key, long start, long stop) => Instance.ZRevRangeWithScores(key, start, stop);
/// <summary>
/// Returns the members and their scores in the given index range of the sorted set, with scores ordered from high to low.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start index; 0 is the first element, -1 the last</param>
/// <param name="stop">Stop index; 0 is the first element, -1 the last</param>
/// <returns></returns>
public static (T member, decimal score)[] ZRevRangeWithScores<T>(string key, long start, long stop) => Instance.ZRevRangeWithScores<T>(key, start, stop);
/// <summary>
/// Returns the members of the sorted set within the given score range, ordered from high to low score.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="max">Maximum score (e.g. decimal.MaxValue, 10)</param>
/// <param name="min">Minimum score (e.g. decimal.MinValue, 1)</param>
/// <param name="limit">Maximum number of members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public static string[] ZRevRangeByScore(string key, decimal max, decimal min, long? limit = null, long? offset = 0) => Instance.ZRevRangeByScore(key, max, min, limit, offset);
/// <summary>
/// Returns the members of the sorted set within the given score range, ordered from high to low score.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="max">Maximum score (e.g. decimal.MaxValue, 10)</param>
/// <param name="min">Minimum score (e.g. decimal.MinValue, 1)</param>
/// <param name="limit">Maximum number of members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public static T[] ZRevRangeByScore<T>(string key, decimal max, decimal min, long? limit = null, long offset = 0) =>
Instance.ZRevRangeByScore<T>(key, max, min, limit, offset);
/// <summary>
/// Returns the members of the sorted set within the given score range, ordered from high to low score.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="max">Maximum score as a string (e.g. +inf, (10, 10)</param>
/// <param name="min">Minimum score as a string (e.g. -inf, (1, 1)</param>
/// <param name="limit">Maximum number of members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public static string[] ZRevRangeByScore(string key, string max, string min, long? limit = null, long? offset = 0) => Instance.ZRevRangeByScore(key, max, min, limit, offset);
/// <summary>
/// Returns the members of the sorted set within the given score range, ordered from high to low score.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="max">Maximum score as a string (e.g. +inf, (10, 10)</param>
/// <param name="min">Minimum score as a string (e.g. -inf, (1, 1)</param>
/// <param name="limit">Maximum number of members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public static T[] ZRevRangeByScore<T>(string key, string max, string min, long? limit = null, long offset = 0) =>
Instance.ZRevRangeByScore<T>(key, max, min, limit, offset);
/// <summary>
/// Returns the members and their scores of the sorted set within the given score range, ordered from high to low score.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="max">Maximum score (e.g. decimal.MaxValue, 10)</param>
/// <param name="min">Minimum score (e.g. decimal.MinValue, 1)</param>
/// <param name="limit">Maximum number of members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public static (string member, decimal score)[] ZRevRangeByScoreWithScores(string key, decimal max, decimal min, long? limit = null, long offset = 0) =>
Instance.ZRevRangeByScoreWithScores(key, max, min, limit, offset);
/// <summary>
/// Returns the members and their scores of the sorted set within the given score range, ordered from high to low score.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="max">Maximum score (e.g. decimal.MaxValue, 10)</param>
/// <param name="min">Minimum score (e.g. decimal.MinValue, 1)</param>
/// <param name="limit">Maximum number of members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public static (T member, decimal score)[] ZRevRangeByScoreWithScores<T>(string key, decimal max, decimal min, long? limit = null, long offset = 0) =>
Instance.ZRevRangeByScoreWithScores<T>(key, max, min, limit, offset);
/// <summary>
/// Returns the members and their scores of the sorted set within the given score range, ordered from high to low score.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="max">Maximum score as a string (e.g. +inf, (10, 10)</param>
/// <param name="min">Minimum score as a string (e.g. -inf, (1, 1)</param>
/// <param name="limit">Maximum number of members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public static (string member, decimal score)[] ZRevRangeByScoreWithScores(string key, string max, string min, long? limit = null, long offset = 0) =>
Instance.ZRevRangeByScoreWithScores(key, max, min, limit, offset);
/// <summary>
/// Returns the members and their scores of the sorted set within the given score range, ordered from high to low score.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="max">Maximum score as a string (e.g. +inf, (10, 10)</param>
/// <param name="min">Minimum score as a string (e.g. -inf, (1, 1)</param>
/// <param name="limit">Maximum number of members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public static (T member, decimal score)[] ZRevRangeByScoreWithScores<T>(string key, string max, string min, long? limit = null, long offset = 0) =>
Instance.ZRevRangeByScoreWithScores<T>(key, max, min, limit, offset);
/// <summary>
/// Returns the rank of the specified member, with members ordered by decreasing score (high to low).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="member">Member</param>
/// <returns></returns>
public static long? ZRevRank(string key, object member) => Instance.ZRevRank(key, member);
/// <summary>
/// Returns the score of the specified member in the sorted set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="member">Member</param>
/// <returns></returns>
public static decimal? ZScore(string key, object member) => Instance.ZScore(key, member);
/// <summary>
/// Computes the union of one or more sorted sets and stores the result in the new sorted set destination.
/// </summary>
/// <param name="destination">Destination sorted set, without the configured prefix</param>
/// <param name="weights">WEIGHTS option: one multiplication factor per input set; when omitted the factor defaults to 1</param>
/// <param name="aggregate">Sum | Min | Max</param>
/// <param name="keys">One or more sorted sets, without the configured prefix</param>
/// <returns></returns>
public static long ZUnionStore(string destination, decimal[] weights, RedisAggregate aggregate, params string[] keys) => Instance.ZUnionStore(destination, weights, aggregate, keys);
/// <summary>
/// Iterates the elements of the sorted set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="cursor">Cursor position</param>
/// <param name="pattern">Match pattern</param>
/// <param name="count">Count hint per iteration</param>
/// <returns></returns>
public static RedisScan<(string member, decimal score)> ZScan(string key, long cursor, string pattern = null, long? count = null) =>
Instance.ZScan(key, cursor, pattern, count);
/// <summary>
/// Iterates the elements of the sorted set.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="cursor">Cursor position</param>
/// <param name="pattern">Match pattern</param>
/// <param name="count">Count hint per iteration</param>
/// <returns></returns>
public static RedisScan<(T member, decimal score)> ZScan<T>(string key, long cursor, string pattern = null, long? count = null) =>
Instance.ZScan<T>(key, cursor, pattern, count);
/// <summary>
/// When all members of a sorted set share the same score, elements sort lexicographically;
/// returns the members of key whose value lies between min and max.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">'[' means inclusive, '(' means exclusive (per Redis ZRANGEBYLEX; the original doc had these swapped); '-' is negative infinity, '+' positive infinity. ZRANGEBYLEX zset - + returns every element</param>
/// <param name="max">'[' means inclusive, '(' means exclusive; '-' is negative infinity, '+' positive infinity. ZRANGEBYLEX zset - + returns every element</param>
/// <param name="limit">Maximum number of members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public static string[] ZRangeByLex(string key, string min, string max, long? limit = null, long offset = 0) =>
Instance.ZRangeByLex(key, min, max, limit, offset);
/// <summary>
/// When all members of a sorted set share the same score, elements sort lexicographically;
/// returns the members of key whose value lies between min and max.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">'[' means inclusive, '(' means exclusive; '-' is negative infinity, '+' positive infinity. ZRANGEBYLEX zset - + returns every element</param>
/// <param name="max">'[' means inclusive, '(' means exclusive; '-' is negative infinity, '+' positive infinity. ZRANGEBYLEX zset - + returns every element</param>
/// <param name="limit">Maximum number of members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public static T[] ZRangeByLex<T>(string key, string min, string max, long? limit = null, long offset = 0) =>
Instance.ZRangeByLex<T>(key, min, max, limit, offset);
/// <summary>
/// When all members of a sorted set share the same score, elements sort lexicographically;
/// removes the members of key whose value lies between min and max (the original doc's "returns" was a copy-paste error).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">'[' means inclusive, '(' means exclusive; '-' is negative infinity, '+' positive infinity</param>
/// <param name="max">'[' means inclusive, '(' means exclusive; '-' is negative infinity, '+' positive infinity</param>
/// <returns></returns>
public static long ZRemRangeByLex(string key, string min, string max) =>
Instance.ZRemRangeByLex(key, min, max);
/// <summary>
/// When all members of a sorted set share the same score, elements sort lexicographically;
/// returns the number of members of key whose value lies between min and max.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="min">'[' means inclusive, '(' means exclusive; '-' is negative infinity, '+' positive infinity</param>
/// <param name="max">'[' means inclusive, '(' means exclusive; '-' is negative infinity, '+' positive infinity</param>
/// <returns></returns>
public static long ZLexCount(string key, string min, string max) =>
Instance.ZLexCount(key, min, max);
/// <summary>
/// [redis-server 5.0.0] Removes and returns up to count members with the highest scores. count defaults to 1 when unspecified;
/// a count larger than the sorted set's cardinality is not an error. When multiple elements are returned, the
/// highest-scored element comes first, followed by elements with lower scores.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="count">Number of members to pop</param>
/// <returns></returns>
public static (string member, decimal score)[] ZPopMax(string key, long count) =>
Instance.ZPopMax(key, count);
/// <summary>
/// [redis-server 5.0.0] Removes and returns up to count members with the highest scores. count defaults to 1 when unspecified;
/// a count larger than the sorted set's cardinality is not an error. When multiple elements are returned, the
/// highest-scored element comes first, followed by elements with lower scores.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="count">Number of members to pop</param>
/// <returns></returns>
public static (T member, decimal score)[] ZPopMax<T>(string key, long count) =>
Instance.ZPopMax<T>(key, count);
/// <summary>
/// [redis-server 5.0.0] Removes and returns up to count members with the lowest scores. count defaults to 1 when unspecified;
/// a count larger than the sorted set's cardinality is not an error. When multiple elements are returned, the
/// lowest-scored element comes first, followed by elements with higher scores.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="count">Number of members to pop</param>
/// <returns></returns>
public static (string member, decimal score)[] ZPopMin(string key, long count) =>
Instance.ZPopMin(key, count);
/// <summary>
/// [redis-server 5.0.0] Removes and returns up to count members with the lowest scores. count defaults to 1 when unspecified;
/// a count larger than the sorted set's cardinality is not an error. When multiple elements are returned, the
/// lowest-scored element comes first, followed by elements with higher scores.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="count">Number of members to pop</param>
/// <returns></returns>
public static (T member, decimal score)[] ZPopMin<T>(string key, long count) =>
Instance.ZPopMin<T>(key, count);
#endregion
#region Set
/// <summary>
/// Adds one or more members to the set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="members">One or more members</param>
/// <returns></returns>
public static long SAdd<T>(string key, params T[] members) => Instance.SAdd(key, members);
/// <summary>
/// Returns the number of members in the set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public static long SCard(string key) => Instance.SCard(key);
/// <summary>
/// Returns the difference of all the given sets.
/// </summary>
/// <param name="keys">Keys, without the configured prefix</param>
/// <returns></returns>
public static string[] SDiff(params string[] keys) => Instance.SDiff(keys);
/// <summary>
/// Returns the difference of all the given sets.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="keys">Keys, without the configured prefix</param>
/// <returns></returns>
public static T[] SDiff<T>(params string[] keys) => Instance.SDiff<T>(keys);
/// <summary>
/// Computes the difference of all the given sets and stores it in destination.
/// </summary>
/// <param name="destination">Destination set, without the configured prefix</param>
/// <param name="keys">One or more sets, without the configured prefix</param>
/// <returns></returns>
public static long SDiffStore(string destination, params string[] keys) => Instance.SDiffStore(destination, keys);
/// <summary>
/// Returns the intersection of all the given sets.
/// </summary>
/// <param name="keys">Keys, without the configured prefix</param>
/// <returns></returns>
public static string[] SInter(params string[] keys) => Instance.SInter(keys);
/// <summary>
/// Returns the intersection of all the given sets.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="keys">Keys, without the configured prefix</param>
/// <returns></returns>
public static T[] SInter<T>(params string[] keys) => Instance.SInter<T>(keys);
/// <summary>
/// Computes the intersection of all the given sets and stores it in destination.
/// </summary>
/// <param name="destination">Destination set, without the configured prefix</param>
/// <param name="keys">One or more sets, without the configured prefix</param>
/// <returns></returns>
public static long SInterStore(string destination, params string[] keys) => Instance.SInterStore(destination, keys);
/// <summary>
/// Determines whether member is a member of the set stored at key.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="member">Member</param>
/// <returns></returns>
public static bool SIsMember(string key, object member) => Instance.SIsMember(key, member);
/// <summary>
/// Returns all members of the set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public static string[] SMembers(string key) => Instance.SMembers(key);
/// <summary>
/// Returns all members of the set.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public static T[] SMembers<T>(string key) => Instance.SMembers<T>(key);
/// <summary>
/// Moves member from the set at source to the set at destination.
/// </summary>
/// <param name="source">Source set key, without the configured prefix</param>
/// <param name="destination">Destination set key, without the configured prefix</param>
/// <param name="member">Member</param>
/// <returns></returns>
public static bool SMove(string source, string destination, object member) => Instance.SMove(source, destination, member);
/// <summary>
/// Removes and returns one random member of the set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public static string SPop(string key) => Instance.SPop(key);
/// <summary>
/// Removes and returns one random member of the set.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public static T SPop<T>(string key) => Instance.SPop<T>(key);
/// <summary>
/// [redis-server 3.2] Removes and returns one or more random members of the set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="count">Number of members to remove and return</param>
/// <returns></returns>
public static string[] SPop(string key, long count) => Instance.SPop(key, count);
/// <summary>
/// [redis-server 3.2] Removes and returns one or more random members of the set.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="count">Number of members to remove and return</param>
/// <returns></returns>
public static T[] SPop<T>(string key, long count) => Instance.SPop<T>(key, count);
/// <summary>
/// Returns one random member of the set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public static string SRandMember(string key) => Instance.SRandMember(key);
/// <summary>
/// Returns one random member of the set.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public static T SRandMember<T>(string key) => Instance.SRandMember<T>(key);
/// <summary>
/// Returns one or more random members of the set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="count">Number of members to return</param>
/// <returns></returns>
public static string[] SRandMembers(string key, int count = 1) => Instance.SRandMembers(key, count);
/// <summary>
/// Returns one or more random members of the set.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="count">Number of members to return</param>
/// <returns></returns>
public static T[] SRandMembers<T>(string key, int count = 1) => Instance.SRandMembers<T>(key, count);
/// <summary>
/// 移除集合中一个或多个成员
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="members">一个或多个成员</param>
/// <returns></returns>
public static long SRem<T>(string key, params T[] members) => Instance.SRem(key, members);
/// <summary>
/// 返回所有给定集合的并集
/// </summary>
/// <param name="keys">不含prefix前辍</param>
/// <returns></returns>
public static string[] SUnion(params string[] keys) => Instance.SUnion(keys);
/// <summary>
/// 返回所有给定集合的并集
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="keys">不含prefix前辍</param>
/// <returns></returns>
public static T[] SUnion<T>(params string[] keys) => Instance.SUnion<T>(keys);
/// <summary>
/// 所有给定集合的并集存储在 destination 集合中
/// </summary>
/// <param name="destination">新的无序集合,不含prefix前辍</param>
/// <param name="keys">一个或多个无序集合,不含prefix前辍</param>
/// <returns></returns>
public static long SUnionStore(string destination, params string[] keys) => Instance.SUnionStore(destination, keys);
/// <summary>
/// 迭代集合中的元素
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="cursor">位置</param>
/// <param name="pattern">模式</param>
/// <param name="count">数量</param>
/// <returns></returns>
public static RedisScan<string> SScan(string key, long cursor, string pattern = null, long? count = null) =>
Instance.SScan(key, cursor, pattern, count);
/// <summary>
/// 迭代集合中的元素
/// </summary>
/// <typeparam name="T">byte[] 或其他类型</typeparam>
/// <param name="key">不含prefix前辍</param>
/// <param name="cursor">位置</param>
/// <param name="pattern">模式</param>
/// <param name="count">数量</param>
/// <returns></returns>
public static RedisScan<T> SScan<T>(string key, long cursor, string pattern = null, long? count = null) =>
Instance.SScan<T>(key, cursor, pattern, count);
#endregion
#region List
/// <summary>
/// Blocking version of LPOP: if none of the given lists has an element to pop, the connection blocks
/// until an element becomes available or the timeout expires; returns null on timeout.
/// Also returns the key the element was popped from.
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">One or more list keys, without prefix</param>
/// <returns></returns>
public static (string key, string value)? BLPopWithKey(int timeout, params string[] keys) => Instance.BLPopWithKey(timeout, keys);
/// <summary>
/// Blocking version of LPOP: if none of the given lists has an element to pop, the connection blocks
/// until an element becomes available or the timeout expires; returns null on timeout.
/// Also returns the key the element was popped from.
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">One or more list keys, without prefix</param>
/// <returns></returns>
public static (string key, T value)? BLPopWithKey<T>(int timeout, params string[] keys) => Instance.BLPopWithKey<T>(timeout, keys);
/// <summary>
/// Blocking version of LPOP: if none of the given lists has an element to pop, the connection blocks
/// until an element becomes available or the timeout expires; returns null on timeout.
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">One or more list keys, without prefix</param>
/// <returns></returns>
public static string BLPop(int timeout, params string[] keys) => Instance.BLPop(timeout, keys);
/// <summary>
/// Blocking version of LPOP: if none of the given lists has an element to pop, the connection blocks
/// until an element becomes available or the timeout expires; returns null on timeout.
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">One or more list keys, without prefix</param>
/// <returns></returns>
public static T BLPop<T>(int timeout, params string[] keys) => Instance.BLPop<T>(timeout, keys);
/// <summary>
/// Blocking version of RPOP: if none of the given lists has an element to pop, the connection blocks
/// until an element becomes available or the timeout expires; returns null on timeout.
/// Also returns the key the element was popped from.
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">One or more list keys, without prefix</param>
/// <returns></returns>
public static (string key, string value)? BRPopWithKey(int timeout, params string[] keys) => Instance.BRPopWithKey(timeout, keys);
/// <summary>
/// Blocking version of RPOP: if none of the given lists has an element to pop, the connection blocks
/// until an element becomes available or the timeout expires; returns null on timeout.
/// Also returns the key the element was popped from.
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">One or more list keys, without prefix</param>
/// <returns></returns>
public static (string key, T value)? BRPopWithKey<T>(int timeout, params string[] keys) => Instance.BRPopWithKey<T>(timeout, keys);
/// <summary>
/// Blocking version of RPOP: if none of the given lists has an element to pop, the connection blocks
/// until an element becomes available or the timeout expires; returns null on timeout.
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">One or more list keys, without prefix</param>
/// <returns></returns>
public static string BRPop(int timeout, params string[] keys) => Instance.BRPop(timeout, keys);
/// <summary>
/// Blocking version of RPOP: if none of the given lists has an element to pop, the connection blocks
/// until an element becomes available or the timeout expires; returns null on timeout.
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">One or more list keys, without prefix</param>
/// <returns></returns>
public static T BRPop<T>(int timeout, params string[] keys) => Instance.BRPop<T>(timeout, keys);
/// <summary>
/// BRPOPLPUSH is the blocking version of RPOPLPUSH. When the source list is non-empty it behaves
/// exactly like RPOPLPUSH. When the source list is empty, the connection blocks until the timeout
/// expires or another client performs LPUSH or RPUSH against source.
/// </summary>
/// <param name="source">Source list key, without prefix</param>
/// <param name="destination">Destination list key, without prefix</param>
/// <param name="timeout">Timeout in seconds</param>
/// <returns></returns>
public static string BRPopLPush(string source, string destination, int timeout) => Instance.BRPopLPush(source, destination, timeout);
/// <summary>
/// BRPOPLPUSH is the blocking version of RPOPLPUSH. When the source list is non-empty it behaves
/// exactly like RPOPLPUSH. When the source list is empty, the connection blocks until the timeout
/// expires or another client performs LPUSH or RPUSH against source.
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="source">Source list key, without prefix</param>
/// <param name="destination">Destination list key, without prefix</param>
/// <param name="timeout">Timeout in seconds</param>
/// <returns></returns>
public static T BRPopLPush<T>(string source, string destination, int timeout) => Instance.BRPopLPush<T>(source, destination, timeout);
/// <summary>
/// Gets an element from a list by its index.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="index">Zero-based index; negative values count from the tail</param>
/// <returns></returns>
public static string LIndex(string key, long index) => Instance.LIndex(key, index);
/// <summary>
/// Gets an element from a list by its index.
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <param name="index">Zero-based index; negative values count from the tail</param>
/// <returns></returns>
public static T LIndex<T>(string key, long index) => Instance.LIndex<T>(key, index);
/// <summary>
/// Inserts value into the list before the pivot element.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="pivot">Existing list element used as the reference position</param>
/// <param name="value">New element to insert</param>
/// <returns></returns>
public static long LInsertBefore(string key, object pivot, object value) => Instance.LInsertBefore(key, pivot, value);
/// <summary>
/// Inserts value into the list after the pivot element.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="pivot">Existing list element used as the reference position</param>
/// <param name="value">New element to insert</param>
/// <returns></returns>
public static long LInsertAfter(string key, object pivot, object value) => Instance.LInsertAfter(key, pivot, value);
/// <summary>
/// Gets the length of the list stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static long LLen(string key) => Instance.LLen(key);
/// <summary>
/// Removes and returns the first element of the list stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static string LPop(string key) => Instance.LPop(key);
/// <summary>
/// Removes and returns the first element of the list stored at key.
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static T LPop<T>(string key) => Instance.LPop<T>(key);
/// <summary>
/// Prepends one or more values to the head of the list stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="value">One or more values</param>
/// <returns>Length of the list after the LPUSH operation</returns>
public static long LPush<T>(string key, params T[] value) => Instance.LPush(key, value);
/// <summary>
/// Prepends a value to the head of a list, only if the list already exists.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="value">Value</param>
/// <returns>Length of the list after the LPUSHX operation</returns>
public static long LPushX(string key, object value) => Instance.LPushX(key, value);
/// <summary>
/// Gets a range of elements from the list stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="start">Start position; 0 is the first element, -1 is the last element</param>
/// <param name="stop">Stop position; 0 is the first element, -1 is the last element</param>
/// <returns></returns>
public static string[] LRange(string key, long start, long stop) => Instance.LRange(key, start, stop);
/// <summary>
/// Gets a range of elements from the list stored at key.
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <param name="start">Start position; 0 is the first element, -1 is the last element</param>
/// <param name="stop">Stop position; 0 is the first element, -1 is the last element</param>
/// <returns></returns>
public static T[] LRange<T>(string key, long start, long stop) => Instance.LRange<T>(key, start, stop);
/// <summary>
/// Removes elements equal to value from the list, according to count.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="count">count &gt; 0: remove count occurrences from head to tail; count &lt; 0: remove -count occurrences from tail to head; count = 0: remove all occurrences</param>
/// <param name="value">Element value to remove</param>
/// <returns></returns>
public static long LRem(string key, long count, object value) => Instance.LRem(key, count, value);
/// <summary>
/// Sets the value of a list element by its index.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="index">Zero-based index; negative values count from the tail</param>
/// <param name="value">Value</param>
/// <returns></returns>
public static bool LSet(string key, long index, object value) => Instance.LSet(key, index, value);
/// <summary>
/// Trims a list so that it only contains the elements within the specified range; all others are removed.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="start">Start position; 0 is the first element, -1 is the last element</param>
/// <param name="stop">Stop position; 0 is the first element, -1 is the last element</param>
/// <returns></returns>
public static bool LTrim(string key, long start, long stop) => Instance.LTrim(key, start, stop);
/// <summary>
/// Removes and returns the last element of the list stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static string RPop(string key) => Instance.RPop(key);
/// <summary>
/// Removes and returns the last element of the list stored at key.
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static T RPop<T>(string key) => Instance.RPop<T>(key);
/// <summary>
/// Pops the last element (tail) of the source list, returns it to the client,
/// and pushes it as the first element (head) of the destination list.
/// </summary>
/// <param name="source">Source list key, without prefix</param>
/// <param name="destination">Destination list key, without prefix</param>
/// <returns></returns>
public static string RPopLPush(string source, string destination) => Instance.RPopLPush(source, destination);
/// <summary>
/// Pops the last element (tail) of the source list, returns it to the client,
/// and pushes it as the first element (head) of the destination list.
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="source">Source list key, without prefix</param>
/// <param name="destination">Destination list key, without prefix</param>
/// <returns></returns>
public static T RPopLPush<T>(string source, string destination) => Instance.RPopLPush<T>(source, destination);
/// <summary>
/// Appends one or more values to the tail of the list stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="value">One or more values</param>
/// <returns>Length of the list after the RPUSH operation</returns>
public static long RPush<T>(string key, params T[] value) => Instance.RPush(key, value);
/// <summary>
/// Appends a value to the tail of a list, only if the list already exists.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="value">Value</param>
/// <returns>Length of the list after the RPUSHX operation</returns>
public static long RPushX(string key, object value) => Instance.RPushX(key, value);
#endregion
#region Hash
/// <summary>
/// Deletes one or more hash fields.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="fields">Fields to delete</param>
/// <returns></returns>
public static long HDel(string key, params string[] fields) => Instance.HDel(key, fields);
/// <summary>
/// Determines whether the given field exists in the hash stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="field">Field name</param>
/// <returns></returns>
public static bool HExists(string key, string field) => Instance.HExists(key, field);
/// <summary>
/// Gets the value of the given field in the hash stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="field">Field name</param>
/// <returns></returns>
public static string HGet(string key, string field) => Instance.HGet(key, field);
/// <summary>
/// Gets the value of the given field in the hash stored at key.
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <param name="field">Field name</param>
/// <returns></returns>
public static T HGet<T>(string key, string field) => Instance.HGet<T>(key, field);
/// <summary>
/// Gets all fields and values of the hash stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static Dictionary<string, string> HGetAll(string key) => Instance.HGetAll(key);
/// <summary>
/// Gets all fields and values of the hash stored at key.
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static Dictionary<string, T> HGetAll<T>(string key) => Instance.HGetAll<T>(key);
/// <summary>
/// Increments the integer value of the given hash field by the specified increment.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="field">Field name</param>
/// <param name="value">Increment (default = 1)</param>
/// <returns></returns>
public static long HIncrBy(string key, string field, long value = 1) => Instance.HIncrBy(key, field, value);
/// <summary>
/// Increments the floating-point value of the given hash field by the specified increment.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="field">Field name</param>
/// <param name="value">Increment (default = 1)</param>
/// <returns></returns>
public static decimal HIncrByFloat(string key, string field, decimal value = 1) => Instance.HIncrByFloat(key, field, value);
/// <summary>
/// Gets all field names of the hash stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static string[] HKeys(string key) => Instance.HKeys(key);
/// <summary>
/// Gets the number of fields in the hash stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static long HLen(string key) => Instance.HLen(key);
/// <summary>
/// Gets the values of multiple fields in the hash stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="fields">Field names</param>
/// <returns></returns>
public static string[] HMGet(string key, params string[] fields) => Instance.HMGet(key, fields);
/// <summary>
/// Gets the values of multiple fields in the hash stored at key.
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <param name="fields">One or more field names</param>
/// <returns></returns>
public static T[] HMGet<T>(string key, params string[] fields) => Instance.HMGet<T>(key, fields);
/// <summary>
/// Sets multiple field-value pairs on the hash stored at key at once.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="keyValues">Alternating field/value pairs: field1 value1 [field2 value2]</param>
/// <returns></returns>
public static bool HMSet(string key, params object[] keyValues) => Instance.HMSet(key, keyValues);
/// <summary>
/// Sets the value of the given field in the hash stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="field">Field name</param>
/// <param name="value">Value</param>
/// <returns>true if the field is new and was set; false if the field already existed and its old value was overwritten</returns>
public static bool HSet(string key, string field, object value) => Instance.HSet(key, field, value);
/// <summary>
/// Sets the value of a hash field, only if the field does not yet exist.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="field">Field name</param>
/// <param name="value">Value (string or byte[])</param>
/// <returns></returns>
public static bool HSetNx(string key, string field, object value) => Instance.HSetNx(key, field, value);
/// <summary>
/// Gets all values of the hash stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static string[] HVals(string key) => Instance.HVals(key);
/// <summary>
/// Gets all values of the hash stored at key.
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static T[] HVals<T>(string key) => Instance.HVals<T>(key);
/// <summary>
/// Incrementally iterates the field-value pairs of the hash stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="cursor">Iteration cursor position</param>
/// <param name="pattern">Glob-style match pattern</param>
/// <param name="count">Hint for the number of elements returned per call</param>
/// <returns></returns>
public static RedisScan<(string field, string value)> HScan(string key, long cursor, string pattern = null, long? count = null) =>
Instance.HScan(key, cursor, pattern, count);
/// <summary>
/// Incrementally iterates the field-value pairs of the hash stored at key.
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <param name="cursor">Iteration cursor position</param>
/// <param name="pattern">Glob-style match pattern</param>
/// <param name="count">Hint for the number of elements returned per call</param>
/// <returns></returns>
public static RedisScan<(string field, T value)> HScan<T>(string key, long cursor, string pattern = null, long? count = null) =>
Instance.HScan<T>(key, cursor, pattern, count);
#endregion
#region String
/// <summary>
/// If key already exists and holds a string, APPEND appends value to the end of the existing value.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="value">String to append</param>
/// <returns>Length of the string stored at key after the append</returns>
public static long Append(string key, object value) => Instance.Append(key, value);
/// <summary>
/// Counts the number of bits set to 1 within the given byte range.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="start">Start position</param>
/// <param name="end">End position</param>
/// <returns></returns>
public static long BitCount(string key, long start, long end) => Instance.BitCount(key, start, end);
/// <summary>
/// Performs a bitwise operation on one or more string keys holding binary values and stores the result in destKey.
/// </summary>
/// <param name="op">And | Or | XOr | Not</param>
/// <param name="destKey">Destination key, without prefix</param>
/// <param name="keys">Source keys, without prefix</param>
/// <returns>Length of the string stored at destKey, equal to the length of the longest input string</returns>
public static long BitOp(RedisBitOp op, string destKey, params string[] keys) => Instance.BitOp(op, destKey, keys);
/// <summary>
/// Finds the position of the first bit set to 1 or 0 within the given range of the value stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="bit">Bit value to search for</param>
/// <param name="start">Start position; -1 is the last byte, -2 the second to last</param>
/// <param name="end">End position; -1 is the last byte, -2 the second to last</param>
/// <returns>Position of the first bit set to 1 or 0 within the range</returns>
public static long BitPos(string key, bool bit, long? start = null, long? end = null) => Instance.BitPos(key, bit, start, end);
/// <summary>
/// Gets the value of the specified key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static string Get(string key) => Instance.Get(key);
/// <summary>
/// Gets the value of the specified key.
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static T Get<T>(string key) => Instance.Get<T>(key);
/// <summary>
/// Gets the value of the specified key, suitable for large values: the value is streamed into destination.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="destination">Stream the value is written into</param>
/// <param name="bufferSize">Read buffer size</param>
public static void Get(string key, Stream destination, int bufferSize = 1024) => Instance.Get(key, destination, bufferSize);
/// <summary>
/// Gets the bit at the given offset of the string value stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="offset">Bit offset</param>
/// <returns></returns>
public static bool GetBit(string key, uint offset) => Instance.GetBit(key, offset);
/// <summary>
/// Gets a substring of the string value stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="start">Start position; 0 is the first character, -1 is the last</param>
/// <param name="end">End position; 0 is the first character, -1 is the last</param>
/// <returns></returns>
public static string GetRange(string key, long start, long end) => Instance.GetRange(key, start, end);
/// <summary>
/// Gets a substring of the string value stored at key.
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <param name="start">Start position; 0 is the first character, -1 is the last</param>
/// <param name="end">End position; 0 is the first character, -1 is the last</param>
/// <returns></returns>
public static T GetRange<T>(string key, long start, long end) => Instance.GetRange<T>(key, start, end);
/// <summary>
/// Sets the value of key and returns its previous (old) value.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="value">New value</param>
/// <returns></returns>
public static string GetSet(string key, object value) => Instance.GetSet(key, value);
/// <summary>
/// Sets the value of key and returns its previous (old) value.
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <param name="value">New value</param>
/// <returns></returns>
public static T GetSet<T>(string key, object value) => Instance.GetSet<T>(key, value);
/// <summary>
/// Increments the integer value stored at key by the given increment.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="value">Increment (default = 1)</param>
/// <returns></returns>
public static long IncrBy(string key, long value = 1) => Instance.IncrBy(key, value);
/// <summary>
/// Increments the floating-point value stored at key by the given increment.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="value">Increment (default = 1)</param>
/// <returns></returns>
public static decimal IncrByFloat(string key, decimal value = 1) => Instance.IncrByFloat(key, value);
/// <summary>
/// Gets the values of multiple specified keys (as an array).
/// </summary>
/// <param name="keys">Keys, without prefix</param>
/// <returns></returns>
public static string[] MGet(params string[] keys) => Instance.MGet(keys);
/// <summary>
/// Gets the values of multiple specified keys (as an array).
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="keys">Keys, without prefix</param>
/// <returns></returns>
public static T[] MGet<T>(params string[] keys) => Instance.MGet<T>(keys);
/// <summary>
/// Sets one or more key-value pairs at once.
/// </summary>
/// <param name="keyValues">Alternating key/value pairs: key1 value1 [key2 value2]</param>
/// <returns></returns>
public static bool MSet(params object[] keyValues) => Instance.MSet(keyValues);
/// <summary>
/// Sets one or more key-value pairs at once, only if none of the given keys exist.
/// </summary>
/// <param name="keyValues">Alternating key/value pairs: key1 value1 [key2 value2]</param>
/// <returns></returns>
public static bool MSetNx(params object[] keyValues) => Instance.MSetNx(keyValues);
/// <summary>
/// Sets the value of the specified key. All write parameters of type object support string | byte[] | numeric | serializable object.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="value">Value</param>
/// <param name="expireSeconds">Expiration in seconds (-1 = no expiration)</param>
/// <param name="exists">Nx (only if absent), Xx (only if present)</param>
/// <returns></returns>
public static bool Set(string key, object value, int expireSeconds = -1, RedisExistence? exists = null) => Instance.Set(key, value, expireSeconds, exists);
/// <summary>
/// Sets the value of the specified key with a TimeSpan expiration. Same semantics as the seconds-based overload.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="value">Value</param>
/// <param name="expire">Expiration interval</param>
/// <param name="exists">Nx (only if absent), Xx (only if present)</param>
/// <returns></returns>
public static bool Set(string key, object value, TimeSpan expire, RedisExistence? exists = null) => Instance.Set(key, value, expire, exists);
/// <summary>
/// Sets or clears the bit at the given offset of the string value stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="offset">Bit offset</param>
/// <param name="value">Bit value</param>
/// <returns></returns>
public static bool SetBit(string key, uint offset, bool value) => Instance.SetBit(key, offset, value);
/// <summary>
/// Sets the value of key, only if key does not exist.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="value">Value</param>
/// <returns></returns>
public static bool SetNx(string key, object value) => Instance.SetNx(key, value);
/// <summary>
/// Overwrites part of the string stored at key with value, starting at the given offset.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="offset">Byte offset</param>
/// <param name="value">Value</param>
/// <returns>Length of the string after it was modified</returns>
public static long SetRange(string key, uint offset, object value) => Instance.SetRange(key, offset, value);
/// <summary>
/// Gets the length of the string value stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static long StrLen(string key) => Instance.StrLen(key);
#endregion
#region Key
/// <summary>
/// Deletes one or more keys if they exist.
/// </summary>
/// <param name="key">Keys, without prefix</param>
/// <returns></returns>
public static long Del(params string[] key) => Instance.Del(key);
/// <summary>
/// Serializes the value stored at key and returns the serialized bytes.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static byte[] Dump(string key) => Instance.Dump(key);
/// <summary>
/// Determines whether the given key exists.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static bool Exists(string key) => Instance.Exists(key);
/// <summary>
/// [redis-server 3.0] Determines how many of the given keys exist.
/// </summary>
/// <param name="keys">Keys, without prefix</param>
/// <returns></returns>
public static long Exists(string[] keys) => Instance.Exists(keys);
/// <summary>
/// Sets an expiration time on the given key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="seconds">Expiration in seconds</param>
/// <returns></returns>
public static bool Expire(string key, int seconds) => Instance.Expire(key, seconds);
/// <summary>
/// Sets an expiration time on the given key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="expire">Expiration interval</param>
/// <returns></returns>
public static bool Expire(string key, TimeSpan expire) => Instance.Expire(key, expire);
/// <summary>
/// Sets an absolute expiration time on the given key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="expire">Absolute expiration time</param>
/// <returns></returns>
public static bool ExpireAt(string key, DateTime expire) => Instance.ExpireAt(key, expire);
/// <summary>
/// Finds all keys across partition nodes matching the given pattern.
/// <para>The keys[] returned by Keys INCLUDE the prefix; strip it yourself before use.</para>
/// </summary>
/// <param name="pattern">Glob-style pattern, e.g. runoob*</param>
/// <returns></returns>
public static string[] Keys(string pattern) => Instance.Keys(pattern);
/// <summary>
/// Moves a key from the current database to the given database.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="database">Target database index</param>
/// <returns></returns>
public static bool Move(string key, int database) => Instance.Move(key, database);
/// <summary>
/// Returns the internal representation (encoding) used to store the value at the given key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static string ObjectEncoding(string key) => Instance.ObjectEncoding(key);
/// <summary>
/// Returns the reference count of the value stored at the given key. Mainly useful for debugging.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static long? ObjectRefCount(string key) => Instance.ObjectRefCount(key);
/// <summary>
/// Returns the idle time of the given key (seconds since it was last read or written).
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static long? ObjectIdleTime(string key) => Instance.ObjectIdleTime(key);
/// <summary>
/// Removes the expiration from a key, making it persist indefinitely.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static bool Persist(string key) => Instance.Persist(key);
/// <summary>
/// Sets an expiration time on the given key, in milliseconds.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="milliseconds">Expiration in milliseconds</param>
/// <returns></returns>
public static bool PExpire(string key, int milliseconds) => Instance.PExpire(key, milliseconds);
/// <summary>
/// Sets an expiration time on the given key, with millisecond precision.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="expire">Expiration interval</param>
/// <returns></returns>
public static bool PExpire(string key, TimeSpan expire) => Instance.PExpire(key, expire);
/// <summary>
/// Sets an absolute expiration time on the given key, with millisecond precision.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="expire">Absolute expiration time</param>
/// <returns></returns>
public static bool PExpireAt(string key, DateTime expire) => Instance.PExpireAt(key, expire);
/// <summary>
/// Returns the remaining time to live of a key, in milliseconds.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static long PTtl(string key) => Instance.PTtl(key);
/// <summary>
/// Returns a random key from all nodes.
/// </summary>
/// <returns>If the returned key contains the prefix, it is stripped before being returned</returns>
public static string RandomKey() => Instance.RandomKey();
/// <summary>
/// Renames a key.
/// </summary>
/// <param name="key">Old name, without prefix</param>
/// <param name="newKey">New name, without prefix</param>
/// <returns></returns>
public static bool Rename(string key, string newKey) => Instance.Rename(key, newKey);
/// <summary>
/// Renames a key, only if the new key does not already exist.
/// </summary>
/// <param name="key">Old name, without prefix</param>
/// <param name="newKey">New name, without prefix</param>
/// <returns></returns>
public static bool RenameNx(string key, string newKey) => Instance.RenameNx(key, newKey);
/// <summary>
/// Deserializes the given serialized value (from Dump) and associates it with the given key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="serializedValue">Serialized value</param>
/// <returns></returns>
public static bool Restore(string key, byte[] serializedValue) => Instance.Restore(key, serializedValue);
/// <summary>
/// Deserializes the given serialized value (from Dump) and associates it with the given key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="ttlMilliseconds">Time to live for the key, in milliseconds</param>
/// <param name="serializedValue">Serialized value</param>
/// <returns></returns>
public static bool Restore(string key, long ttlMilliseconds, byte[] serializedValue) => Instance.Restore(key, ttlMilliseconds, serializedValue);
/// <summary>
/// Returns the sorted elements of the given list, set, or sorted set key.
/// Parameter reference: http://doc.redisfans.com/key/sort.html
/// </summary>
/// <param name="key">List, set, or sorted set key, without prefix</param>
/// <param name="count">Number of elements to return</param>
/// <param name="offset">Offset to start from</param>
/// <param name="by">External key pattern to sort by</param>
/// <param name="dir">Sort direction</param>
/// <param name="isAlpha">Sort lexicographically (true) or numerically</param>
/// <param name="get">Patterns for retrieving values keyed by the sort result</param>
/// <returns></returns>
public static string[] Sort(string key, long? count = null, long offset = 0, string by = null, RedisSortDir? dir = null, bool? isAlpha = null, params string[] get) =>
Instance.Sort(key, count, offset, by, dir, isAlpha, get);
/// <summary>
/// Sorts the given list, set, or sorted set key and stores the result at destination.
/// Parameter reference: http://doc.redisfans.com/key/sort.html
/// </summary>
/// <param name="key">List, set, or sorted set key, without prefix</param>
/// <param name="destination">Destination key, without prefix</param>
/// <param name="count">Number of elements to return</param>
/// <param name="offset">Offset to start from</param>
/// <param name="by">External key pattern to sort by</param>
/// <param name="dir">Sort direction</param>
/// <param name="isAlpha">Sort lexicographically (true) or numerically</param>
/// <param name="get">Patterns for retrieving values keyed by the sort result</param>
/// <returns></returns>
public static long SortAndStore(string key, string destination, long? count = null, long offset = 0, string by = null, RedisSortDir? dir = null, bool? isAlpha = null, params string[] get) =>
Instance.SortAndStore(key, destination, count, offset, by, dir, isAlpha, get);
/// <summary>
/// Returns the remaining time to live of the given key, in seconds.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static long Ttl(string key) => Instance.Ttl(key);
/// <summary>
/// Returns the type of the value stored at key.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public static KeyType Type(string key) => Instance.Type(key);
/// <summary>
/// Incrementally iterates the keys of the current database.
/// </summary>
/// <param name="cursor">Iteration cursor position</param>
/// <param name="pattern">Glob-style match pattern</param>
/// <param name="count">Hint for the number of elements returned per call</param>
/// <returns></returns>
public static RedisScan<string> Scan(long cursor, string pattern = null, long? count = null) => Instance.Scan(cursor, pattern, count);
/// <summary>
/// Incrementally iterates the keys of the current database.
/// </summary>
/// <typeparam name="T">byte[] or any other deserializable type</typeparam>
/// <param name="key">NOTE(review): this parameter is never forwarded to Instance.Scan&lt;T&gt; and is therefore ignored — it looks like a copy-paste slip from SScan/HScan; confirm against the underlying client before relying on it</param>
/// <param name="cursor">Iteration cursor position</param>
/// <param name="pattern">Glob-style match pattern</param>
/// <param name="count">Hint for the number of elements returned per call</param>
/// <returns></returns>
public static RedisScan<T> Scan<T>(string key, long cursor, string pattern = null, long? count = null) => Instance.Scan<T>(cursor, pattern, count);
#endregion
#region Geo redis-server 3.2
/// <summary>
/// Adds the specified geospatial item (longitude, latitude, member) to the given key.
/// Data is stored in a sorted set so it can be queried later with GEORADIUS / GEORADIUSBYMEMBER.
/// </summary>
/// <param name="key">Key (without prefix)</param>
/// <param name="longitude">Longitude</param>
/// <param name="latitude">Latitude</param>
/// <param name="member">Member</param>
/// <returns>Whether the item was added successfully</returns>
public static bool GeoAdd(string key, decimal longitude, decimal latitude, object member)
{
    return Instance.GeoAdd(key, longitude, latitude, member);
}
/// <summary>
/// Adds multiple geospatial items (longitude, latitude, member) to the given key.
/// Data is stored in a sorted set so it can be queried later with GEORADIUS / GEORADIUSBYMEMBER.
/// </summary>
/// <param name="key">Key (without prefix)</param>
/// <param name="values">Items to add in bulk</param>
/// <returns>Number of elements added to the sorted set, excluding elements whose score was merely updated</returns>
public static long GeoAdd(string key, params (decimal longitude, decimal latitude, object member)[] values)
{
    return Instance.GeoAdd(key, values);
}
/// <summary>
/// Returns the distance between two members. If either member is missing, the command returns null.
/// GEODIST assumes a perfectly spherical Earth, which can introduce up to 0.5% error in extreme cases.
/// </summary>
/// <param name="key">Key (without prefix)</param>
/// <param name="member1">First member</param>
/// <param name="member2">Second member</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <returns>The computed distance as a decimal, or null when a member does not exist</returns>
public static decimal? GeoDist(string key, object member1, object member2, GeoUnit unit = GeoUnit.m)
{
    return Instance.GeoDist(key, member1, member2, unit);
}
/// <summary>
/// Returns the Geohash representation of one or more position members, using the 52-bit
/// integer encoding of the position. Note the encoding differs from the standard because
/// different initial min/max coordinates are used during encode/decode.
/// </summary>
/// <param name="key">Key (without prefix)</param>
/// <param name="members">Members to query</param>
/// <returns>An array of geohash strings, positionally matching the requested members</returns>
public static string[] GeoHash(string key, object[] members)
{
    return Instance.GeoHash(key, members);
}
/// <summary>
/// Returns the positions (longitude and latitude) of all the given members.
/// </summary>
/// <param name="key">Key (without prefix)</param>
/// <param name="members">Members to query</param>
/// <returns>An array where each entry is (longitude, latitude) for the matching member, or null when the member does not exist</returns>
public static (decimal longitude, decimal latitude)?[] GeoPos(string key, object[] members)
{
    return Instance.GeoPos(key, members);
}
/// <summary>
/// Returns the members whose positions are within the given radius of the specified center coordinates.
/// </summary>
/// <param name="key">Key (without prefix)</param>
/// <param name="longitude">Center longitude</param>
/// <param name="latitude">Center latitude</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">COUNT limits the returned elements. The server may still have to process all matches, so large-area queries can remain slow, but limiting the result set saves bandwidth.</param>
/// <param name="sorting">Sort order</param>
/// <returns>Matching member names</returns>
public static string[] GeoRadius(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null)
{
    return Instance.GeoRadius(key, longitude, latitude, radius, unit, count, sorting);
}
/// <summary>
/// Returns the members whose positions are within the given radius of the specified
/// center coordinates, deserializing each member as <typeparamref name="T"/>.
/// </summary>
/// <param name="key">Key (without prefix)</param>
/// <param name="longitude">Center longitude</param>
/// <param name="latitude">Center latitude</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">COUNT limits the returned elements. The server may still have to process all matches, so large-area queries can remain slow, but limiting the result set saves bandwidth.</param>
/// <param name="sorting">Sort order</param>
/// <returns>Matching members</returns>
public static T[] GeoRadius<T>(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null)
{
    return Instance.GeoRadius<T>(key, longitude, latitude, radius, unit, count, sorting);
}
/// <summary>
/// Returns the members within the given radius of the specified center coordinates,
/// together with each member's distance from the center.
/// </summary>
/// <param name="key">Key (without prefix)</param>
/// <param name="longitude">Center longitude</param>
/// <param name="latitude">Center latitude</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">COUNT limits the returned elements. The server may still have to process all matches, so large-area queries can remain slow, but limiting the result set saves bandwidth.</param>
/// <param name="sorting">Sort order</param>
/// <returns>(member, distance) pairs</returns>
public static (string member, decimal dist)[] GeoRadiusWithDist(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null)
{
    return Instance.GeoRadiusWithDist(key, longitude, latitude, radius, unit, count, sorting);
}
/// <summary>
/// Returns the members within the given radius of the specified center coordinates,
/// together with each member's distance, deserializing members as <typeparamref name="T"/>.
/// </summary>
/// <param name="key">Key (without prefix)</param>
/// <param name="longitude">Center longitude</param>
/// <param name="latitude">Center latitude</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">COUNT limits the returned elements. The server may still have to process all matches, so large-area queries can remain slow, but limiting the result set saves bandwidth.</param>
/// <param name="sorting">Sort order</param>
/// <returns>(member, distance) pairs</returns>
public static (T member, decimal dist)[] GeoRadiusWithDist<T>(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null)
{
    return Instance.GeoRadiusWithDist<T>(key, longitude, latitude, radius, unit, count, sorting);
}
///// <summary>
///// 以给定的经纬度为中心, 返回键包含的位置元素当中, 与中心的距离不超过给定最大距离的所有位置元素(包含经度、纬度)。
///// </summary>
///// <param name="key">不含prefix前辍</param>
///// <param name="longitude">经度</param>
///// <param name="latitude">纬度</param>
///// <param name="radius">距离</param>
///// <param name="unit">m 表示单位为米;km 表示单位为千米;mi 表示单位为英里;ft 表示单位为英尺;</param>
///// <param name="count">虽然用户可以使用 COUNT 选项去获取前 N 个匹配元素, 但是因为命令在内部可能会需要对所有被匹配的元素进行处理, 所以在对一个非常大的区域进行搜索时, 即使只使用 COUNT 选项去获取少量元素, 命令的执行速度也可能会非常慢。 但是从另一方面来说, 使用 COUNT 选项去减少需要返回的元素数量, 对于减少带宽来说仍然是非常有用的。</param>
///// <param name="sorting">排序</param>
///// <returns></returns>
//private static (string member, decimal longitude, decimal latitude)[] GeoRadiusWithCoord(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
// Instance.GeoRadiusWithCoord(key, longitude, latitude, radius, unit, count, sorting);
///// <summary>
///// 以给定的经纬度为中心, 返回键包含的位置元素当中, 与中心的距离不超过给定最大距离的所有位置元素(包含经度、纬度)。
///// </summary>
///// <param name="key">不含prefix前辍</param>
///// <param name="longitude">经度</param>
///// <param name="latitude">纬度</param>
///// <param name="radius">距离</param>
///// <param name="unit">m 表示单位为米;km 表示单位为千米;mi 表示单位为英里;ft 表示单位为英尺;</param>
///// <param name="count">虽然用户可以使用 COUNT 选项去获取前 N 个匹配元素, 但是因为命令在内部可能会需要对所有被匹配的元素进行处理, 所以在对一个非常大的区域进行搜索时, 即使只使用 COUNT 选项去获取少量元素, 命令的执行速度也可能会非常慢。 但是从另一方面来说, 使用 COUNT 选项去减少需要返回的元素数量, 对于减少带宽来说仍然是非常有用的。</param>
///// <param name="sorting">排序</param>
///// <returns></returns>
//private static (T member, decimal longitude, decimal latitude)[] GeoRadiusWithCoord<T>(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
// Instance.GeoRadiusWithCoord<T>(key, longitude, latitude, radius, unit, count, sorting);
/// <summary>
/// Returns the members within the given radius of the specified center coordinates,
/// together with each member's distance, longitude and latitude.
/// </summary>
/// <param name="key">Key (without prefix)</param>
/// <param name="longitude">Center longitude</param>
/// <param name="latitude">Center latitude</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">COUNT limits the returned elements. The server may still have to process all matches, so large-area queries can remain slow, but limiting the result set saves bandwidth.</param>
/// <param name="sorting">Sort order</param>
/// <returns>(member, distance, longitude, latitude) tuples</returns>
public static (string member, decimal dist, decimal longitude, decimal latitude)[] GeoRadiusWithDistAndCoord(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null)
{
    return Instance.GeoRadiusWithDistAndCoord(key, longitude, latitude, radius, unit, count, sorting);
}
/// <summary>
/// Returns the members within the given radius of the specified center coordinates, together with
/// each member's distance, longitude and latitude, deserializing members as <typeparamref name="T"/>.
/// </summary>
/// <param name="key">Key (without prefix)</param>
/// <param name="longitude">Center longitude</param>
/// <param name="latitude">Center latitude</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">COUNT limits the returned elements. The server may still have to process all matches, so large-area queries can remain slow, but limiting the result set saves bandwidth.</param>
/// <param name="sorting">Sort order</param>
/// <returns>(member, distance, longitude, latitude) tuples</returns>
public static (T member, decimal dist, decimal longitude, decimal latitude)[] GeoRadiusWithDistAndCoord<T>(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null)
{
    return Instance.GeoRadiusWithDistAndCoord<T>(key, longitude, latitude, radius, unit, count, sorting);
}
/// <summary>
/// Returns the members whose positions are within the given radius of the specified member.
/// </summary>
/// <param name="key">Key (without prefix)</param>
/// <param name="member">Center member</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">COUNT limits the returned elements. The server may still have to process all matches, so large-area queries can remain slow, but limiting the result set saves bandwidth.</param>
/// <param name="sorting">Sort order</param>
/// <returns>Matching member names</returns>
public static string[] GeoRadiusByMember(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null)
{
    return Instance.GeoRadiusByMember(key, member, radius, unit, count, sorting);
}
/// <summary>
/// Returns the members whose positions are within the given radius of the specified member,
/// deserializing each member as <typeparamref name="T"/>.
/// </summary>
/// <param name="key">Key (without prefix)</param>
/// <param name="member">Center member</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">COUNT limits the returned elements. The server may still have to process all matches, so large-area queries can remain slow, but limiting the result set saves bandwidth.</param>
/// <param name="sorting">Sort order</param>
/// <returns>Matching members</returns>
public static T[] GeoRadiusByMember<T>(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null)
{
    return Instance.GeoRadiusByMember<T>(key, member, radius, unit, count, sorting);
}
/// <summary>
/// Returns the members within the given radius of the specified member,
/// together with each member's distance from the center.
/// </summary>
/// <param name="key">Key (without prefix)</param>
/// <param name="member">Center member</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">COUNT limits the returned elements. The server may still have to process all matches, so large-area queries can remain slow, but limiting the result set saves bandwidth.</param>
/// <param name="sorting">Sort order</param>
/// <returns>(member, distance) pairs</returns>
public static (string member, decimal dist)[] GeoRadiusByMemberWithDist(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null)
{
    return Instance.GeoRadiusByMemberWithDist(key, member, radius, unit, count, sorting);
}
/// <summary>
/// Returns the members within the given radius of the specified member, together with each
/// member's distance, deserializing members as <typeparamref name="T"/>.
/// </summary>
/// <param name="key">Key (without prefix)</param>
/// <param name="member">Center member</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">COUNT limits the returned elements. The server may still have to process all matches, so large-area queries can remain slow, but limiting the result set saves bandwidth.</param>
/// <param name="sorting">Sort order</param>
/// <returns>(member, distance) pairs</returns>
public static (T member, decimal dist)[] GeoRadiusByMemberWithDist<T>(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null)
{
    return Instance.GeoRadiusByMemberWithDist<T>(key, member, radius, unit, count, sorting);
}
///// <summary>
///// 以给定的成员为中心, 返回键包含的位置元素当中, 与中心的距离不超过给定最大距离的所有位置元素(包含经度、纬度)。
///// </summary>
///// <param name="key">不含prefix前辍</param>
///// <param name="member">成员</param>
///// <param name="radius">距离</param>
///// <param name="unit">m 表示单位为米;km 表示单位为千米;mi 表示单位为英里;ft 表示单位为英尺;</param>
///// <param name="count">虽然用户可以使用 COUNT 选项去获取前 N 个匹配元素, 但是因为命令在内部可能会需要对所有被匹配的元素进行处理, 所以在对一个非常大的区域进行搜索时, 即使只使用 COUNT 选项去获取少量元素, 命令的执行速度也可能会非常慢。 但是从另一方面来说, 使用 COUNT 选项去减少需要返回的元素数量, 对于减少带宽来说仍然是非常有用的。</param>
///// <param name="sorting">排序</param>
///// <returns></returns>
//private static (string member, decimal longitude, decimal latitude)[] GeoRadiusByMemberWithCoord(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
// Instance.GeoRadiusByMemberWithCoord(key, member, radius, unit, count, sorting);
///// <summary>
///// 以给定的成员为中心, 返回键包含的位置元素当中, 与中心的距离不超过给定最大距离的所有位置元素(包含经度、纬度)。
///// </summary>
///// <param name="key">不含prefix前辍</param>
///// <param name="member">成员</param>
///// <param name="radius">距离</param>
///// <param name="unit">m 表示单位为米;km 表示单位为千米;mi 表示单位为英里;ft 表示单位为英尺;</param>
///// <param name="count">虽然用户可以使用 COUNT 选项去获取前 N 个匹配元素, 但是因为命令在内部可能会需要对所有被匹配的元素进行处理, 所以在对一个非常大的区域进行搜索时, 即使只使用 COUNT 选项去获取少量元素, 命令的执行速度也可能会非常慢。 但是从另一方面来说, 使用 COUNT 选项去减少需要返回的元素数量, 对于减少带宽来说仍然是非常有用的。</param>
///// <param name="sorting">排序</param>
///// <returns></returns>
//private static (T member, decimal longitude, decimal latitude)[] GeoRadiusByMemberWithCoord<T>(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
// Instance.GeoRadiusByMemberWithCoord<T>(key, member, radius, unit, count, sorting);
/// <summary>
/// Returns the members within the given radius of the specified member,
/// together with each member's distance, longitude and latitude.
/// </summary>
/// <param name="key">Key (without prefix)</param>
/// <param name="member">Center member</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">COUNT limits the returned elements. The server may still have to process all matches, so large-area queries can remain slow, but limiting the result set saves bandwidth.</param>
/// <param name="sorting">Sort order</param>
/// <returns>(member, distance, longitude, latitude) tuples</returns>
public static (string member, decimal dist, decimal longitude, decimal latitude)[] GeoRadiusByMemberWithDistAndCoord(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null)
{
    return Instance.GeoRadiusByMemberWithDistAndCoord(key, member, radius, unit, count, sorting);
}
/// <summary>
/// Returns the members within the given radius of the specified member, together with each
/// member's distance, longitude and latitude, deserializing members as <typeparamref name="T"/>.
/// </summary>
/// <param name="key">Key (without prefix)</param>
/// <param name="member">Center member</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">COUNT limits the returned elements. The server may still have to process all matches, so large-area queries can remain slow, but limiting the result set saves bandwidth.</param>
/// <param name="sorting">Sort order</param>
/// <returns>(member, distance, longitude, latitude) tuples</returns>
public static (T member, decimal dist, decimal longitude, decimal latitude)[] GeoRadiusByMemberWithDistAndCoord<T>(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null)
{
    return Instance.GeoRadiusByMemberWithDistAndCoord<T>(key, member, radius, unit, count, sorting);
}
#endregion
/// <summary>
/// Acquires a distributed lock; returns null on timeout.
/// </summary>
/// <param name="name">Lock name</param>
/// <param name="timeoutSeconds">Timeout in seconds</param>
/// <param name="autoDelay">Automatically extend the lock's expiration: a watchdog fires at timeoutSeconds/2 and renews the lock to timeoutSeconds, so it never expires unless the process dies.</param>
/// <returns>The acquired lock handle, or null if the lock could not be obtained in time</returns>
public static CSRedisClientLock Lock(string name, int timeoutSeconds, bool autoDelay = true) =>
    // BUGFIX: `autoDelay` was accepted but never forwarded, so callers passing
    // autoDelay: false still got the watchdog behavior. Forward it explicitly.
    Instance.Lock(name, timeoutSeconds, autoDelay);
} |
27182812/ChatGLM-LLaMA-chinese-insturct | 37,447 | src/transformers/models/cvt/modeling_tf_cvt.py | # coding=utf-8
# Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 Cvt model."""
import collections.abc
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import tensorflow as tf
from ...modeling_tf_outputs import TFImageClassifierOutputWithNoAttention
from ...modeling_tf_utils import (
TFModelInputType,
TFPreTrainedModel,
TFSequenceClassificationLoss,
get_initializer,
keras_serializable,
unpack_inputs,
)
from ...tf_utils import shape_list, stable_softmax
from ...utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_cvt import CvtConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "CvtConfig"
TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/cvt-13",
"microsoft/cvt-13-384",
"microsoft/cvt-13-384-22k",
"microsoft/cvt-21",
"microsoft/cvt-21-384",
"microsoft/cvt-21-384-22k",
# See all Cvt models at https://huggingface.co/models?filter=cvt
]
@dataclass
class TFBaseModelOutputWithCLSToken(ModelOutput):
    """
    Base class for model's outputs.

    Args:
        last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        cls_token_value (`tf.Tensor` of shape `(batch_size, 1, hidden_size)`):
            Classification token at the output of the last layer of the model.
        hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus
            the initial embedding outputs.
    """

    # Fields default to None so partial outputs can be constructed; ModelOutput
    # exposes them both as attributes and as dict-style entries.
    last_hidden_state: tf.Tensor = None
    cls_token_value: tf.Tensor = None
    hidden_states: Optional[Tuple[tf.Tensor]] = None
class TFCvtDropPath(tf.keras.layers.Layer):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    References:
        (1) github.com:rwightman/pytorch-image-models
    """

    def __init__(self, drop_prob: float, **kwargs):
        super().__init__(**kwargs)
        # Probability of zeroing out a sample's entire residual branch during training.
        self.drop_prob = drop_prob

    def call(self, x: tf.Tensor, training=None):
        # Identity at inference time, or when the layer is configured as a no-op.
        if self.drop_prob == 0.0 or not training:
            return x
        keep_prob = 1 - self.drop_prob
        # One random draw per sample; trailing 1s broadcast over all other axes.
        shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
        random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
        # floor() turns keep_prob + U(0,1) into a 0/1 keep mask.
        random_tensor = tf.floor(random_tensor)
        # Rescale surviving samples by 1/keep_prob so the expected output is unchanged.
        return (x / keep_prob) * random_tensor
class TFCvtEmbeddings(tf.keras.layers.Layer):
    """Construct the Convolutional Token Embeddings: conv patch embedding followed by dropout."""

    def __init__(
        self,
        config: CvtConfig,
        patch_size: int,
        embed_dim: int,
        stride: int,
        padding: int,
        dropout_rate: float,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # NOTE: the sublayer `name` is part of the checkpoint variable path; do not rename.
        self.convolution_embeddings = TFCvtConvEmbeddings(
            config,
            patch_size=patch_size,
            embed_dim=embed_dim,
            stride=stride,
            padding=padding,
            name="convolution_embeddings",
        )
        self.dropout = tf.keras.layers.Dropout(dropout_rate)

    def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor:
        # Project pixels to patch embeddings, then apply dropout (active only in training).
        hidden_state = self.convolution_embeddings(pixel_values)
        hidden_state = self.dropout(hidden_state, training=training)
        return hidden_state
class TFCvtConvEmbeddings(tf.keras.layers.Layer):
    """Image to Convolution Embeddings. This convolutional operation aims to model local spatial contexts."""

    def __init__(self, config: CvtConfig, patch_size: int, embed_dim: int, stride: int, padding: int, **kwargs):
        super().__init__(**kwargs)
        # Explicit zero-padding layer because the conv below uses padding="valid".
        self.padding = tf.keras.layers.ZeroPadding2D(padding=padding)
        # Normalize patch_size to a (height, width) tuple.
        self.patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        self.projection = tf.keras.layers.Conv2D(
            filters=embed_dim,
            kernel_size=patch_size,
            strides=stride,
            padding="valid",
            data_format="channels_last",
            kernel_initializer=get_initializer(config.initializer_range),
            name="projection",
        )
        # Using the same default epsilon as PyTorch
        self.normalization = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="normalization")

    def call(self, pixel_values: tf.Tensor) -> tf.Tensor:
        # Keras can feed the input as a dict when the model is called with named inputs.
        if isinstance(pixel_values, dict):
            pixel_values = pixel_values["pixel_values"]
        pixel_values = self.projection(self.padding(pixel_values))
        # "batch_size, height, width, num_channels -> batch_size, (height*width), num_channels"
        batch_size, height, width, num_channels = shape_list(pixel_values)
        hidden_size = height * width
        pixel_values = tf.reshape(pixel_values, shape=(batch_size, hidden_size, num_channels))
        # LayerNorm is applied over the flattened token sequence, matching the PyTorch model.
        pixel_values = self.normalization(pixel_values)
        # "batch_size, (height*width), num_channels -> batch_size, height, width, num_channels"
        pixel_values = tf.reshape(pixel_values, shape=(batch_size, height, width, num_channels))
        return pixel_values
class TFCvtSelfAttentionConvProjection(tf.keras.layers.Layer):
    """Convolutional projection layer: depth-wise conv (groups == channels) followed by batch norm."""

    def __init__(self, config: CvtConfig, embed_dim: int, kernel_size: int, stride: int, padding: int, **kwargs):
        super().__init__(**kwargs)
        # Explicit zero-padding since the conv uses padding="valid".
        self.padding = tf.keras.layers.ZeroPadding2D(padding=padding)
        self.convolution = tf.keras.layers.Conv2D(
            filters=embed_dim,
            kernel_size=kernel_size,
            kernel_initializer=get_initializer(config.initializer_range),
            padding="valid",
            strides=stride,
            use_bias=False,
            name="convolution",
            groups=embed_dim,  # groups == filters -> depth-wise convolution
        )
        # Using the same default epsilon as PyTorch, TF uses (1 - pytorch momentum)
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
        # Pad -> depth-wise conv -> batch norm (training flag controls BN statistics).
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state, training=training)
        return hidden_state
class TFCvtSelfAttentionLinearProjection(tf.keras.layers.Layer):
    """Linear projection layer used to flatten tokens into 1D (no trainable weights)."""

    def call(self, hidden_state: tf.Tensor) -> tf.Tensor:
        # "batch_size, height, width, num_channels -> batch_size, (height*width), num_channels"
        batch_size, height, width, num_channels = shape_list(hidden_state)
        hidden_size = height * width
        hidden_state = tf.reshape(hidden_state, shape=(batch_size, hidden_size, num_channels))
        return hidden_state
class TFCvtSelfAttentionProjection(tf.keras.layers.Layer):
    """Convolutional Projection for Attention: optional depth-wise conv + BN, then flatten to tokens."""

    def __init__(
        self,
        config: CvtConfig,
        embed_dim: int,
        kernel_size: int,
        stride: int,
        padding: int,
        projection_method: str = "dw_bn",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # NOTE(review): `convolution_projection` is only created for "dw_bn", yet `call`
        # uses it unconditionally — other projection_method values would fail. This mirrors
        # the reference implementation, where "dw_bn" is the method used in practice.
        if projection_method == "dw_bn":
            self.convolution_projection = TFCvtSelfAttentionConvProjection(
                config, embed_dim, kernel_size, stride, padding, name="convolution_projection"
            )
        self.linear_projection = TFCvtSelfAttentionLinearProjection()

    def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
        # Depth-wise conv projection in 2D, then flatten (B, H, W, C) -> (B, H*W, C).
        hidden_state = self.convolution_projection(hidden_state, training=training)
        hidden_state = self.linear_projection(hidden_state)
        return hidden_state
class TFCvtSelfAttention(tf.keras.layers.Layer):
    """
    Self-attention layer. A depth-wise separable convolution operation (Convolutional Projection), is applied for
    query, key, and value embeddings.
    """

    def __init__(
        self,
        config: CvtConfig,
        num_heads: int,
        embed_dim: int,
        kernel_size: int,
        stride_q: int,
        stride_kv: int,
        padding_q: int,
        padding_kv: int,
        qkv_projection_method: str,
        qkv_bias: bool,
        attention_drop_rate: float,
        with_cls_token: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Standard scaled dot-product attention scale: 1/sqrt(embed_dim).
        self.scale = embed_dim**-0.5
        self.with_cls_token = with_cls_token
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        # The query projection swaps "avg" for "linear"; keys/values use the configured method as-is.
        self.convolution_projection_query = TFCvtSelfAttentionProjection(
            config,
            embed_dim,
            kernel_size,
            stride_q,
            padding_q,
            projection_method="linear" if qkv_projection_method == "avg" else qkv_projection_method,
            name="convolution_projection_query",
        )
        self.convolution_projection_key = TFCvtSelfAttentionProjection(
            config,
            embed_dim,
            kernel_size,
            stride_kv,
            padding_kv,
            projection_method=qkv_projection_method,
            name="convolution_projection_key",
        )
        self.convolution_projection_value = TFCvtSelfAttentionProjection(
            config,
            embed_dim,
            kernel_size,
            stride_kv,
            padding_kv,
            projection_method=qkv_projection_method,
            name="convolution_projection_value",
        )
        # Dense projections applied after the convolutional projections, before head split.
        self.projection_query = tf.keras.layers.Dense(
            units=embed_dim,
            kernel_initializer=get_initializer(config.initializer_range),
            use_bias=qkv_bias,
            bias_initializer="zeros",
            name="projection_query",
        )
        self.projection_key = tf.keras.layers.Dense(
            units=embed_dim,
            kernel_initializer=get_initializer(config.initializer_range),
            use_bias=qkv_bias,
            bias_initializer="zeros",
            name="projection_key",
        )
        self.projection_value = tf.keras.layers.Dense(
            units=embed_dim,
            kernel_initializer=get_initializer(config.initializer_range),
            use_bias=qkv_bias,
            bias_initializer="zeros",
            name="projection_value",
        )
        self.dropout = tf.keras.layers.Dropout(attention_drop_rate)

    def rearrange_for_multi_head_attention(self, hidden_state: tf.Tensor) -> tf.Tensor:
        # (B, T, embed_dim) -> (B, num_heads, T, head_dim)
        batch_size, hidden_size, _ = shape_list(hidden_state)
        head_dim = self.embed_dim // self.num_heads
        hidden_state = tf.reshape(hidden_state, shape=(batch_size, hidden_size, self.num_heads, head_dim))
        hidden_state = tf.transpose(hidden_state, perm=(0, 2, 1, 3))
        return hidden_state

    def call(self, hidden_state: tf.Tensor, height: int, width: int, training: bool = False) -> tf.Tensor:
        if self.with_cls_token:
            # Peel off the CLS token so only spatial tokens go through the conv projections.
            cls_token, hidden_state = tf.split(hidden_state, [1, height * width], 1)
        # "batch_size, (height*width), num_channels -> batch_size, height, width, num_channels"
        batch_size, hidden_size, num_channels = shape_list(hidden_state)
        hidden_state = tf.reshape(hidden_state, shape=(batch_size, height, width, num_channels))
        key = self.convolution_projection_key(hidden_state, training=training)
        query = self.convolution_projection_query(hidden_state, training=training)
        value = self.convolution_projection_value(hidden_state, training=training)
        if self.with_cls_token:
            # Re-attach the CLS token ahead of the spatial tokens for q/k/v.
            query = tf.concat((cls_token, query), axis=1)
            key = tf.concat((cls_token, key), axis=1)
            value = tf.concat((cls_token, value), axis=1)
        head_dim = self.embed_dim // self.num_heads
        query = self.rearrange_for_multi_head_attention(self.projection_query(query))
        key = self.rearrange_for_multi_head_attention(self.projection_key(key))
        value = self.rearrange_for_multi_head_attention(self.projection_value(value))
        # Scaled dot-product attention over each head.
        attention_score = tf.matmul(query, key, transpose_b=True) * self.scale
        attention_probs = stable_softmax(logits=attention_score, axis=-1)
        attention_probs = self.dropout(attention_probs, training=training)
        context = tf.matmul(attention_probs, value)
        # "batch_size, num_heads, hidden_size, head_dim -> batch_size, hidden_size, (num_heads*head_dim)"
        _, _, hidden_size, _ = shape_list(context)
        context = tf.transpose(context, perm=(0, 2, 1, 3))
        context = tf.reshape(context, (batch_size, hidden_size, self.num_heads * head_dim))
        return context
class TFCvtSelfOutput(tf.keras.layers.Layer):
    """Output of the Attention layer: dense projection followed by dropout."""

    def __init__(self, config: CvtConfig, embed_dim: int, drop_rate: float, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            units=embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        self.dropout = tf.keras.layers.Dropout(drop_rate)

    def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor:
        hidden_state = self.dense(inputs=hidden_state)
        hidden_state = self.dropout(inputs=hidden_state, training=training)
        return hidden_state
class TFCvtAttention(tf.keras.layers.Layer):
    """Attention layer. First chunk of the convolutional transformer block: self-attention + output projection."""

    def __init__(
        self,
        config: CvtConfig,
        num_heads: int,
        embed_dim: int,
        kernel_size: int,
        stride_q: int,
        stride_kv: int,
        padding_q: int,
        padding_kv: int,
        qkv_projection_method: str,
        qkv_bias: bool,
        attention_drop_rate: float,
        drop_rate: float,
        with_cls_token: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.attention = TFCvtSelfAttention(
            config,
            num_heads,
            embed_dim,
            kernel_size,
            stride_q,
            stride_kv,
            padding_q,
            padding_kv,
            qkv_projection_method,
            qkv_bias,
            attention_drop_rate,
            with_cls_token,
            name="attention",
        )
        # Keras sublayer is named "output" to match the checkpoint variable path.
        self.dense_output = TFCvtSelfOutput(config, embed_dim, drop_rate, name="output")

    def prune_heads(self, heads):
        # Head pruning is not supported for this architecture.
        raise NotImplementedError

    def call(self, hidden_state: tf.Tensor, height: int, width: int, training: bool = False):
        # height/width are needed to reshape the token sequence back into a 2D grid.
        self_output = self.attention(hidden_state, height, width, training=training)
        attention_output = self.dense_output(self_output, training=training)
        return attention_output
class TFCvtIntermediate(tf.keras.layers.Layer):
    """Intermediate dense layer. Second chunk of the convolutional transformer block (MLP expansion with GELU)."""

    def __init__(self, config: CvtConfig, embed_dim: int, mlp_ratio: int, **kwargs):
        super().__init__(**kwargs)
        # Expands the hidden size by mlp_ratio; activation is fused into the Dense layer.
        self.dense = tf.keras.layers.Dense(
            units=int(embed_dim * mlp_ratio),
            kernel_initializer=get_initializer(config.initializer_range),
            activation="gelu",
            name="dense",
        )

    def call(self, hidden_state: tf.Tensor) -> tf.Tensor:
        hidden_state = self.dense(hidden_state)
        return hidden_state
class TFCvtOutput(tf.keras.layers.Layer):
    """
    Output of the Convolutional Transformer Block (last chunk). It consists of a MLP and a residual connection.
    """

    def __init__(self, config: CvtConfig, embed_dim: int, drop_rate: int, **kwargs):
        super().__init__(**kwargs)
        # Projects the expanded MLP hidden size back down to embed_dim.
        self.dense = tf.keras.layers.Dense(
            units=embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        self.dropout = tf.keras.layers.Dropout(drop_rate)

    def call(self, hidden_state: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
        hidden_state = self.dense(inputs=hidden_state)
        hidden_state = self.dropout(inputs=hidden_state, training=training)
        # Residual connection with the block's input.
        hidden_state = hidden_state + input_tensor
        return hidden_state
class TFCvtLayer(tf.keras.layers.Layer):
    """
    Convolutional Transformer Block composed by attention layers, normalization and multi-layer perceptrons (mlps). It
    consists of 3 chunks : an attention layer, an intermediate dense layer and an output layer. This corresponds to the
    `Block` class in the original implementation.
    """

    def __init__(
        self,
        config: CvtConfig,
        num_heads: int,
        embed_dim: int,
        kernel_size: int,
        stride_q: int,
        stride_kv: int,
        padding_q: int,
        padding_kv: int,
        qkv_projection_method: str,
        qkv_bias: bool,
        attention_drop_rate: float,
        drop_rate: float,
        mlp_ratio: float,
        drop_path_rate: float,
        with_cls_token: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.attention = TFCvtAttention(
            config,
            num_heads,
            embed_dim,
            kernel_size,
            stride_q,
            stride_kv,
            padding_q,
            padding_kv,
            qkv_projection_method,
            qkv_bias,
            attention_drop_rate,
            drop_rate,
            with_cls_token,
            name="attention",
        )
        self.intermediate = TFCvtIntermediate(config, embed_dim, mlp_ratio, name="intermediate")
        self.dense_output = TFCvtOutput(config, embed_dim, drop_rate, name="output")
        # Using `layers.Activation` instead of `tf.identity` to better control `training` behaviour.
        self.drop_path = (
            TFCvtDropPath(drop_path_rate, name="drop_path")
            if drop_path_rate > 0.0
            else tf.keras.layers.Activation("linear", name="drop_path")
        )
        # Using the same default epsilon as PyTorch
        self.layernorm_before = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_before")
        self.layernorm_after = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_after")

    def call(self, hidden_state: tf.Tensor, height: int, width: int, training: bool = False) -> tf.Tensor:
        # in Cvt, layernorm is applied before self-attention (pre-norm)
        attention_output = self.attention(self.layernorm_before(hidden_state), height, width, training=training)
        # Stochastic depth on the attention branch (identity at inference).
        attention_output = self.drop_path(attention_output, training=training)
        # first residual connection
        hidden_state = attention_output + hidden_state
        # in Cvt, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_state)
        layer_output = self.intermediate(layer_output)
        # second residual connection is done here (inside TFCvtOutput)
        layer_output = self.dense_output(layer_output, hidden_state)
        layer_output = self.drop_path(layer_output, training=training)
        return layer_output
class TFCvtStage(tf.keras.layers.Layer):
    """
    Cvt stage (encoder block). Each stage has 2 parts :
    - (1) A Convolutional Token Embedding layer
    - (2) A Convolutional Transformer Block (layer).
    The classification token is added only in the last stage.
    Args:
        config ([`CvtConfig`]): Model configuration class.
        stage (`int`): Stage number.
    """

    def __init__(self, config: CvtConfig, stage: int, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.stage = stage
        if self.config.cls_token[self.stage]:
            # Learnable classification token.
            # NOTE(review): the weight name hard-codes "stages.2" — presumably because
            # checkpoints only carry a cls token in the last stage; confirm if the config
            # ever enables it for another stage.
            self.cls_token = self.add_weight(
                shape=(1, 1, self.config.embed_dim[-1]),
                initializer=get_initializer(self.config.initializer_range),
                trainable=True,
                name="cvt.encoder.stages.2.cls_token",
            )
        # (1) Convolutional token embedding: downsamples and projects the feature map.
        self.embedding = TFCvtEmbeddings(
            self.config,
            patch_size=config.patch_sizes[self.stage],
            stride=config.patch_stride[self.stage],
            embed_dim=config.embed_dim[self.stage],
            padding=config.patch_padding[self.stage],
            dropout_rate=config.drop_rate[self.stage],
            name="embedding",
        )
        # Linearly spaced stochastic-depth rates, one value per block of this stage.
        drop_path_rates = tf.linspace(0.0, config.drop_path_rate[self.stage], config.depth[stage])
        drop_path_rates = [x.numpy().item() for x in drop_path_rates]
        # (2) The transformer blocks of this stage.
        # NOTE(review): every block receives drop_path_rates[self.stage] rather than a
        # per-block drop_path_rates[j]; this mirrors the reference implementation — verify
        # against the original CvT code before changing.
        self.layers = [
            TFCvtLayer(
                config,
                num_heads=config.num_heads[self.stage],
                embed_dim=config.embed_dim[self.stage],
                kernel_size=config.kernel_qkv[self.stage],
                stride_q=config.stride_q[self.stage],
                stride_kv=config.stride_kv[self.stage],
                padding_q=config.padding_q[self.stage],
                padding_kv=config.padding_kv[self.stage],
                qkv_projection_method=config.qkv_projection_method[self.stage],
                qkv_bias=config.qkv_bias[self.stage],
                attention_drop_rate=config.attention_drop_rate[self.stage],
                drop_rate=config.drop_rate[self.stage],
                mlp_ratio=config.mlp_ratio[self.stage],
                drop_path_rate=drop_path_rates[self.stage],
                with_cls_token=config.cls_token[self.stage],
                name=f"layers.{j}",
            )
            for j in range(config.depth[self.stage])
        ]

    def call(self, hidden_state: tf.Tensor, training: bool = False):
        """Runs the stage: token embedding, flatten, transformer blocks, reshape back.

        Returns a `(hidden_state, cls_token)` tuple; `cls_token` is None when this
        stage has no classification token.
        """
        cls_token = None
        hidden_state = self.embedding(hidden_state, training)
        # "batch_size, height, width, num_channels -> batch_size, (height*width), num_channels"
        batch_size, height, width, num_channels = shape_list(hidden_state)
        hidden_size = height * width
        hidden_state = tf.reshape(hidden_state, shape=(batch_size, hidden_size, num_channels))
        if self.config.cls_token[self.stage]:
            # Prepend one cls token per batch element.
            cls_token = tf.repeat(self.cls_token, repeats=batch_size, axis=0)
            hidden_state = tf.concat((cls_token, hidden_state), axis=1)
        for layer in self.layers:
            layer_outputs = layer(hidden_state, height, width, training=training)
            hidden_state = layer_outputs
        if self.config.cls_token[self.stage]:
            # Split the cls token back off before restoring the spatial layout.
            cls_token, hidden_state = tf.split(hidden_state, [1, height * width], 1)
        # "batch_size, (height*width), num_channels -> batch_size, height, width, num_channels"
        hidden_state = tf.reshape(hidden_state, shape=(batch_size, height, width, num_channels))
        return hidden_state, cls_token
class TFCvtEncoder(tf.keras.layers.Layer):
    """
    Convolutional Vision Transformer encoder. CVT has 3 stages of encoder blocks with their respective number of layers
    (depth) being 1, 2 and 10.
    Args:
        config ([`CvtConfig`]): Model configuration class.
    """

    config_class = CvtConfig

    def __init__(self, config: CvtConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        # One TFCvtStage per entry in config.depth; names match the checkpoint layout.
        self.stages = [
            TFCvtStage(config, stage_idx, name=f"stages.{stage_idx}") for stage_idx in range(len(config.depth))
        ]

    def call(
        self,
        pixel_values: TFModelInputType,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
        training: Optional[bool] = False,
    ) -> Union[TFBaseModelOutputWithCLSToken, Tuple[tf.Tensor]]:
        """Runs all stages and returns the last hidden state, the cls token, and
        (optionally) every stage's hidden state."""
        all_hidden_states = () if output_hidden_states else None
        hidden_state = pixel_values
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support (batch_size, num_channels, height, width)
        # as input format. So change the input format to (batch_size, height, width, num_channels).
        hidden_state = tf.transpose(hidden_state, perm=(0, 2, 3, 1))
        cls_token = None
        # Idiom fix: the index from `enumerate` was never used, iterate directly.
        for stage_module in self.stages:
            hidden_state, cls_token = stage_module(hidden_state, training=training)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_state,)
        # Change back to (batch_size, num_channels, height, width) format to have uniformity in the modules
        hidden_state = tf.transpose(hidden_state, perm=(0, 3, 1, 2))
        if output_hidden_states:
            all_hidden_states = tuple(tf.transpose(hs, perm=(0, 3, 1, 2)) for hs in all_hidden_states)
        if not return_dict:
            return tuple(v for v in [hidden_state, cls_token, all_hidden_states] if v is not None)
        return TFBaseModelOutputWithCLSToken(
            last_hidden_state=hidden_state,
            cls_token_value=cls_token,
            hidden_states=all_hidden_states,
        )
@keras_serializable
class TFCvtMainLayer(tf.keras.layers.Layer):
    """Construct the Cvt model: validates the input and delegates to the encoder."""

    config_class = CvtConfig

    def __init__(self, config: CvtConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.encoder = TFCvtEncoder(config, name="encoder")

    @unpack_inputs
    def call(
        self,
        pixel_values: Optional[TFModelInputType] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[TFBaseModelOutputWithCLSToken, Tuple[tf.Tensor]]:
        """Runs the encoder; returns a tuple or a TFBaseModelOutputWithCLSToken."""
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            # Tuple output: last hidden state first, then the remaining entries.
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithCLSToken(
            last_hidden_state=outputs[0],
            cls_token_value=outputs.cls_token_value,
            hidden_states=outputs.hidden_states,
        )
class TFCvtPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = CvtConfig
    base_model_prefix = "cvt"
    main_input_name = "pixel_values"

    @property
    def dummy_inputs(self) -> Dict[str, tf.Tensor]:
        """
        Dummy inputs to build the network.
        Returns:
            `Dict[str, tf.Tensor]`: The dummy inputs.
        """
        # Batch of 3 random NCHW images; the encoder transposes to NHWC internally.
        VISION_DUMMY_INPUTS = tf.random.uniform(shape=(3, self.config.num_channels, 224, 224), dtype=tf.float32)
        return {"pixel_values": tf.constant(VISION_DUMMY_INPUTS)}

    @tf.function(
        input_signature=[
            {
                # All dimensions left dynamic so any batch/channel/image size can be served.
                "pixel_values": tf.TensorSpec((None, None, None, None), tf.float32, name="pixel_values"),
            }
        ]
    )
    def serving(self, inputs):
        """
        Method used for serving the model.
        Args:
            inputs (`Dict[str, tf.Tensor]`):
                The input of the saved model as a dictionary of tensors.
        """
        output = self.call(inputs)
        return self.serving_output(output)
TFCVT_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
    TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the
tensors in the first argument of the model call function: `model(inputs)`.
</Tip>
Args:
config ([`CvtConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
TFCVT_INPUTS_DOCSTRING = r"""
Args:
        pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, with each example having the shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CvtImageProcessor.__call__`]
for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
    "The bare Cvt Model transformer outputting raw hidden-states without any specific head on top.",
    TFCVT_START_DOCSTRING,
)
class TFCvtModel(TFCvtPreTrainedModel):
    def __init__(self, config: CvtConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        # Core model; "cvt" matches the base_model_prefix and the checkpoint weight names.
        self.cvt = TFCvtMainLayer(config, name="cvt")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(TFCVT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFBaseModelOutputWithCLSToken, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        pixel_values: Optional[tf.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[TFBaseModelOutputWithCLSToken, Tuple[tf.Tensor]]:
        r"""
        Returns:
        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, TFCvtModel
        >>> from PIL import Image
        >>> import requests
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
        >>> model = TFCvtModel.from_pretrained("microsoft/cvt-13")
        >>> inputs = image_processor(images=image, return_tensors="tf")
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        # NOTE(review): this check is also performed inside TFCvtMainLayer.call;
        # it is kept here so the error is raised before entering the main layer.
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        outputs = self.cvt(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithCLSToken(
            last_hidden_state=outputs.last_hidden_state,
            cls_token_value=outputs.cls_token_value,
            hidden_states=outputs.hidden_states,
        )

    def serving_output(self, output: TFBaseModelOutputWithCLSToken) -> TFBaseModelOutputWithCLSToken:
        # Pass outputs through unchanged when serving a SavedModel.
        return TFBaseModelOutputWithCLSToken(
            last_hidden_state=output.last_hidden_state,
            cls_token_value=output.cls_token_value,
            hidden_states=output.hidden_states,
        )
@add_start_docstrings(
    """
    Cvt Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
    the [CLS] token) e.g. for ImageNet.
    """,
    TFCVT_START_DOCSTRING,
)
class TFCvtForImageClassification(TFCvtPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: CvtConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.cvt = TFCvtMainLayer(config, name="cvt")
        # Using same default epsilon as in the original implementation.
        self.layernorm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm")
        # Classifier head
        self.classifier = tf.keras.layers.Dense(
            units=config.num_labels,
            kernel_initializer=get_initializer(config.initializer_range),
            use_bias=True,
            bias_initializer="zeros",
            name="classifier",
        )

    @unpack_inputs
    @add_start_docstrings_to_model_forward(TFCVT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        pixel_values: Optional[tf.Tensor] = None,
        labels: Optional[tf.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[TFImageClassifierOutputWithNoAttention, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        Returns:
        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, TFCvtForImageClassification
        >>> import tensorflow as tf
        >>> from PIL import Image
        >>> import requests
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
        >>> model = TFCvtForImageClassification.from_pretrained("microsoft/cvt-13")
        >>> inputs = image_processor(images=image, return_tensors="tf")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> # model predicts one of the 1000 ImageNet classes
        >>> predicted_class_idx = tf.math.argmax(logits, axis=-1)[0]
        >>> print("Predicted class:", model.config.id2label[int(predicted_class_idx)])
        ```"""
        outputs = self.cvt(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        sequence_output = outputs[0]
        cls_token = outputs[1]
        if self.config.cls_token[-1]:
            # Classify from the normalized cls token when the last stage has one.
            sequence_output = self.layernorm(cls_token)
        else:
            # No cls token: mean-pool over all spatial positions instead.
            # rearrange "batch_size, num_channels, height, width -> batch_size, (height*width), num_channels"
            batch_size, num_channels, height, width = shape_list(sequence_output)
            sequence_output = tf.reshape(sequence_output, shape=(batch_size, num_channels, height * width))
            sequence_output = tf.transpose(sequence_output, perm=(0, 2, 1))
            sequence_output = self.layernorm(sequence_output)
        # Mean over the token axis; for the cls-token branch this averages a single token.
        sequence_output_mean = tf.reduce_mean(sequence_output, axis=1)
        logits = self.classifier(sequence_output_mean)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)

    def serving_output(self, output: TFImageClassifierOutputWithNoAttention) -> TFImageClassifierOutputWithNoAttention:
        # Loss is dropped when serving; only logits and hidden states are exported.
        return TFImageClassifierOutputWithNoAttention(logits=output.logits, hidden_states=output.hidden_states)
|
2881099/csredis | 6,331 | src/CSRedisCore/RedisSentinelClient.cs | using CSRedis.Internal;
using CSRedis.Internal.Commands;
using CSRedis.Internal.IO;
using System;
using System.Collections.Generic;
using System.Net;
using System.Runtime.Serialization;
using System.Text;
namespace CSRedis
{
    /// <summary>
    /// Represents a client connection to a Redis sentinel instance
    /// </summary>
    public partial class RedisSentinelClient : IDisposable
    {
        // Default sentinel port (distinct from the standard Redis port 6379).
        const int DefaultPort = 26379;
        const bool DefaultSSL = false;
        const int DefaultConcurrency = 1000;
        const int DefaultBufferSize = 1024;
        readonly RedisConnector _connector;
        readonly SubscriptionListener _subscription;

        /// <summary>
        /// Occurs when a subscription message is received
        /// </summary>
        public event EventHandler<RedisSubscriptionReceivedEventArgs> SubscriptionReceived;

        /// <summary>
        /// Occurs when a subscription channel is added or removed
        /// </summary>
        public event EventHandler<RedisSubscriptionChangedEventArgs> SubscriptionChanged;

        /// <summary>
        /// Occurs when the connection has successfully reconnected
        /// </summary>
        public event EventHandler Reconnected;

        /// <summary>
        /// Get the Redis sentinel hostname
        /// </summary>
        public string Host { get { return GetHost(); } }

        /// <summary>
        /// Get the Redis sentinel port
        /// </summary>
        public int Port { get { return GetPort(); } }

        /// <summary>
        /// Get a value indicating whether the Redis sentinel client is connected to the server
        /// </summary>
        public bool Connected { get { return _connector.IsConnected; } }

        /// <summary>
        /// Get the string encoding used to communicate with the server
        /// </summary>
        public Encoding Encoding { get { return _connector.Encoding; } }

        /// <summary>
        /// Get or set the connection read timeout (milliseconds)
        /// </summary>
        public int ReceiveTimeout
        {
            get { return _connector.ReceiveTimeout; }
            set { _connector.ReceiveTimeout = value; }
        }

        /// <summary>
        /// Get or set the connection send timeout (milliseconds)
        /// </summary>
        public int SendTimeout
        {
            get { return _connector.SendTimeout; }
            set { _connector.SendTimeout = value; }
        }

        /// <summary>
        /// Get or set the number of times to attempt a reconnect after a connection fails
        /// </summary>
        public int ReconnectAttempts
        {
            get { return _connector.ReconnectAttempts; }
            set { _connector.ReconnectAttempts = value; }
        }

        /// <summary>
        /// Get or set the amount of time to wait between reconnect attempts
        /// </summary>
        public int ReconnectWait
        {
            get { return _connector.ReconnectWait; }
            set { _connector.ReconnectWait = value; }
        }

        /// <summary>
        /// Create a new RedisSentinelClient using default port and encoding
        /// </summary>
        /// <param name="host">Redis sentinel hostname</param>
        public RedisSentinelClient(string host)
            : this(host, DefaultPort)
        { }

        /// <summary>
        /// Create a new RedisSentinelClient using default encoding
        /// </summary>
        /// <param name="host">Redis sentinel hostname</param>
        /// <param name="port">Redis sentinel port</param>
        public RedisSentinelClient(string host, int port)
            : this(host, port, DefaultSSL)
        { }

        /// <summary>
        /// Create a new RedisSentinelClient using default encoding
        /// </summary>
        /// <param name="host">Redis sentinel hostname</param>
        /// <param name="port">Redis sentinel port</param>
        /// <param name="ssl">Set to true if remote Redis server expects SSL</param>
        public RedisSentinelClient(string host, int port, bool ssl)
            : this(new RedisSocket(ssl), new DnsEndPoint(host, port), DefaultConcurrency, DefaultBufferSize)
        { }

        internal RedisSentinelClient(IRedisSocket socket, EndPoint endpoint)
            : this(socket, endpoint, DefaultConcurrency, DefaultBufferSize)
        { }

        // Designated constructor: wires up the connector and subscription listener events.
        internal RedisSentinelClient(IRedisSocket socket, EndPoint endpoint, int concurrency, int bufferSize)
        {
            _connector = new RedisConnector(endpoint, socket, concurrency, bufferSize);
            _subscription = new SubscriptionListener(_connector);
            _subscription.MessageReceived += OnSubscriptionReceived;
            _subscription.Changed += OnSubscriptionChanged;
            _connector.Connected += OnConnectionReconnected;
        }

        /// <summary>
        /// Release resources used by the current RedisSentinelClient
        /// </summary>
        public void Dispose()
        {
            if (_connector != null)
                _connector.Dispose();
        }

        // Forwards subscription messages to any attached SubscriptionReceived handlers.
        void OnSubscriptionReceived(object sender, RedisSubscriptionReceivedEventArgs args)
        {
            if (SubscriptionReceived != null)
                SubscriptionReceived(this, args);
        }

        // Forwards channel add/remove notifications to any attached SubscriptionChanged handlers.
        void OnSubscriptionChanged(object sender, RedisSubscriptionChangedEventArgs args)
        {
            if (SubscriptionChanged != null)
                SubscriptionChanged(this, args);
        }

        // Forwards reconnect notifications to any attached Reconnected handlers.
        void OnConnectionReconnected(object sender, EventArgs args)
        {
            if (Reconnected != null)
                Reconnected(this, args);
        }

        // Resolves the host string from the connector's endpoint (IP or DNS); null otherwise.
        string GetHost()
        {
            if (_connector.EndPoint is IPEndPoint)
                return (_connector.EndPoint as IPEndPoint).Address.ToString();
            else if (_connector.EndPoint is DnsEndPoint)
                return (_connector.EndPoint as DnsEndPoint).Host;
            else
                return null;
        }

        // Resolves the port from the connector's endpoint (IP or DNS); -1 otherwise.
        int GetPort()
        {
            if (_connector.EndPoint is IPEndPoint)
                return (_connector.EndPoint as IPEndPoint).Port;
            else if (_connector.EndPoint is DnsEndPoint)
                return (_connector.EndPoint as DnsEndPoint).Port;
            else
                return -1;
        }
    }
}
|
2881099/csredis | 2,378 | src/CSRedisCore/IRedisClient.cs | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace CSRedis
{
    /// <summary>
    /// Common properties of the RedisClient
    /// </summary>
    public interface IRedisClient : IDisposable
    {
        /// <summary>
        /// Occurs when a subscription message is received
        /// </summary>
        event EventHandler<RedisSubscriptionReceivedEventArgs> SubscriptionReceived;

        /// <summary>
        /// Occurs when a subscription channel is added or removed
        /// </summary>
        event EventHandler<RedisSubscriptionChangedEventArgs> SubscriptionChanged;

        /// <summary>
        /// Occurs when a transaction command is acknowledged by the server
        /// </summary>
        event EventHandler<RedisTransactionQueuedEventArgs> TransactionQueued;

        /// <summary>
        /// Occurs when a monitor message is received
        /// </summary>
        event EventHandler<RedisMonitorEventArgs> MonitorReceived;

        /// <summary>
        /// Occurs when the connection has successfully reconnected
        /// </summary>
        event EventHandler Connected;

        /// <summary>
        /// Get the Redis server hostname
        /// </summary>
        string Host { get; }

        /// <summary>
        /// Get the Redis server port
        /// </summary>
        int Port { get; }

        /// <summary>
        /// Get a value indicating whether the Redis client is connected to the server
        /// </summary>
        bool IsConnected { get; }

        /// <summary>
        /// Get or set the string encoding used to communicate with the server
        /// </summary>
        Encoding Encoding { get; set; }

        /// <summary>
        /// Get or set the connection read timeout (milliseconds)
        /// </summary>
        int ReceiveTimeout { get; set; }

        /// <summary>
        /// Get or set the connection send timeout (milliseconds)
        /// </summary>
        int SendTimeout { get; set; }

        /// <summary>
        /// Get or set the number of times to attempt a reconnect after a connection fails
        /// </summary>
        int ReconnectAttempts { get; set; }

        /// <summary>
        /// Get or set the amount of time (milliseconds) to wait between reconnect attempts
        /// </summary>
        int ReconnectWait { get; set; }
    }
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 13,579 | src/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert CvT checkpoints from the original repository.
URL: https://github.com/microsoft/CvT"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoFeatureExtractor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """
    Build the (huggingface_name, original_name) rename pairs for the patch-embedding
    layer of one stage.
    Args:
        idx: stage number in original model
    """
    hf_prefix = f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings"
    orig_prefix = f"stage{idx}.patch_embed"
    return [
        (f"{hf_prefix}.projection.weight", f"{orig_prefix}.proj.weight"),
        (f"{hf_prefix}.projection.bias", f"{orig_prefix}.proj.bias"),
        (f"{hf_prefix}.normalization.weight", f"{orig_prefix}.norm.weight"),
        (f"{hf_prefix}.normalization.bias", f"{orig_prefix}.norm.bias"),
    ]
def attention(idx, cnt):
    """
    Build the (huggingface_name, original_name) rename pairs for one transformer
    block: conv q/k/v projections (conv + batch norm), linear q/k/v projections,
    the attention output dense, the MLP, and both layer norms. Order matches the
    original hand-written list (34 pairs).
    Args:
        idx: stage number in original model
        cnt: count of blocks in each stage
    """
    block_hf = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    block_orig = f"stage{idx}.blocks.{cnt}"
    attn_hf = f"{block_hf}.attention.attention"
    attn_orig = f"{block_orig}.attn"
    qkv = (("query", "q"), ("key", "k"), ("value", "v"))
    bn_params = ("weight", "bias", "running_mean", "running_var", "num_batches_tracked")
    pairs = []
    # Convolutional q/k/v projections: one conv weight plus the batch-norm statistics each.
    for name, letter in qkv:
        conv_hf = f"{attn_hf}.convolution_projection_{name}.convolution_projection"
        conv_orig = f"{attn_orig}.conv_proj_{letter}"
        pairs.append((f"{conv_hf}.convolution.weight", f"{conv_orig}.conv.weight"))
        for param in bn_params:
            pairs.append((f"{conv_hf}.normalization.{param}", f"{conv_orig}.bn.{param}"))
    # Linear q/k/v projections.
    for name, letter in qkv:
        for param in ("weight", "bias"):
            pairs.append((f"{attn_hf}.projection_{name}.{param}", f"{attn_orig}.proj_{letter}.{param}"))
    # Attention output dense layer.
    for param in ("weight", "bias"):
        pairs.append((f"{block_hf}.attention.output.dense.{param}", f"{attn_orig}.proj.{param}"))
    # MLP: intermediate -> fc1, output -> fc2.
    for param in ("weight", "bias"):
        pairs.append((f"{block_hf}.intermediate.dense.{param}", f"{block_orig}.mlp.fc1.{param}"))
    for param in ("weight", "bias"):
        pairs.append((f"{block_hf}.output.dense.{param}", f"{block_orig}.mlp.fc2.{param}"))
    # Pre/post layer norms.
    for hf_name, orig_name in (("layernorm_before", "norm1"), ("layernorm_after", "norm2")):
        for param in ("weight", "bias"):
            pairs.append((f"{block_hf}.{hf_name}.{param}", f"{block_orig}.{orig_name}.{param}"))
    return pairs
def cls_token(idx):
    """
    Build the rename pair for the classification-token weight of stage `idx`.
    The original checkpoint always stores the cls token under "stage2" (it only
    exists in the last stage).
    """
    return [(f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token")]
def final():
    """
    Build the rename pairs for the final layer norm and classification head.
    """
    return [
        ("layernorm.weight", "norm.weight"),
        ("layernorm.bias", "norm.bias"),
        ("classifier.weight", "head.weight"),
        ("classifier.bias", "head.bias"),
    ]
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """
    Function to convert a Microsoft CvT checkpoint to a HuggingFace checkpoint.

    Args:
        cvt_model: Name of the CvT variant, e.g. "cvt-13", "cvt-21" or "cvt-w24";
            the depth configuration is derived from characters [4:6] of the name.
        image_size: Shortest-edge size to configure on the feature extractor.
        cvt_file_name: Path to the original Microsoft CvT ``.pth`` checkpoint.
        pytorch_dump_folder: Output directory for the converted model and
            feature extractor.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    # Load the ImageNet-1k id -> label mapping from the hub.
    # Use a context manager so the downloaded file handle is closed
    # (the original `json.load(open(...))` leaked the handle).
    with open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r") as f:
        id2label = json.load(f)
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # Derive the depth configuration from the model name, e.g. "cvt-13" -> "13".
    variant = cvt_model.rsplit("/", 1)[-1][4:6]
    if variant == "13":
        # Depth 13 = 1 + 2 + 10
        config.depth = [1, 2, 10]
    elif variant == "21":
        # Depth 21 = 1 + 4 + 16
        config.depth = [1, 4, 16]
    else:
        # Wide CvT (similar to wide-resnet): depth 24 = 2 + 2 + 20
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/convnext-base-224-22k-1k")
    feature_extractor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    # Build the full (huggingface_name, original_name) rename table:
    # per-stage cls_token and embeddings, per-block attention/MLP/norm weights,
    # then the final layernorm + classifier head.
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)

    # Copy every original tensor into the new state dict under its HF name.
    for hf_name, orig_name in list_of_state_dict:
        huggingface_weights[hf_name] = original_weights[orig_name]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    feature_extractor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        # Raw string: the original literal "cvtmodels\CvT-..." relies on the
        # invalid escape sequence "\C" (DeprecationWarning today, a SyntaxError
        # in future Python). The runtime value is unchanged.
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        # Original help text was a copy-paste of "Input Image Size".
        help="Path to the original Microsoft CvT checkpoint file.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 1,992 | src/transformers/models/m2m_100/__init__.py | # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Lazy-import table: maps each submodule name to the public names it exports.
# Consumed by _LazyModule below so submodules load only on first attribute access.
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

# The modeling code needs torch; register it only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the module is lazy.
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys

    # Replace this module object with a _LazyModule wrapper so that attribute
    # access triggers on-demand imports per _import_structure.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
2881099/csredis | 106,118 | src/CSRedisCore/RedisClient.Async.cs | using CSRedis.Internal.Commands;
using System;
using System.Collections.Generic;
using System.Collections.Concurrent;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
#if net40
#else
namespace CSRedis
{
public partial class RedisClient
{
private readonly SemaphoreSlim _semaphore = new SemaphoreSlim(1, 1);
/// <summary>
/// Open connection to redis server
/// </summary>
/// <returns>True on success</returns>
public Task<bool> ConnectAsync()
{
return _connector.ConnectAsync();
}
/// <summary>
/// Call arbitrary redis command
/// </summary>
/// <param name="command"></param>
/// <param name="args"></param>
/// <returns></returns>
public Task<object> CallAsync(string command, params string[] args)
{
return WriteAsync(RedisCommands.Call(command, args));
}
internal ConcurrentQueue<TaskCompletionSource<object>> _asyncPipe;
/// <summary>
/// Route a command through the active transaction, the async pipeline,
/// or directly to the connector.
/// </summary>
/// <typeparam name="T">Command result type</typeparam>
/// <param name="command">Command to send</param>
/// <returns>Task associated with the command's result</returns>
async Task<T> WriteAsync<T>(RedisCommand<T> command)
{
    if (_transaction.Active)
        return await _transaction.WriteAsync(command);
    else if (_asyncPipe != null)
    {
        var tsc = new TaskCompletionSource<object>();
        // Acquire BEFORE entering the try block: the original awaited
        // WaitAsync inside the try, so a throwing/cancelled wait still hit
        // the finally and released a semaphore that was never acquired,
        // corrupting its count.
        await _semaphore.WaitAsync();
        try
        {
            // Enqueue the completion source and write under the lock so the
            // queue order matches the pipeline write order.
            _asyncPipe.Enqueue(tsc);
            _connector.Pipeline.Write(command);
        }
        finally
        {
            _semaphore.Release();
        }
        var ret = await tsc.Task;
        return (T)ret;
    }
    else
        return await _connector.CallAsync(command);
}
#region Connection
/// <summary>
/// Authenticate to the server
/// </summary>
/// <param name="password">Server password</param>
/// <returns>Task associated with status message</returns>
public Task<string> AuthAsync(string password)
{
return WriteAsync(RedisCommands.Auth(null, password));
}
/// <summary>
/// Echo the given string
/// </summary>
/// <param name="message">Message to echo</param>
/// <returns>Task associated with echo response</returns>
public Task<string> EchoAsync(string message)
{
return WriteAsync(RedisCommands.Echo(message));
}
/// <summary>
/// Ping the server
/// </summary>
/// <returns>Task associated with status message</returns>
public Task<string> PingAsync()
{
return WriteAsync(RedisCommands.Ping());
}
/// <summary>
/// Close the connection
/// </summary>
/// <returns>Task associated with status message</returns>
/// <summary>
/// Close the connection
/// </summary>
/// <returns>Task associated with status message</returns>
public Task<string> QuitAsync()
{
    return WriteAsync(RedisCommands.Quit())
        .ContinueWith<string>(t =>
        {
            // Dispose the connector once QUIT completes, then surface the reply.
            // NOTE(review): if QUIT faults, t.Result rethrows an AggregateException
            // (not the original exception), and Dispose still runs first —
            // confirm callers expect that wrapping/order.
            _connector.Dispose();
            return t.Result;
        });
}
/// <summary>
/// Change the selected database for the current connection
/// </summary>
/// <param name="index">Zero-based database index</param>
/// <returns>Status message</returns>
public Task<string> SelectAsync(int index)
{
return WriteAsync(RedisCommands.Select(index));
}
#endregion
#region Keys
/// <summary>
/// [redis-server 3.2.1] 修改指定key(s) 最后访问时间 若key不存在,不做操作
/// </summary>
/// <param name="keys">Keys</param>
/// <returns></returns>
public Task<long> TouchAsync(params string[] keys)
{
return WriteAsync(RedisCommands.Touch(keys));
}
/// <summary>
/// [redis-server 4.0.0] Delete a key, 该命令和DEL十分相似:删除指定的key(s),若key不存在则该key被跳过。但是,相比DEL会产生阻塞,该命令会在另一个线程中回收内存,因此它是非阻塞的。 这也是该命令名字的由来:仅将keys从keyspace元数据中删除,真正的删除会在后续异步操作。
/// </summary>
/// <param name="keys">Keys to delete</param>
/// <returns>Number of keys removed</returns>
public Task<long> UnLinkAsync(params string[] keys)
{
return WriteAsync(RedisCommands.UnLink(keys));
}
/// <summary>
/// Delete a key
/// </summary>
/// <param name="keys">Keys to delete</param>
/// <returns></returns>
public Task<long> DelAsync(params string[] keys)
{
return WriteAsync(RedisCommands.Del(keys));
}
/// <summary>
/// Return a serialized version of the value stored at the specified key
/// </summary>
/// <param name="key">Key to dump</param>
/// <returns></returns>
public Task<byte[]> DumpAsync(string key)
{
return WriteAsync(RedisCommands.Dump(key));
}
/// <summary>
/// Determine if a key exists
/// </summary>
/// <param name="key">Key to check</param>
/// <returns></returns>
public Task<bool> ExistsAsync(string key)
{
return WriteAsync(RedisCommands.Exists(key));
}
/// <summary>
/// Determine how many of the given keys exist
/// </summary>
/// <param name="keys">Keys to check</param>
/// <returns>Count of existing keys</returns>
public Task<long> ExistsAsync(string[] keys)
{
    return WriteAsync(RedisCommands.Exists(keys));
}
/// <summary>
/// Set a key's time to live in seconds
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="expiration">Expiration (nearest second)</param>
/// <returns></returns>
public Task<bool> ExpireAsync(string key, int expiration)
{
return WriteAsync(RedisCommands.Expire(key, expiration));
}
/// <summary>
/// Set a key's time to live in seconds
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="expiration">Expiration in seconds</param>
/// <returns></returns>
public Task<bool> ExpireAsync(string key, TimeSpan expiration)
{
return WriteAsync(RedisCommands.Expire(key, expiration));
}
/// <summary>
/// Set the expiration for a key (nearest second)
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="expirationDate">Date of expiration, to nearest second</param>
/// <returns></returns>
public Task<bool> ExpireAtAsync(string key, DateTime expirationDate)
{
return WriteAsync(RedisCommands.ExpireAt(key, expirationDate));
}
/// <summary>
/// Set the expiration for a key as a UNIX timestamp
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="timestamp"></param>
/// <returns></returns>
public Task<bool> ExpireAtAsync(string key, int timestamp)
{
return WriteAsync(RedisCommands.ExpireAt(key, timestamp));
}
/// <summary>
/// Find all keys matching the given pattern
/// </summary>
/// <param name="pattern">Pattern to match</param>
/// <returns></returns>
public Task<string[]> KeysAsync(string pattern)
{
return WriteAsync(RedisCommands.Keys(pattern));
}
/// <summary>
/// Atomically transfer a key from a Redis instance to another one
/// </summary>
/// <param name="host">Remote Redis host</param>
/// <param name="port">Remote Redis port</param>
/// <param name="key">Key to migrate</param>
/// <param name="destinationDb">Remote database ID</param>
/// <param name="timeout">Timeout in milliseconds</param>
/// <returns></returns>
public Task<string> MigrateAsync(string host, int port, string key, int destinationDb, int timeout)
{
return WriteAsync(RedisCommands.Migrate(host, port, key, destinationDb, timeout));
}
/// <summary>
/// Atomically transfer a key from a Redis instance to another one
/// </summary>
/// <param name="host">Remote Redis host</param>
/// <param name="port">Remote Redis port</param>
/// <param name="key">Key to migrate</param>
/// <param name="destinationDb">Remote database ID</param>
/// <param name="timeout">Timeout in milliseconds</param>
/// <returns></returns>
public Task<string> MigrateAsync(string host, int port, string key, int destinationDb, TimeSpan timeout)
{
return WriteAsync(RedisCommands.Migrate(host, port, key, destinationDb, timeout));
}
/// <summary>
/// Move a key to another database
/// </summary>
/// <param name="key">Key to move</param>
/// <param name="database">Database destination ID</param>
/// <returns></returns>
public Task<bool> MoveAsync(string key, int database)
{
return WriteAsync(RedisCommands.Move(key, database));
}
/// <summary>
/// Get the number of references of the value associated with the specified key
/// </summary>
/// <param name="arguments">Subcommand arguments</param>
/// <returns>The type of internal representation used to store the value at the specified key</returns>
public Task<string> ObjectEncodingAsync(params string[] arguments)
{
return WriteAsync(RedisCommands.ObjectEncoding(arguments));
}
/// <summary>
/// Inspect the internals of Redis objects
/// </summary>
/// <param name="subCommand">Type of Object command to send</param>
/// <param name="arguments">Subcommand arguments</param>
/// <returns>Varies depending on subCommand</returns>
public Task<long?> ObjectAsync(RedisObjectSubCommand subCommand, params string[] arguments)
{
return WriteAsync(RedisCommands.Object(subCommand, arguments));
}
/// <summary>
/// Remove the expiration from a key
/// </summary>
/// <param name="key">Key to modify</param>
/// <returns></returns>
public Task<bool> PersistAsync(string key)
{
return WriteAsync(RedisCommands.Persist(key));
}
/// <summary>
/// Set a key's time to live in milliseconds
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="expiration">Expiration (nearest millisecond)</param>
/// <returns></returns>
public Task<bool> PExpireAsync(string key, TimeSpan expiration)
{
return WriteAsync(RedisCommands.PExpire(key, expiration));
}
/// <summary>
/// Set a key's time to live in milliseconds
/// </summary>
/// <param name="key">Key</param>
/// <param name="milliseconds">Expiration in milliseconds</param>
/// <returns></returns>
public Task<bool> PExpireAsync(string key, long milliseconds)
{
return WriteAsync(RedisCommands.PExpire(key, milliseconds));
}
/// <summary>
/// Set the expiration for a key (nearest millisecond)
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="date">Expiration date</param>
/// <returns></returns>
public Task<bool> PExpireAtAsync(string key, DateTime date)
{
return WriteAsync(RedisCommands.PExpireAt(key, date));
}
/// <summary>
/// Set the expiration for a key as a UNIX timestamp specified in milliseconds
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="timestamp">Expiration timestamp (milliseconds)</param>
/// <returns></returns>
public Task<bool> PExpireAtAsync(string key, long timestamp)
{
return WriteAsync(RedisCommands.PExpireAt(key, timestamp));
}
/// <summary>
/// Get the time to live for a key in milliseconds
/// </summary>
/// <param name="key">Key to check</param>
/// <returns></returns>
public Task<long> PTtlAsync(string key)
{
return WriteAsync(RedisCommands.PTtl(key));
}
/// <summary>
/// Return a random key from the keyspace
/// </summary>
/// <returns></returns>
public Task<string> RandomKeyAsync()
{
return WriteAsync(RedisCommands.RandomKey());
}
/// <summary>
/// Rename a key
/// </summary>
/// <param name="key">Key to rename</param>
/// <param name="newKey">New key name</param>
/// <returns></returns>
public Task<string> RenameAsync(string key, string newKey)
{
return WriteAsync(RedisCommands.Rename(key, newKey));
}
/// <summary>
/// Rename a key, only if the new key does not exist
/// </summary>
/// <param name="key">Key to rename</param>
/// <param name="newKey">New key name</param>
/// <returns></returns>
public Task<bool> RenameNxAsync(string key, string newKey)
{
return WriteAsync(RedisCommands.RenameNx(key, newKey));
}
/// <summary>
/// Create a key using the provided serialized value, previously obtained using dump
/// </summary>
/// <param name="key">Key to restore</param>
/// <param name="ttlMilliseconds">Time-to-live in milliseconds</param>
/// <param name="serializedValue">Serialized value from DUMP</param>
/// <returns></returns>
public Task<string> RestoreAsync(string key, long ttlMilliseconds, byte[] serializedValue)
{
return WriteAsync(RedisCommands.Restore(key, ttlMilliseconds, serializedValue));
}
/// <summary>
/// Sort the elements in a list, set or sorted set
/// </summary>
/// <param name="key">Key to sort</param>
/// <param name="offset">Number of elements to skip</param>
/// <param name="count">Number of elements to return</param>
/// <param name="by">Sort by external key</param>
/// <param name="dir">Sort direction</param>
/// <param name="isAlpha">Sort lexicographically</param>
/// <param name="get">Retrieve external keys</param>
/// <returns></returns>
public Task<string[]> SortAsync(string key, long? offset = null, long? count = null, string by = null, RedisSortDir? dir = null, bool? isAlpha = null, params string[] get)
{
return WriteAsync(RedisCommands.Sort(key, offset, count, by, dir, isAlpha, get));
}
/// <summary>
/// Sort the elements in a list, set or sorted set, then store the result in a new list
/// </summary>
/// <param name="key">Key to sort</param>
/// <param name="destination">Destination key name of stored sort</param>
/// <param name="offset">Number of elements to skip</param>
/// <param name="count">Number of elements to return</param>
/// <param name="by">Sort by external key</param>
/// <param name="dir">Sort direction</param>
/// <param name="isAlpha">Sort lexicographically</param>
/// <param name="get">Retrieve external keys</param>
/// <returns></returns>
public Task<long> SortAndStoreAsync(string key, string destination, long? offset = null, long? count = null, string by = null, RedisSortDir? dir = null, bool? isAlpha = null, params string[] get)
{
return WriteAsync(RedisCommands.SortAndStore(key, destination, offset, count, by, dir, isAlpha, get));
}
/// <summary>
/// Get the time to live for a key
/// </summary>
/// <param name="key">Key to check</param>
/// <returns></returns>
public Task<long> TtlAsync(string key)
{
return WriteAsync(RedisCommands.Ttl(key));
}
/// <summary>
/// Determine the type stored at key
/// </summary>
/// <param name="key">Key to check</param>
/// <returns></returns>
public Task<string> TypeAsync(string key)
{
return WriteAsync(RedisCommands.Type(key));
}
/// <summary>
/// Iterate the set of keys in the currently selected Redis database
/// </summary>
/// <param name="cursor">The cursor returned by the server in the previous call, or 0 if this is the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Set the maximum number of elements to return</param>
/// <returns>Updated cursor and result set</returns>
public Task<RedisScan<string>> ScanAsync(long cursor, string pattern = null, long? count = null)
{
return WriteAsync(RedisCommands.Scan(cursor, pattern, count));
}
/// <summary>
/// Iterate the set of keys in the currently selected Redis database, returning keys as raw bytes
/// </summary>
/// <param name="cursor">The cursor returned by the server in the previous call, or 0 if this is the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Set the maximum number of elements to return</param>
/// <returns>Updated cursor and result set as byte arrays</returns>
public Task<RedisScan<byte[]>> ScanBytesAsync(long cursor, string pattern = null, long? count = null)
{
    return WriteAsync(RedisCommands.ScanBytes(cursor, pattern, count));
}
#endregion
#region Hashes
/// <summary>
/// [redis-server 3.2.0] 返回hash指定field的value的字符串长度,如果hash或者field不存在,返回0.
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field</param>
/// <returns></returns>
public Task<long> HStrLenAsync(string key, string field)
{
return WriteAsync(RedisCommands.HStrLen(key, field));
}
/// <summary>
/// Delete one or more hash fields
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="fields">Fields to delete</param>
/// <returns>Number of fields removed from hash</returns>
public Task<long> HDelAsync(string key, params string[] fields)
{
return WriteAsync(RedisCommands.HDel(key, fields));
}
/// <summary>
/// Determine if a hash field exists
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field to check</param>
/// <returns>True if hash field exists</returns>
public Task<bool> HExistsAsync(string key, string field)
{
return WriteAsync(RedisCommands.HExists(key, field));
}
/// <summary>
/// Get the value of a hash field
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field to get</param>
/// <returns>Value of hash field</returns>
public Task<string> HGetAsync(string key, string field)
{
return WriteAsync(RedisCommands.HGet(key, field));
}
/// <summary>
/// Get the value of a hash field as raw bytes
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field to get</param>
/// <returns>Value of hash field as a byte array</returns>
public Task<byte[]> HGetBytesAsync(string key, string field)
{
    return WriteAsync(RedisCommands.HGetBytes(key, field));
}
/// <summary>
/// Get all the fields and values in a hash
/// </summary>
/// <typeparam name="T">Object to map hash</typeparam>
/// <param name="key">Hash key</param>
/// <returns>Strongly typed object mapped from hash</returns>
public Task<T> HGetAllAsync<T>(string key)
where T : class
{
return WriteAsync(RedisCommands.HGetAll<T>(key));
}
/// <summary>
/// Get all the fields and values in a hash
/// </summary>
/// <param name="key">Hash key</param>
/// <returns>Dictionary mapped from string</returns>
public Task<Dictionary<string, string>> HGetAllAsync(string key)
{
return WriteAsync(RedisCommands.HGetAll(key));
}
public Task<Dictionary<string, byte[]>> HGetAllBytesAsync(string key)
{
return WriteAsync(RedisCommands.HGetAllBytes(key));
}
/// <summary>
/// Increment the integer value of a hash field by the given number
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field to increment</param>
/// <param name="increment">Increment value</param>
/// <returns>Value of field after increment</returns>
public Task<long> HIncrByAsync(string key, string field, long increment)
{
return WriteAsync(RedisCommands.HIncrBy(key, field, increment));
}
/// <summary>
/// Increment the float value of a hash field by the given number
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field to increment</param>
/// <param name="increment">Increment value</param>
/// <returns>Value of field after increment</returns>
public Task<decimal> HIncrByFloatAsync(string key, string field, decimal increment)
{
return WriteAsync(RedisCommands.HIncrByFloat(key, field, increment));
}
/// <summary>
/// Get all the fields in a hash
/// </summary>
/// <param name="key">Hash key</param>
/// <returns>All hash field names</returns>
public Task<string[]> HKeysAsync(string key)
{
return WriteAsync(RedisCommands.HKeys(key));
}
/// <summary>
/// Get the number of fields in a hash
/// </summary>
/// <param name="key">Hash key</param>
/// <returns>Number of fields in hash</returns>
public Task<long> HLenAsync(string key)
{
return WriteAsync(RedisCommands.HLen(key));
}
/// <summary>
/// Get the values of all the given hash fields
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="fields">Fields to return</param>
/// <returns>Values of given fields</returns>
public Task<string[]> HMGetAsync(string key, params string[] fields)
{
return WriteAsync(RedisCommands.HMGet(key, fields));
}
public Task<byte[][]> HMGetBytesAsync(string key, params string[] fields)
{
return WriteAsync(RedisCommands.HMGetBytes(key, fields));
}
/// <summary>
/// Set multiple hash fields to multiple values
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="dict">Dictionary mapping of hash</param>
/// <returns>Status code</returns>
public Task<string> HMSetAsync(string key, Dictionary<string, object> dict)
{
return WriteAsync(RedisCommands.HMSet(key, dict));
}
/// <summary>
/// Set multiple hash fields to multiple values
/// </summary>
/// <typeparam name="T">Type of object to map hash</typeparam>
/// <param name="key">Hash key</param>
/// <param name="obj">Object mapping of hash</param>
/// <returns>Status code</returns>
public Task<string> HMSetAsync<T>(string key, T obj)
where T : class
{
return WriteAsync(RedisCommands.HMSet<T>(key, obj));
}
/// <summary>
/// Set multiple hash fields to multiple values
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="keyValues">Array of [key,value,key,value,..]</param>
/// <returns>Status code</returns>
public Task<string> HMSetAsync(string key, params object[] keyValues)
{
return WriteAsync(RedisCommands.HMSet(key, keyValues));
}
/// <summary>
/// Set the value of a hash field
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Hash field to set</param>
/// <param name="value">Value to set</param>
/// <returns>True if field is new</returns>
public Task<bool> HSetAsync(string key, string field, object value)
{
return WriteAsync(RedisCommands.HSet(key, field, value));
}
/// <summary>
/// Set the value of a hash field, only if the field does not exist
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Hash field to set</param>
/// <param name="value">Value to set</param>
/// <returns>True if field was set to value</returns>
public Task<bool> HSetNxAsync(string key, string field, object value)
{
return WriteAsync(RedisCommands.HSetNx(key, field, value));
}
/// <summary>
/// Get all the values in a hash
/// </summary>
/// <param name="key">Hash key</param>
/// <returns>Array of all values in hash</returns>
public Task<string[]> HValsAsync(string key)
{
return WriteAsync(RedisCommands.HVals(key));
}
public Task<byte[][]> HValsBytesAsync(string key)
{
return WriteAsync(RedisCommands.HValsBytes(key));
}
/// <summary>
/// Iterate the keys and values of a hash field
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="cursor">The cursor returned by the server in the previous call, or 0 if this is the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Maximum number of elements to return</param>
/// <returns>Updated cursor and result set</returns>
public Task<RedisScan<Tuple<string, string>>> HScanAsync(string key, long cursor, string pattern = null, long? count = null)
{
return WriteAsync(RedisCommands.HScan(key, cursor, pattern, count));
}
public Task<RedisScan<Tuple<string, byte[]>>> HScanBytesAsync(string key, long cursor, string pattern = null, long? count = null)
{
return WriteAsync(RedisCommands.HScanBytes(key, cursor, pattern, count));
}
#endregion
#region Lists
/// <summary>
/// Get an element from a list by its index
/// </summary>
/// <param name="key">List key</param>
/// <param name="index">Zero-based index of item to return</param>
/// <returns>Element at index</returns>
public Task<string> LIndexAsync(string key, long index)
{
return WriteAsync(RedisCommands.LIndex(key, index));
}
public Task<byte[]> LIndexBytesAsync(string key, long index)
{
return WriteAsync(RedisCommands.LIndexBytes(key, index));
}
/// <summary>
/// Insert an element before or after another element in a list
/// </summary>
/// <param name="key">List key</param>
/// <param name="insertType">Relative position</param>
/// <param name="pivot">Relative element</param>
/// <param name="value">Element to insert</param>
/// <returns>Length of list after insert or -1 if pivot not found</returns>
public Task<long> LInsertAsync(string key, RedisInsert insertType, object pivot, object value)
{
return WriteAsync(RedisCommands.LInsert(key, insertType, pivot, value));
}
/// <summary>
/// Get the length of a list
/// </summary>
/// <param name="key">List key</param>
/// <returns>Length of list at key</returns>
public Task<long> LLenAsync(string key)
{
return WriteAsync(RedisCommands.LLen(key));
}
/// <summary>
/// Remove and get the first element in a list
/// </summary>
/// <param name="key">List key</param>
/// <returns>First element in list</returns>
public Task<string> LPopAsync(string key)
{
return WriteAsync(RedisCommands.LPop(key));
}
public Task<byte[]> LPopBytesAsync(string key)
{
return WriteAsync(RedisCommands.LPopBytes(key));
}
/// <summary>
/// Prepend one or multiple values to a list
/// </summary>
/// <param name="key">List key</param>
/// <param name="values">Values to push</param>
/// <returns>Length of list after push</returns>
public Task<long> LPushAsync(string key, params object[] values)
{
return WriteAsync(RedisCommands.LPush(key, values));
}
/// <summary>
/// Prepend a value to a list, only if the list exists
/// </summary>
/// <param name="key">List key</param>
/// <param name="value">Value to push</param>
/// <returns>Length of list after push</returns>
public Task<long> LPushXAsync(string key, object value)
{
return WriteAsync(RedisCommands.LPushX(key, value));
}
/// <summary>
/// Get a range of elements from a list
/// </summary>
/// <param name="key">List key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>List of elements in range</returns>
public Task<string[]> LRangeAsync(string key, long start, long stop)
{
return WriteAsync(RedisCommands.LRange(key, start, stop));
}
public Task<byte[][]> LRangeBytesAsync(string key, long start, long stop)
{
return WriteAsync(RedisCommands.LRangeBytes(key, start, stop));
}
/// <summary>
/// Remove elements from a list
/// </summary>
/// <param name="key">List key</param>
/// <param name="count">>0: remove N elements from head to tail; <0: remove N elements from tail to head; =0: remove all elements</param>
/// <param name="value">Remove elements equal to value</param>
/// <returns>Number of removed elements</returns>
public Task<long> LRemAsync(string key, long count, object value)
{
return WriteAsync(RedisCommands.LRem(key, count, value));
}
/// <summary>
/// Set the value of an element in a list by its index
/// </summary>
/// <param name="key">List key</param>
/// <param name="index">List index to modify</param>
/// <param name="value">New element value</param>
/// <returns>Status code</returns>
public Task<string> LSetAsync(string key, long index, object value)
{
return WriteAsync(RedisCommands.LSet(key, index, value));
}
/// <summary>
/// Trim a list to the specified range
/// </summary>
/// <param name="key">List key</param>
/// <param name="start">Zero-based start index</param>
/// <param name="stop">Zero-based stop index</param>
/// <returns>Status code</returns>
public Task<string> LTrimAsync(string key, long start, long stop)
{
return WriteAsync(RedisCommands.LTrim(key, start, stop));
}
/// <summary>
/// Remove and get the last elment in a list
/// </summary>
/// <param name="key">List key</param>
/// <returns>Value of last list element</returns>
public Task<string> RPopAsync(string key)
{
return WriteAsync(RedisCommands.RPop(key));
}
public Task<byte[]> RPopBytesAsync(string key)
{
return WriteAsync(RedisCommands.RPopBytes(key));
}
/// <summary>
/// Remove the last elment in a list, append it to another list and return it
/// </summary>
/// <param name="source">List source key</param>
/// <param name="destination">Destination key</param>
/// <returns>Element being popped and pushed</returns>
public Task<string> RPopLPushAsync(string source, string destination)
{
return WriteAsync(RedisCommands.RPopLPush(source, destination));
}
public Task<byte[]> RPopBytesLPushAsync(string source, string destination)
{
return WriteAsync(RedisCommands.RPopBytesLPush(source, destination));
}
/// <summary>
/// Append one or multiple values to a list
/// </summary>
/// <param name="key">List key</param>
/// <param name="values">Values to push</param>
/// <returns>Length of list after push</returns>
public Task<long> RPushAsync(string key, params object[] values)
{
return WriteAsync(RedisCommands.RPush(key, values));
}
/// <summary>
/// Append a value to a list, only if the list exists
/// </summary>
/// <param name="key">List key</param>
/// <param name="value">Value to push</param>
/// <returns>Length of list after push</returns>
public Task<long> RPushXAsync(string key, object value)
{
return WriteAsync(RedisCommands.RPushX(key, value));
}
#endregion
#region Sets
/// <summary>
/// Add one or more members to a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="members">Members to add to set</param>
/// <returns>Number of elements added to set</returns>
public Task<long> SAddAsync(string key, params object[] members)
{
return WriteAsync(RedisCommands.SAdd(key, members));
}
/// <summary>
/// Get the number of members in a set
/// </summary>
/// <param name="key">Set key</param>
/// <returns>Number of elements in set</returns>
public Task<long> SCardAsync(string key)
{
return WriteAsync(RedisCommands.SCard(key));
}
/// <summary>
/// Subtract multiple sets
/// </summary>
/// <param name="keys">Set keys to subtract</param>
/// <returns>Array of elements in resulting set</returns>
public Task<string[]> SDiffAsync(params string[] keys)
{
return WriteAsync(RedisCommands.SDiff(keys));
}
public Task<byte[][]> SDiffBytesAsync(params string[] keys)
{
return WriteAsync(RedisCommands.SDiffBytes(keys));
}
/// <summary>
/// Subtract multiple sets and store the resulting set in a key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="keys">Set keys to subtract</param>
/// <returns>Number of elements in the resulting set</returns>
public Task<long> SDiffStoreAsync(string destination, params string[] keys)
{
return WriteAsync(RedisCommands.SDiffStore(destination, keys));
}
/// <summary>
/// Intersect multiple sets
/// </summary>
/// <param name="keys">Set keys to intersect</param>
/// <returns>Array of elements in resulting set</returns>
public Task<string[]> SInterAsync(params string[] keys)
{
return WriteAsync(RedisCommands.SInter(keys));
}
public Task<byte[][]> SInterBytesAsync(params string[] keys)
{
return WriteAsync(RedisCommands.SInterBytes(keys));
}
/// <summary>
/// Intersect multiple sets and store the resulting set in a key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="keys">Set keys to intersect</param>
/// <returns>Number of elements in resulting set</returns>
public Task<long> SInterStoreAsync(string destination, params string[] keys)
{
return WriteAsync(RedisCommands.SInterStore(destination, keys));
}
/// <summary>
/// Determine if a given value is a member of a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="member">Member to lookup</param>
/// <returns>True if member exists in set</returns>
public Task<bool> SIsMemberAsync(string key, object member)
{
return WriteAsync(RedisCommands.SIsMember(key, member));
}
/// <summary>
/// Get all the members in a set
/// </summary>
/// <param name="key">Set key</param>
/// <returns>All elements in the set</returns>
public Task<string[]> SMembersAsync(string key)
{
return WriteAsync(RedisCommands.SMembers(key));
}
public Task<byte[][]> SMembersBytesAsync(string key)
{
return WriteAsync(RedisCommands.SMembersBytes(key));
}
/// <summary>
/// Move a member from one set to another
/// </summary>
/// <param name="source">Source key</param>
/// <param name="destination">Destination key</param>
/// <param name="member">Member to move</param>
/// <returns>True if element was moved</returns>
public Task<bool> SMoveAsync(string source, string destination, object member)
{
return WriteAsync(RedisCommands.SMove(source, destination, member));
}
/// <summary>
/// Remove and return a random member from a set
/// </summary>
/// <param name="key">Set key</param>
/// <returns>The removed element</returns>
public Task<string> SPopAsync(string key)
{
return WriteAsync(RedisCommands.SPop(key));
}
public Task<byte[]> SPopBytesAsync(string key)
{
return WriteAsync(RedisCommands.SPopBytes(key));
}
/// <summary>
/// Remove and return one or more random members from a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="count">Number of elements to remove and return</param>
/// <returns></returns>
public Task<string[]> SPopAsync(string key, long count)
{
return WriteAsync(RedisCommands.SPop(key, count));
}
public Task<byte[][]> SPopBytesAsync(string key, long count)
{
return WriteAsync(RedisCommands.SPopBytes(key, count));
}
/// <summary>
/// Get a random member from a set
/// </summary>
/// <param name="key">Set key</param>
/// <returns>One random element from set</returns>
public Task<string> SRandMemberAsync(string key)
{
return WriteAsync(RedisCommands.SRandMember(key));
}
public Task<byte[]> SRandMemberBytesAsync(string key)
{
return WriteAsync(RedisCommands.SRandMemberBytes(key));
}
/// <summary>
/// Get one or more random members from a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="count">Number of elements to return</param>
/// <returns>One or more random elements from set</returns>
public Task<string[]> SRandMembersAsync(string key, long count)
{
return WriteAsync(RedisCommands.SRandMembers(key, count));
}
public Task<byte[][]> SRandMembersBytesAsync(string key, long count)
{
return WriteAsync(RedisCommands.SRandMembersBytes(key, count));
}
/// <summary>
/// Remove one or more members from a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="members">Set members to remove</param>
/// <returns>Number of elements removed from set</returns>
public Task<long> SRemAsync(string key, params object[] members)
{
return WriteAsync(RedisCommands.SRem(key, members));
}
/// <summary>
/// Add multiple sets
/// </summary>
/// <param name="keys">Set keys to union</param>
/// <returns>Array of elements in resulting set</returns>
public Task<string[]> SUnionAsync(params string[] keys)
{
return WriteAsync(RedisCommands.SUnion(keys));
}
public Task<byte[][]> SUnionBytesAsync(params string[] keys)
{
return WriteAsync(RedisCommands.SUnionBytes(keys));
}
/// <summary>
/// Add multiple sets and store the resulting set in a key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="keys">Set keys to union</param>
/// <returns>Number of elements in resulting set</returns>
public Task<long> SUnionStoreAsync(string destination, params string[] keys)
{
return WriteAsync(RedisCommands.SUnionStore(destination, keys));
}
/// <summary>
/// Iterate the elements of a set field
/// </summary>
/// <param name="key">Set key</param>
/// <param name="cursor">The cursor returned by the server in the previous call, or 0 if this is the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Maximum number of elements to return</param>
/// <returns>Updated cursor and result set</returns>
public Task<RedisScan<string>> SScanAsync(string key, long cursor, string pattern = null, long? count = null)
{
return WriteAsync(RedisCommands.SScan(key, cursor, pattern, count));
}
public Task<RedisScan<byte[]>> SScanBytesAsync(string key, long cursor, string pattern = null, long? count = null)
{
return WriteAsync(RedisCommands.SScanBytes(key, cursor, pattern, count));
}
#endregion
#region Sorted Sets
public Task<Tuple<string, decimal>[]> ZPopMaxAsync(string key, long count)
{
return WriteAsync(RedisCommands.ZPopMax(key, count));
}
public Task<Tuple<byte[], decimal>[]> ZPopMaxBytesAsync(string key, long count)
{
return WriteAsync(RedisCommands.ZPopMaxBytes(key, count));
}
public Task<Tuple<string, decimal>[]> ZPopMinAsync(string key, long count)
{
return WriteAsync(RedisCommands.ZPopMin(key, count));
}
public Task<Tuple<byte[], decimal>[]> ZPopMinBytesAsync(string key, long count)
{
return WriteAsync(RedisCommands.ZPopMinBytes(key, count));
}
/// <summary>
/// Add one or more members to a sorted set, or update its score if it already exists
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="scoreMembers">Array of member scores to add to sorted set</param>
/// <returns>Number of elements added to the sorted set (not including member updates)</returns>
public Task<long> ZAddAsync<TScore, TMember>(string key, params Tuple<TScore, TMember>[] scoreMembers)
{
return WriteAsync(RedisCommands.ZAdd(key, scoreMembers));
}
/// <summary>
/// Add one or more members to a sorted set, or update its score if it already exists
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="scoreMembers">Array of member scores [s1, m1, s2, m2, ..]</param>
/// <returns>Number of elements added to the sorted set (not including member updates)</returns>
public Task<long> ZAddAsync(string key, params object[] scoreMembers)
{
return WriteAsync(RedisCommands.ZAdd(key, scoreMembers));
}
/// <summary>
/// Get the number of members in a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <returns>Number of elements in the sorted set</returns>
public Task<long> ZCardAsync(string key)
{
return WriteAsync(RedisCommands.ZCard(key));
}
/// <summary>
/// Count the members in a sorted set with scores within the given values
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <returns>Number of elements in the specified score range</returns>
public Task<long> ZCountAsync(string key, decimal min, decimal max, bool exclusiveMin = false, bool exclusiveMax = false)
{
return WriteAsync(RedisCommands.ZCount(key, min, max, exclusiveMin, exclusiveMax));
}
/// <summary>
/// Count the members in a sorted set with scores within the given values
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <returns>Number of elements in the specified score range</returns>
public Task<long> ZCountAsync(string key, string min, string max)
{
return WriteAsync(RedisCommands.ZCount(key, min, max));
}
/// <summary>
/// Increment the score of a member in a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="increment">Increment by value</param>
/// <param name="member">Sorted set member to increment</param>
/// <returns>New score of member</returns>
public Task<decimal> ZIncrByAsync(string key, decimal increment, object member)
{
return WriteAsync(RedisCommands.ZIncrBy(key, increment, member));
}
/// <summary>
/// Intersect multiple sorted sets and store the resulting set in a new key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="weights">Multiplication factor for each input set</param>
/// <param name="aggregate">Aggregation function of resulting set</param>
/// <param name="keys">Sorted set keys to intersect</param>
/// <returns>Number of elements in the resulting sorted set</returns>
public Task<long> ZInterStoreAsync(string destination, decimal[] weights = null, RedisAggregate? aggregate = null, params string[] keys)
{
return WriteAsync(RedisCommands.ZInterStore(destination, weights, aggregate, keys));
}
/// <summary>
/// Intersect multiple sorted sets and store the resulting set in a new key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="keys">Sorted set keys to intersect</param>
/// <returns>Number of elements in the resulting sorted set</returns>
public Task<long> ZInterStoreAsync(string destination, params string[] keys)
{
return ZInterStoreAsync(destination, null, null, keys);
}
/// <summary>
/// Return a range of members in a sorted set, by index
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <param name="withScores">Include scores in result</param>
/// <returns>Array of elements in the specified range (with optional scores)</returns>
public Task<string[]> ZRangeAsync(string key, long start, long stop, bool withScores = false)
{
return WriteAsync(RedisCommands.ZRange(key, start, stop, withScores));
}
public Task<byte[][]> ZRangeBytesAsync(string key, long start, long stop, bool withScores = false)
{
return WriteAsync(RedisCommands.ZRangeBytes(key, start, stop, withScores));
}
/// <summary>
/// Return a range of members in a sorted set, by index, with scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>Array of elements in the specified range with scores</returns>
public Task<Tuple<string, decimal>[]> ZRangeWithScoresAsync(string key, long start, long stop)
{
return WriteAsync(RedisCommands.ZRangeWithScores(key, start, stop));
}
public Task<Tuple<byte[], decimal>[]> ZRangeBytesWithScoresAsync(string key, long start, long stop)
{
return WriteAsync(RedisCommands.ZRangeBytesWithScores(key, start, stop));
}
/// <summary>
/// Return a range of members in a sorted set, by score
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="withScores">Include scores in result</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified range (with optional scores)</returns>
public Task<string[]> ZRangeByScoreAsync(string key, decimal min, decimal max, bool withScores = false, bool exclusiveMin = false, bool exclusiveMax = false, long? offset = null, long? count = null)
{
return WriteAsync(RedisCommands.ZRangeByScore(key, min, max, withScores, exclusiveMin, exclusiveMax, offset, count));
}
public Task<byte[][]> ZRangeBytesByScoreAsync(string key, decimal min, decimal max, bool withScores = false, bool exclusiveMin = false, bool exclusiveMax = false, long? offset = null, long? count = null)
{
return WriteAsync(RedisCommands.ZRangeBytesByScore(key, min, max, withScores, exclusiveMin, exclusiveMax, offset, count));
}
/// <summary>
/// Return a range of members in a sorted set, by score
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="withScores">Include scores in result</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified range (with optional scores)</returns>
public Task<string[]> ZRangeByScoreAsync(string key, string min, string max, bool withScores = false, long? offset = null, long? count = null)
{
return WriteAsync(RedisCommands.ZRangeByScore(key, min, max, withScores, offset, count));
}
public Task<byte[][]> ZRangeBytesByScoreAsync(string key, string min, string max, bool withScores = false, long? offset = null, long? count = null)
{
return WriteAsync(RedisCommands.ZRangeBytesByScore(key, min, max, withScores, offset, count));
}
/// <summary>
/// Return a range of members in a sorted set, by score, with scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified range (with optional scores)</returns>
public Task<Tuple<string, decimal>[]> ZRangeByScoreWithScoresAsync(string key, decimal min, decimal max, bool exclusiveMin = false, bool exclusiveMax = false, long? offset = null, long? count = null)
{
return WriteAsync(RedisCommands.ZRangeByScoreWithScores(key, min, max, exclusiveMin, exclusiveMax, offset, count));
}
public Task<Tuple<byte[], decimal>[]> ZRangeBytesByScoreWithScoresAsync(string key, decimal min, decimal max, bool exclusiveMin = false, bool exclusiveMax = false, long? offset = null, long? count = null)
{
return WriteAsync(RedisCommands.ZRangeBytesByScoreWithScores(key, min, max, exclusiveMin, exclusiveMax, offset, count));
}
/// <summary>
/// Return a range of members in a sorted set, by score, with scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified range (with optional scores)</returns>
public Task<Tuple<string, decimal>[]> ZRangeByScoreWithScoresAsync(string key, string min, string max, long? offset = null, long? count = null)
{
return WriteAsync(RedisCommands.ZRangeByScoreWithScores(key, min, max, offset, count));
}
public Task<Tuple<byte[], decimal>[]> ZRangeBytesByScoreWithScoresAsync(string key, string min, string max, long? offset = null, long? count = null)
{
return WriteAsync(RedisCommands.ZRangeBytesByScoreWithScores(key, min, max, offset, count));
}
/// <summary>
/// Determine the index of a member in a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="member">Member to lookup</param>
/// <returns>Rank of member or null if key does not exist</returns>
public Task<long?> ZRankAsync(string key, object member)
{
return WriteAsync(RedisCommands.ZRank(key, member));
}
/// <summary>
/// Remove one or more members from a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="members">Members to remove</param>
/// <returns>Number of elements removed</returns>
public Task<long> ZRemAsync(string key, params object[] members)
{
return WriteAsync(RedisCommands.ZRem(key, members));
}
/// <summary>
/// Remove all members in a sorted set within the given indexes
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>Number of elements removed</returns>
public Task<long> ZRemRangeByRankAsync(string key, long start, long stop)
{
return WriteAsync(RedisCommands.ZRemRangeByRank(key, start, stop));
}
/// <summary>
/// Remove all members in a sorted set within the given scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <returns>Number of elements removed</returns>
public Task<long> ZRemRangeByScoreAsync(string key, decimal min, decimal max, bool exclusiveMin = false, bool exclusiveMax = false)
{
return WriteAsync(RedisCommands.ZRemRangeByScore(key, min, max, exclusiveMin, exclusiveMax));
}
public Task<long> ZRemRangeByScoreAsync(string key, string min, string max)
{
return WriteAsync(RedisCommands.ZRemRangeByScore(key, min, max));
}
/// <summary>
/// Return a range of members in a sorted set, by index, with scores ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <param name="withScores">Include scores in result</param>
/// <returns>List of elements in the specified range (with optional scores)</returns>
public Task<string[]> ZRevRangeAsync(string key, long start, long stop, bool withScores = false)
{
return WriteAsync(RedisCommands.ZRevRange(key, start, stop, withScores));
}
public Task<byte[][]> ZRevRangeBytesAsync(string key, long start, long stop, bool withScores = false)
{
return WriteAsync(RedisCommands.ZRevRangeBytes(key, start, stop, withScores));
}
/// <summary>
/// Return a range of members in a sorted set, by index, with scores ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>List of elements in the specified range (with optional scores)</returns>
public Task<Tuple<string, decimal>[]> ZRevRangeWithScoresAsync(string key, long start, long stop)
{
return WriteAsync(RedisCommands.ZRevRangeWithScores(key, start, stop));
}
public Task<Tuple<byte[], decimal>[]> ZRevRangeBytesWithScoresAsync(string key, long start, long stop)
{
return WriteAsync(RedisCommands.ZRevRangeBytesWithScores(key, start, stop));
}
/// <summary>
/// Return a range of members in a sorted set, by score, with scores ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="withScores">Include scores in result</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified score range (with optional scores)</returns>
public Task<string[]> ZRevRangeByScoreAsync(string key, decimal max, decimal min, bool withScores = false, bool exclusiveMax = false, bool exclusiveMin = false, long? offset = null, long? count = null)
{
return WriteAsync(RedisCommands.ZRevRangeByScore(key, max, min, withScores, exclusiveMax, exclusiveMin, offset, count));
}
public Task<byte[][]> ZRevRangeBytesByScoreAsync(string key, decimal max, decimal min, bool withScores = false, bool exclusiveMax = false, bool exclusiveMin = false, long? offset = null, long? count = null)
{
return WriteAsync(RedisCommands.ZRevRangeBytesByScore(key, max, min, withScores, exclusiveMax, exclusiveMin, offset, count));
}
/// <summary>
/// Return a range of members in a sorted set, by score, with scores ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="withScores">Include scores in result</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified score range (with optional scores)</returns>
public Task<string[]> ZRevRangeByScoreAsync(string key, string max, string min, bool withScores = false, long? offset = null, long? count = null)
{
return WriteAsync(RedisCommands.ZRevRangeByScore(key, max, min, withScores, offset, count));
}
public Task<byte[][]> ZRevRangeBytesByScoreAsync(string key, string max, string min, bool withScores = false, long? offset = null, long? count = null)
{
return WriteAsync(RedisCommands.ZRevRangeBytesByScore(key, max, min, withScores, offset, count));
}
/// <summary>
/// Return a range of members in a sorted set, by score, with scores ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified score range (with optional scores)</returns>
public Task<Tuple<string, decimal>[]> ZRevRangeByScoreWithScoresAsync(string key, decimal max, decimal min, bool exclusiveMax = false, bool exclusiveMin = false, long? offset = null, long? count = null)
{
return WriteAsync(RedisCommands.ZRevRangeByScoreWithScores(key, max, min, exclusiveMax, exclusiveMin, offset, count));
}
public Task<Tuple<byte[], decimal>[]> ZRevRangeBytesByScoreWithScoresAsync(string key, decimal max, decimal min, bool exclusiveMax = false, bool exclusiveMin = false, long? offset = null, long? count = null)
{
return WriteAsync(RedisCommands.ZRevRangeBytesByScoreWithScores(key, max, min, exclusiveMax, exclusiveMin, offset, count));
}
/// <summary>
/// Return a range of members in a sorted set, by score, with scores ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified score range (with optional scores)</returns>
public Task<Tuple<string, decimal>[]> ZRevRangeByScoreWithScoresAsync(string key, string max, string min, long? offset = null, long? count = null)
{
return WriteAsync(RedisCommands.ZRevRangeByScoreWithScores(key, max, min, offset, count));
}
public Task<Tuple<byte[], decimal>[]> ZRevRangeBytesByScoreWithScoresAsync(string key, string max, string min, long? offset = null, long? count = null)
{
return WriteAsync(RedisCommands.ZRevRangeBytesByScoreWithScores(key, max, min, offset, count));
}
/// <summary>
/// Determine the index of a member in a sorted set, with scores ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="member">Member to lookup</param>
/// <returns>Rank of member, or null if member does not exist</returns>
public Task<long?> ZRevRankAsync(string key, object member)
{
return WriteAsync(RedisCommands.ZRevRank(key, member));
}
/// <summary>
/// Get the score associated with the given member in a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="member">Member to lookup</param>
/// <returns>Score of member, or null if member does not exist</returns>
public Task<decimal?> ZScoreAsync(string key, object member)
{
return WriteAsync(RedisCommands.ZScore(key, member));
}
/// <summary>
/// Add multiple sorted sets and store the resulting sorted set in a new key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="weights">Multiplication factor for each input set</param>
/// <param name="aggregate">Aggregation function of resulting set</param>
/// <param name="keys">Sorted set keys to union</param>
/// <returns>Number of elements in the resulting sorted set</returns>
public Task<long> ZUnionStoreAsync(string destination, decimal[] weights = null, RedisAggregate? aggregate = null, params string[] keys)
{
return WriteAsync(RedisCommands.ZUnionStore(destination, weights, aggregate, keys));
}
/// <summary>
/// Iterate the scores and elements of a sorted set field
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="cursor">The cursor returned by the server in the previous call, or 0 if this is the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Maximum number of elements to return</param>
/// <returns>Updated cursor and result set</returns>
public Task<RedisScan<Tuple<string, decimal>>> ZScanAsync(string key, long cursor, string pattern = null, long? count = null)
{
return WriteAsync(RedisCommands.ZScan(key, cursor, pattern, count));
}
public Task<RedisScan<Tuple<byte[], decimal>>> ZScanBytesAsync(string key, long cursor, string pattern = null, long? count = null)
{
return WriteAsync(RedisCommands.ZScanBytes(key, cursor, pattern, count));
}
/// <summary>
/// Retrieve all the elements in a sorted set with a value between min and max
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Lexagraphic start value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <param name="max">Lexagraphic stop value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <param name="offset">Limit result set by offset</param>
/// <param name="count">Limimt result set by size</param>
/// <returns>List of elements in the specified range</returns>
public Task<string[]> ZRangeByLexAsync(string key, string min, string max, long? offset = null, long? count = null)
{
return WriteAsync(RedisCommands.ZRangeByLex(key, min, max, offset, count));
}
public Task<byte[][]> ZRangeBytesByLexAsync(string key, string min, string max, long? offset = null, long? count = null)
{
return WriteAsync(RedisCommands.ZRangeBytesByLex(key, min, max, offset, count));
}
/// <summary>
/// Remove all elements in the sorted set with a value between min and max
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Lexagraphic start value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <param name="max">Lexagraphic stop value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <returns>Number of elements removed</returns>
public Task<long> ZRemRangeByLexAsync(string key, string min, string max)
{
return WriteAsync(RedisCommands.ZRemRangeByLex(key, min, max));
}
/// <summary>
/// Returns the number of elements in the sorted set with a value between min and max.
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Lexagraphic start value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <param name="max">Lexagraphic stop value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <returns>Number of elements in the specified score range</returns>
public Task<long> ZLexCountAsync(string key, string min, string max)
{
return WriteAsync(RedisCommands.ZLexCount(key, min, max));
}
#endregion
#region Pub/Sub
/// <summary>
/// Post a message to a channel
/// </summary>
/// <param name="channel">Channel to post message</param>
/// <param name="message">Message to send</param>
/// <returns>Number of clients that received the message</returns>
public Task<long> PublishAsync(string channel, string message)
{
return WriteAsync(RedisCommands.Publish(channel, message));
}
/// <summary>
/// List the currently active channels
/// </summary>
/// <param name="pattern">Glob-style channel pattern</param>
/// <returns>Active channel names</returns>
public Task<string[]> PubSubChannelsAsync(string pattern = null)
{
return WriteAsync(RedisCommands.PubSubChannels(pattern));
}
/// <summary>
/// Return the number of subscribers (not counting clients subscribed to patterns) for the specified channels
/// </summary>
/// <param name="channels">Channels to query</param>
/// <returns>Channel names and counts</returns>
public Task<Tuple<string, long>[]> PubSubNumSubAsync(params string[] channels)
{
return WriteAsync(RedisCommands.PubSubNumSub(channels));
}
/// <summary>
/// Return the number of subscriptions to patterns
/// </summary>
/// <returns>The number of patterns all the clients are subscribed to</returns>
public Task<long> PubSubNumPatAsync()
{
return WriteAsync(RedisCommands.PubSubNumPat());
}
#endregion
#region Scripting
/// <summary>
/// Execute a Lua script server side
/// </summary>
/// <param name="script">Script to run on server</param>
/// <param name="keys">Keys used by script</param>
/// <param name="arguments">Arguments to pass to script</param>
/// <returns>Redis object</returns>
public Task<object> EvalAsync(string script, string[] keys, params object[] arguments)
    => WriteAsync(RedisCommands.Eval(script, keys, arguments));

/// <summary>
/// Execute a Lua script server side, sending only the script's cached SHA hash
/// </summary>
/// <param name="sha1">SHA1 hash of script</param>
/// <param name="keys">Keys used by script</param>
/// <param name="arguments">Arguments to pass to script</param>
/// <returns>Redis object</returns>
public Task<object> EvalSHAAsync(string sha1, string[] keys, params object[] arguments)
    => WriteAsync(RedisCommands.EvalSHA(sha1, keys, arguments));

/// <summary>
/// Check existence of script SHA hashes in the script cache
/// </summary>
/// <param name="sha1s">SHA1 script hashes</param>
/// <returns>Array of boolean values indicating script existence on server</returns>
public Task<bool[]> ScriptExistsAsync(params string[] sha1s)
    => WriteAsync(RedisCommands.ScriptExists(sha1s));

/// <summary>
/// Remove all scripts from the script cache
/// </summary>
/// <returns>Status code</returns>
public Task<string> ScriptFlushAsync()
    => WriteAsync(RedisCommands.ScriptFlush());

/// <summary>
/// Kill the script currently in execution
/// </summary>
/// <returns>Status code</returns>
public Task<string> ScriptKillAsync()
    => WriteAsync(RedisCommands.ScriptKill());

/// <summary>
/// Load the specified Lua script into the script cache
/// </summary>
/// <param name="script">Lua script to load</param>
/// <returns>SHA1 hash of script</returns>
public Task<string> ScriptLoadAsync(string script)
    => WriteAsync(RedisCommands.ScriptLoad(script));
#endregion
#region Strings
/// <summary>
/// Append a value to a key
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to append to key</param>
/// <returns>Length of string after append</returns>
public Task<long> AppendAsync(string key, object value)
    => WriteAsync(RedisCommands.Append(key, value));

/// <summary>
/// Count set bits in a string
/// </summary>
/// <param name="key">Key to check</param>
/// <param name="start">Start offset</param>
/// <param name="end">Stop offset</param>
/// <returns>Number of bits set to 1</returns>
public Task<long> BitCountAsync(string key, long? start = null, long? end = null)
    => WriteAsync(RedisCommands.BitCount(key, start, end));

/// <summary>
/// Perform bitwise operations between strings
/// </summary>
/// <param name="operation">Bit command to execute</param>
/// <param name="destKey">Store result in destination key</param>
/// <param name="keys">Keys to operate</param>
/// <returns>Size of string stored in the destination key</returns>
public Task<long> BitOpAsync(RedisBitOp operation, string destKey, params string[] keys)
    => WriteAsync(RedisCommands.BitOp(operation, destKey, keys));

/// <summary>
/// Find first bit set or clear in a string
/// </summary>
/// <param name="key">Key to examine</param>
/// <param name="bit">Bit value (1 or 0)</param>
/// <param name="start">Examine string at specified byte offset</param>
/// <param name="end">Examine string to specified byte offset</param>
/// <returns>Position of the first bit set to the specified value</returns>
public Task<long> BitPosAsync(string key, bool bit, long? start = null, long? end = null)
    => WriteAsync(RedisCommands.BitPos(key, bit, start, end));

/// <summary>
/// Decrement the integer value of a key by one
/// </summary>
/// <param name="key">Key to modify</param>
/// <returns>Value of key after decrement</returns>
public Task<long> DecrAsync(string key)
    => WriteAsync(RedisCommands.Decr(key));

/// <summary>
/// Decrement the integer value of a key by the given number
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="decrement">Decrement value</param>
/// <returns>Value of key after decrement</returns>
public Task<long> DecrByAsync(string key, long decrement)
    => WriteAsync(RedisCommands.DecrBy(key, decrement));

/// <summary>
/// Get the value of a key
/// </summary>
/// <param name="key">Key to lookup</param>
/// <returns>Value of key</returns>
public Task<string> GetAsync(string key)
    => WriteAsync(RedisCommands.Get(key));

/// <summary>
/// Get the raw byte value of a key
/// </summary>
/// <param name="key">Key to lookup</param>
/// <returns>Value of key as a byte array</returns>
public Task<byte[]> GetBytesAsync(string key)
    => WriteAsync(RedisCommands.GetBytes(key));

/// <summary>
/// Returns the bit value at offset in the string value stored at key
/// </summary>
/// <param name="key">Key to lookup</param>
/// <param name="offset">Offset of key to check</param>
/// <returns>Bit value stored at offset</returns>
public Task<bool> GetBitAsync(string key, uint offset)
    => WriteAsync(RedisCommands.GetBit(key, offset));

/// <summary>
/// Get a substring of the string stored at a key
/// </summary>
/// <param name="key">Key to lookup</param>
/// <param name="start">Start offset</param>
/// <param name="end">End offset</param>
/// <returns>Substring in the specified range</returns>
public Task<string> GetRangeAsync(string key, long start, long end)
    => WriteAsync(RedisCommands.GetRange(key, start, end));

/// <summary>
/// Get a binary substring of the string stored at a key
/// </summary>
/// <param name="key">Key to lookup</param>
/// <param name="start">Start offset</param>
/// <param name="end">End offset</param>
/// <returns>Substring in the specified range as a byte array</returns>
public Task<byte[]> GetRangeBytesAsync(string key, long start, long end)
    => WriteAsync(RedisCommands.GetRangeBytes(key, start, end));

/// <summary>
/// Set the string value of a key and return its old value
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <returns>Old value stored at key, or null if key did not exist</returns>
public Task<string> GetSetAsync(string key, object value)
    => WriteAsync(RedisCommands.GetSet(key, value));

/// <summary>
/// Set the string value of a key and return its old value as a byte array
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <returns>Old value stored at key, or null if key did not exist</returns>
public Task<byte[]> GetSetBytesAsync(string key, object value)
    => WriteAsync(RedisCommands.GetSetBytes(key, value));

/// <summary>
/// Increment the integer value of a key by one
/// </summary>
/// <param name="key">Key to modify</param>
/// <returns>Value of key after increment</returns>
public Task<long> IncrAsync(string key)
    => WriteAsync(RedisCommands.Incr(key));

/// <summary>
/// Increment the integer value of a key by the given amount
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="increment">Increment amount</param>
/// <returns>Value of key after increment</returns>
public Task<long> IncrByAsync(string key, long increment)
    => WriteAsync(RedisCommands.IncrBy(key, increment));

/// <summary>
/// Increment the float value of a key by the given amount
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="increment">Increment amount</param>
/// <returns>Value of key after increment</returns>
public Task<decimal> IncrByFloatAsync(string key, decimal increment)
    => WriteAsync(RedisCommands.IncrByFloat(key, increment));

/// <summary>
/// Get the values of all the given keys
/// </summary>
/// <param name="keys">Keys to lookup</param>
/// <returns>Array of values at the specified keys</returns>
public Task<string[]> MGetAsync(params string[] keys)
    => WriteAsync(RedisCommands.MGet(keys));

/// <summary>
/// Get the raw byte values of all the given keys
/// </summary>
/// <param name="keys">Keys to lookup</param>
/// <returns>Array of byte-array values at the specified keys</returns>
public Task<byte[][]> MGetBytesAsync(params string[] keys)
    => WriteAsync(RedisCommands.MGetBytes(keys));

/// <summary>
/// Set multiple keys to multiple values
/// </summary>
/// <param name="keyValues">Key values to set</param>
/// <returns>Status code</returns>
public Task<string> MSetAsync(params Tuple<string, object>[] keyValues)
    => WriteAsync(RedisCommands.MSet(keyValues));

/// <summary>
/// Set multiple keys to multiple values
/// </summary>
/// <param name="keyValues">Key values to set [k1, v1, k2, v2, ..]</param>
/// <returns>Status code</returns>
public Task<string> MSetAsync(params object[] keyValues)
    => WriteAsync(RedisCommands.MSet(keyValues));

/// <summary>
/// Set multiple keys to multiple values, only if none of the keys exist
/// </summary>
/// <param name="keyValues">Key values to set</param>
/// <returns>True if all keys were set</returns>
public Task<bool> MSetNxAsync(params Tuple<string, object>[] keyValues)
    => WriteAsync(RedisCommands.MSetNx(keyValues));

/// <summary>
/// Set multiple keys to multiple values, only if none of the keys exist
/// </summary>
/// <param name="keyValues">Key values to set [k1, v1, k2, v2, ..]</param>
/// <returns>True if all keys were set</returns>
public Task<bool> MSetNxAsync(params object[] keyValues)
    => WriteAsync(RedisCommands.MSetNx(keyValues));

/// <summary>
/// Set the value and expiration in milliseconds of a key
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="milliseconds">Expiration in milliseconds</param>
/// <param name="value">Value to set</param>
/// <returns>Status code</returns>
public Task<string> PSetExAsync(string key, long milliseconds, object value)
    => WriteAsync(RedisCommands.PSetEx(key, milliseconds, value));

/// <summary>
/// Set the string value of a key
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <returns>Status code</returns>
public Task<string> SetAsync(string key, object value)
    => WriteAsync(RedisCommands.Set(key, value));

/// <summary>
/// Set the string value of a key with atomic expiration and existence condition
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <param name="expiration">Set expiration to nearest millisecond</param>
/// <param name="condition">Set key if existence condition</param>
/// <returns>Status code, or null if condition not met</returns>
public Task<string> SetAsync(string key, object value, TimeSpan expiration, RedisExistence? condition = null)
    => WriteAsync(RedisCommands.Set(key, value, expiration, condition));

/// <summary>
/// Set the string value of a key with atomic expiration and existence condition
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <param name="expirationSeconds">Set expiration to nearest second</param>
/// <param name="condition">Set key if existence condition</param>
/// <returns>Status code, or null if condition not met</returns>
public Task<string> SetAsync(string key, object value, int? expirationSeconds = null, RedisExistence? condition = null)
    => WriteAsync(RedisCommands.Set(key, value, expirationSeconds, condition));

/// <summary>
/// Set the string value of a key with atomic expiration and existence condition
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <param name="expirationMilliseconds">Set expiration to nearest millisecond</param>
/// <param name="condition">Set key if existence condition</param>
/// <returns>Status code, or null if condition not met</returns>
public Task<string> SetAsync(string key, object value, long? expirationMilliseconds = null, RedisExistence? condition = null)
    => WriteAsync(RedisCommands.Set(key, value, expirationMilliseconds, condition));

/// <summary>
/// Sets or clears the bit at offset in the string value stored at key
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="offset">Modify key at offset</param>
/// <param name="value">Value to set (on or off)</param>
/// <returns>Original bit stored at offset</returns>
public Task<bool> SetBitAsync(string key, uint offset, bool value)
    => WriteAsync(RedisCommands.SetBit(key, offset, value));

/// <summary>
/// Set the value and expiration of a key
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="seconds">Expiration in seconds</param>
/// <param name="value">Value to set</param>
/// <returns>Status code</returns>
public Task<string> SetExAsync(string key, long seconds, object value)
    => WriteAsync(RedisCommands.SetEx(key, seconds, value));

/// <summary>
/// Set the value of a key, only if the key does not exist
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <returns>True if key was set</returns>
public Task<bool> SetNxAsync(string key, object value)
    => WriteAsync(RedisCommands.SetNx(key, value));

/// <summary>
/// Overwrite part of a string at key starting at the specified offset
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="offset">Start offset</param>
/// <param name="value">Value to write at offset</param>
/// <returns>Length of string after operation</returns>
public Task<long> SetRangeAsync(string key, uint offset, object value)
    => WriteAsync(RedisCommands.SetRange(key, offset, value));

/// <summary>
/// Get the length of the value stored in a key
/// </summary>
/// <param name="key">Key to lookup</param>
/// <returns>Length of string at key</returns>
public Task<long> StrLenAsync(string key)
    => WriteAsync(RedisCommands.StrLen(key));
#endregion
#region Server
/// <summary>
/// Asynchronously rewrite the append-only file
/// </summary>
/// <returns>Status code</returns>
public Task<string> BgRewriteAofAsync()
    => WriteAsync(RedisCommands.BgRewriteAof());

/// <summary>
/// Asynchronously save the dataset to disk
/// </summary>
/// <returns>Status code</returns>
public Task<string> BgSaveAsync()
    => WriteAsync(RedisCommands.BgSave());

/// <summary>
/// Get the current connection name
/// </summary>
/// <returns>Connection name</returns>
public Task<string> ClientGetNameAsync()
    => WriteAsync(RedisCommands.ClientGetName());

/// <summary>
/// Kill the connection of a client
/// </summary>
/// <param name="ip">Client IP returned from CLIENT LIST</param>
/// <param name="port">Client port returned from CLIENT LIST</param>
/// <returns>Status code</returns>
public Task<string> ClientKillAsync(string ip, int port)
    => WriteAsync(RedisCommands.ClientKill(ip, port));

/// <summary>
/// Kill the connection of a client
/// </summary>
/// <param name="addr">Client address</param>
/// <param name="id">Client ID</param>
/// <param name="type">Client type</param>
/// <param name="skipMe">Set to true to skip calling client</param>
/// <returns>The number of clients killed</returns>
public Task<long> ClientKillAsync(string addr = null, string id = null, string type = null, bool? skipMe = null)
    => WriteAsync(RedisCommands.ClientKill(addr, id, type, skipMe));

/// <summary>
/// Get the list of client connections
/// </summary>
/// <returns>Formatted string of clients</returns>
public Task<string> ClientListAsync()
    => WriteAsync(RedisCommands.ClientList());

/// <summary>
/// Suspend all the Redis clients for the specified amount of time
/// </summary>
/// <param name="milliseconds">Time in milliseconds to suspend</param>
/// <returns>Status code</returns>
public Task<string> ClientPauseAsync(int milliseconds)
    => WriteAsync(RedisCommands.ClientPause(milliseconds));

/// <summary>
/// Suspend all the Redis clients for the specified amount of time
/// </summary>
/// <param name="timeout">Time to suspend</param>
/// <returns>Status code</returns>
public Task<string> ClientPauseAsync(TimeSpan timeout)
    => WriteAsync(RedisCommands.ClientPause(timeout));

/// <summary>
/// Set the current connection name
/// </summary>
/// <param name="connectionName">Name of connection (no spaces)</param>
/// <returns>Status code</returns>
public Task<string> ClientSetNameAsync(string connectionName)
    => WriteAsync(RedisCommands.ClientSetName(connectionName));

/// <summary>
/// Get the value of a configuration parameter
/// </summary>
/// <param name="parameter">Configuration parameter to lookup</param>
/// <returns>Configuration value</returns>
public Task<Tuple<string, string>[]> ConfigGetAsync(string parameter)
    => WriteAsync(RedisCommands.ConfigGet(parameter));

/// <summary>
/// Reset the stats returned by INFO
/// </summary>
/// <returns>Status code</returns>
public Task<string> ConfigResetStatAsync()
    => WriteAsync(RedisCommands.ConfigResetStat());

/// <summary>
/// Rewrites the redis.conf file
/// </summary>
/// <returns>Status code</returns>
public Task<string> ConfigRewriteAsync()
    => WriteAsync(RedisCommands.ConfigRewrite());

/// <summary>
/// Set a configuration parameter to the given value
/// </summary>
/// <param name="parameter">Parameter to set</param>
/// <param name="value">Value to set</param>
/// <returns>Status code</returns>
public Task<string> ConfigSetAsync(string parameter, string value)
    => WriteAsync(RedisCommands.ConfigSet(parameter, value));

/// <summary>
/// Return the number of keys in the selected database
/// </summary>
/// <returns>Number of keys</returns>
public Task<long> DbSizeAsync()
    => WriteAsync(RedisCommands.DbSize());

/// <summary>
/// Make the server crash :(
/// </summary>
/// <returns>Status code</returns>
public Task<string> DebugSegFaultAsync()
    => WriteAsync(RedisCommands.DebugSegFault());

/// <summary>
/// Remove all keys from all databases
/// </summary>
/// <returns>Status code</returns>
public Task<string> FlushAllAsync()
    => WriteAsync(RedisCommands.FlushAll());

/// <summary>
/// Remove all keys from the current database
/// </summary>
/// <returns>Status code</returns>
public Task<string> FlushDbAsync()
    => WriteAsync(RedisCommands.FlushDb());
/// <summary>
/// Get information and statistics about the server
/// </summary>
/// <param name="section">all|default|server|clients|memory|persistence|stats|replication|cpu|commandstats|cluster|keyspace</param>
/// <returns>Formatted string</returns>
public Task<string> InfoAsync(string section = null)
{
    // Bug fix: the section argument was previously dropped (RedisCommands.Info()
    // was called with no arguments), so INFO always returned the default sections
    // no matter what the caller requested. Forward it, matching the sync client.
    return WriteAsync(RedisCommands.Info(section));
}
/// <summary>
/// Get the timestamp of the last successful save to disk
/// </summary>
/// <returns>Date of last save</returns>
public Task<DateTime> LastSaveAsync()
    => WriteAsync(RedisCommands.LastSave());

/// <summary>
/// Provide information on the role of a Redis instance in the context of replication
/// </summary>
/// <returns>Role information</returns>
public Task<RedisRole> RoleAsync()
    => WriteAsync(RedisCommands.Role());

/// <summary>
/// Synchronously save the dataset to disk
/// </summary>
/// <returns>Status code</returns>
public Task<string> SaveAsync()
    => WriteAsync(RedisCommands.Save());
/// <summary>
/// Synchronously save the dataset to disk and then shut down the server
/// </summary>
/// <param name="save">Force a DB saving operation even if no save points are configured</param>
/// <returns>Status code</returns>
public Task<string> ShutdownAsync(bool? save = null)
{
    // Bug fix: the save flag was previously ignored (RedisCommands.Shutdown()
    // was called with no arguments), so SHUTDOWN SAVE / SHUTDOWN NOSAVE could
    // never be issued from the async API. Forward it like the sync client does.
    return WriteAsync(RedisCommands.Shutdown(save));
}
/// <summary>
/// Make the server a slave of another instance or promote it as master
/// </summary>
/// <param name="host">Master host</param>
/// <param name="port">Master port</param>
/// <returns>Status code</returns>
public Task<string> SlaveOfAsync(string host, int port)
    => WriteAsync(RedisCommands.SlaveOf(host, port));

/// <summary>
/// Turn off replication, turning the Redis server into a master
/// </summary>
/// <returns>Status code</returns>
public Task<string> SlaveOfNoOneAsync()
    => WriteAsync(RedisCommands.SlaveOfNoOne());

/// <summary>
/// Get latest entries from the slow log
/// </summary>
/// <param name="count">Limit entries returned</param>
/// <returns>Slow log entries</returns>
public Task<RedisSlowLogEntry[]> SlowLogGetAsync(long? count = null)
    => WriteAsync(RedisCommands.SlowLogGet(count));

/// <summary>
/// Get the length of the slow log
/// </summary>
/// <returns>Slow log length</returns>
public Task<long> SlowLogLenAsync()
    => WriteAsync(RedisCommands.SlowLogLen());

/// <summary>
/// Reset the slow log
/// </summary>
/// <returns>Status code</returns>
public Task<string> SlowLogResetAsync()
    => WriteAsync(RedisCommands.SlowLogReset());

/// <summary>
/// Internal command used for replication
/// </summary>
/// <returns>Byte array of Redis sync data</returns>
public Task<byte[]> SyncAsync()
    => WriteAsync(RedisCommands.Sync());

/// <summary>
/// Return the current server time
/// </summary>
/// <returns>Server time</returns>
public Task<DateTime> TimeAsync()
    => WriteAsync(RedisCommands.Time());
#endregion
#region Transactions
/// <summary>
/// Mark the start of a transaction block
/// </summary>
/// <returns>Status code</returns>
public Task<string> MultiAsync()
    => _transaction.StartAsync();

/// <summary>
/// Discard all commands issued after MULTI
/// </summary>
/// <returns>Status code</returns>
public Task<string> DiscardAsync()
    => _transaction.AbortAsync();

/// <summary>
/// Execute all commands issued after MULTI
/// </summary>
/// <returns>Array of output from all transaction commands</returns>
public Task<object[]> ExecAsync()
    => _transaction.ExecuteAsync();

/// <summary>
/// Forget about all watched keys
/// </summary>
/// <returns>Status code</returns>
public Task<string> UnwatchAsync()
    => WriteAsync(RedisCommands.Unwatch());

/// <summary>
/// Watch the given keys to determine execution of the MULTI/EXEC block
/// </summary>
/// <param name="keys">Keys to watch</param>
/// <returns>Status code</returns>
public Task<string> WatchAsync(params string[] keys)
    => WriteAsync(RedisCommands.Watch(keys));
#endregion
#region HyperLogLog
/// <summary>
/// Adds the specified elements to the specified HyperLogLog.
/// </summary>
/// <param name="key">Key to update</param>
/// <param name="elements">Elements to add</param>
/// <returns>1 if at least 1 HyperLogLog internal register was altered. 0 otherwise.</returns>
public Task<bool> PfAddAsync(string key, params object[] elements)
    => WriteAsync(RedisCommands.PfAdd(key, elements));

/// <summary>
/// Return the approximated cardinality of the set(s) observed by the HyperLogLog at key(s)
/// </summary>
/// <param name="keys">One or more HyperLogLog keys to examine</param>
/// <returns>Approximated number of unique elements observed via PFADD</returns>
public Task<long> PfCountAsync(params string[] keys)
    => WriteAsync(RedisCommands.PfCount(keys));

/// <summary>
/// Merge N different HyperLogLogs into a single key.
/// </summary>
/// <param name="destKey">Where to store the merged HyperLogLogs</param>
/// <param name="sourceKeys">The HyperLogLogs keys that will be combined</param>
/// <returns>Status code</returns>
public Task<string> PfMergeAsync(string destKey, params string[] sourceKeys)
    => WriteAsync(RedisCommands.PfMerge(destKey, sourceKeys));
#endregion
#region Geo redis-server 3.2
/// <summary>
/// Add one or more geospatial items (longitude, latitude, member) to the sorted set stored at key (GEOADD)
/// </summary>
/// <param name="key">Geo set key</param>
/// <param name="values">One or more (longitude, latitude, member) tuples; must not be null or empty</param>
/// <returns>Number of elements added to the set (excluding updates of existing members)</returns>
public Task<long> GeoAddAsync(string key, params (decimal longitude, decimal latitude, object member)[] values)
{
    if (values == null || values.Length == 0) throw new Exception("values 参数不能为空");
    // GEOADD expects arguments flattened as: key lon1 lat1 member1 lon2 lat2 member2 ...
    var args = new List<object>();
    args.Add(key);
    foreach (var v in values) args.AddRange(new object[] { v.longitude, v.latitude, v.member });
    return WriteAsync(new RedisInt("GEOADD", args.ToArray()));
}
/// <summary>
/// Return the distance between two members of a geo set (GEODIST), or null if either member is missing
/// </summary>
/// <param name="key">Geo set key</param>
/// <param name="member1">First member</param>
/// <param name="member2">Second member</param>
/// <param name="unit">Distance unit; when it is the default (meters) the unit argument is omitted from the command</param>
/// <returns>Distance in the requested unit, or null</returns>
public Task<decimal?> GeoDistAsync(string key, object member1, object member2, GeoUnit unit = GeoUnit.m)
{
    if (unit == GeoUnit.m) return WriteAsync(new RedisFloat.Nullable("GEODIST", key, member1, member2));
    return WriteAsync(new RedisFloat.Nullable("GEODIST", key, member1, member2, unit));
}
/// <summary>
/// Return the Geohash strings of the given members (GEOHASH)
/// </summary>
/// <param name="key">Geo set key</param>
/// <param name="members">Members to look up; must not be null or empty</param>
/// <returns>One Geohash string per requested member</returns>
public Task<string[]> GeoHashAsync(string key, object[] members)
{
    if (members == null || members.Length == 0) throw new Exception("values 参数不能为空")
;
    var args = new List<object>();
    args.Add(key);
    args.AddRange(members);
    return WriteAsync(new RedisArray.Strings("GEOHASH", args.ToArray()));
}
/// <summary>
/// Return the longitude/latitude of the given members (GEOPOS)
/// </summary>
/// <param name="key">Geo set key</param>
/// <param name="members">Members to look up; must not be null or empty</param>
/// <returns>One (longitude, latitude) tuple per member, or null for members not present in the set</returns>
async public Task<(decimal longitude, decimal latitude)?[]> GeoPosAsync(string key, object[] members)
{
    if (members == null || members.Length == 0) throw new Exception("values 参数不能为空");
    var args = new List<object>();
    args.Add(key);
    args.AddRange(members);
    // GEOPOS replies with a nested array: one [lon, lat] pair (or nil) per requested member.
    var ret = await WriteAsync(new RedisArray.Generic<decimal[]>(new RedisArray.Generic<decimal>(new RedisFloat("GEOPOS", args.ToArray()))));
    // A pair of exactly two values maps to a tuple; anything else (missing member) maps to null.
    return ret.Select(a => a != null && a.Length == 2 ? new (decimal, decimal)?((a[0], a[1])) : null).ToArray();
}
/// <summary>
/// Query members of a geo set within the given radius of a longitude/latitude point (GEORADIUS)
/// </summary>
/// <param name="key">Geo set key</param>
/// <param name="longitude">Center longitude</param>
/// <param name="latitude">Center latitude</param>
/// <param name="radius">Radius length</param>
/// <param name="unit">Radius unit</param>
/// <param name="count">Limit the number of results (COUNT clause)</param>
/// <param name="sorting">Sort results by distance (ASC/DESC)</param>
/// <param name="withCoord">Also return each member's coordinates</param>
/// <param name="withDist">Also return each member's distance from the center</param>
/// <param name="withHash">Also return each member's raw geohash value</param>
/// <returns>Matched members; dist/longitude/latitude/hash are default(0) unless the corresponding with* flag was set</returns>
async public Task<(string member, decimal dist, decimal longitude, decimal latitude, long hash)[]> GeoRadiusAsync(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null, bool withCoord = false, bool withDist = false, bool withHash = false)
{
    var args = new List<object>(new object[] { key, longitude, latitude, radius, unit });
    if (withCoord) args.Add("WITHCOORD");
    if (withDist) args.Add("WITHDIST");
    if (withHash) args.Add("WITHHASH");
    // Bug fix: GEORADIUS requires the COUNT keyword before the count value;
    // previously only the bare number was appended, which Redis rejects as a syntax error.
    if (count.HasValue)
    {
        args.Add("COUNT");
        args.Add(count);
    }
    if (sorting.HasValue) args.Add(sorting);
    // Each reply element is parsed as (member, dist?, hash?, [lon, lat]?) depending on the with* flags;
    // parsers for options that were not requested are null so the tuple reader skips them.
    var cmd = new RedisTuple.Generic<string, decimal, long, decimal[]>.Single(
        new RedisString(null),
        withDist == false ? null : new RedisFloat(null),
        withHash == false ? null : new RedisInt(null),
        withCoord == false ? null : new RedisArray.Generic<decimal>(new RedisFloat(null)), "GEORADIUS", args.ToArray());
    var ret = await WriteAsync(new RedisArray.Generic<Tuple<string, decimal, long, decimal[]>>(cmd));
    return ret.Select(a => (a.Item1, a.Item2, a.Item4 == null ? default(decimal) : a.Item4[0], a.Item4 == null ? default(decimal) : a.Item4[1], a.Item3)).ToArray();
}
/// <summary>
/// Query members of a geo set within the given radius of a longitude/latitude point (GEORADIUS),
/// returning members as raw byte arrays
/// </summary>
/// <param name="key">Geo set key</param>
/// <param name="longitude">Center longitude</param>
/// <param name="latitude">Center latitude</param>
/// <param name="radius">Radius length</param>
/// <param name="unit">Radius unit</param>
/// <param name="count">Limit the number of results (COUNT clause)</param>
/// <param name="sorting">Sort results by distance (ASC/DESC)</param>
/// <param name="withCoord">Also return each member's coordinates</param>
/// <param name="withDist">Also return each member's distance from the center</param>
/// <param name="withHash">Also return each member's raw geohash value</param>
/// <returns>Matched members; dist/longitude/latitude/hash are default(0) unless the corresponding with* flag was set</returns>
async public Task<(byte[] member, decimal dist, decimal longitude, decimal latitude, long hash)[]> GeoRadiusBytesAsync(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null, bool withCoord = false, bool withDist = false, bool withHash = false)
{
    var args = new List<object>(new object[] { key, longitude, latitude, radius, unit });
    if (withCoord) args.Add("WITHCOORD");
    if (withDist) args.Add("WITHDIST");
    if (withHash) args.Add("WITHHASH");
    // Bug fix: GEORADIUS requires the COUNT keyword before the count value;
    // previously only the bare number was appended, which Redis rejects as a syntax error.
    if (count.HasValue)
    {
        args.Add("COUNT");
        args.Add(count);
    }
    if (sorting.HasValue) args.Add(sorting);
    // Same parsing scheme as GeoRadiusAsync, but the member field is read as bytes.
    var cmd = new RedisTuple.Generic<byte[], decimal, long, decimal[]>.Single(
        new RedisBytes(null),
        withDist == false ? null : new RedisFloat(null),
        withHash == false ? null : new RedisInt(null),
        withCoord == false ? null : new RedisArray.Generic<decimal>(new RedisFloat(null)), "GEORADIUS", args.ToArray());
    var ret = await WriteAsync(new RedisArray.Generic<Tuple<byte[], decimal, long, decimal[]>>(cmd));
    return ret.Select(a => (a.Item1, a.Item2, a.Item4 == null ? default(decimal) : a.Item4[0], a.Item4 == null ? default(decimal) : a.Item4[1], a.Item3)).ToArray();
}
/// <summary>
/// Query members of a geo set within the given radius of an existing member (GEORADIUSBYMEMBER)
/// </summary>
/// <param name="key">Geo set key</param>
/// <param name="member">Existing member used as the search center</param>
/// <param name="radius">Radius length</param>
/// <param name="unit">Radius unit</param>
/// <param name="count">Limit the number of results (COUNT clause)</param>
/// <param name="sorting">Sort results by distance (ASC/DESC)</param>
/// <param name="withCoord">Also return each member's coordinates</param>
/// <param name="withDist">Also return each member's distance from the center</param>
/// <param name="withHash">Also return each member's raw geohash value</param>
/// <returns>Matched members; dist/longitude/latitude/hash are default(0) unless the corresponding with* flag was set</returns>
async public Task<(string member, decimal dist, decimal longitude, decimal latitude, long hash)[]> GeoRadiusByMemberAsync(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null, bool withCoord = false, bool withDist = false, bool withHash = false)
{
    var args = new List<object>(new object[] { key, member, radius, unit });
    if (withCoord) args.Add("WITHCOORD");
    if (withDist) args.Add("WITHDIST");
    if (withHash) args.Add("WITHHASH");
    // Bug fix: GEORADIUSBYMEMBER requires the COUNT keyword before the count value;
    // previously only the bare number was appended, which Redis rejects as a syntax error.
    if (count.HasValue)
    {
        args.Add("COUNT");
        args.Add(count);
    }
    if (sorting.HasValue) args.Add(sorting);
    // Each reply element is parsed as (member, dist?, hash?, [lon, lat]?) depending on the with* flags.
    var cmd = new RedisTuple.Generic<string, decimal, long, decimal[]>.Single(
        new RedisString(null),
        withDist == false ? null : new RedisFloat(null),
        withHash == false ? null : new RedisInt(null),
        withCoord == false ? null : new RedisArray.Generic<decimal>(new RedisFloat(null)), "GEORADIUSBYMEMBER", args.ToArray());
    var ret = await WriteAsync(new RedisArray.Generic<Tuple<string, decimal, long, decimal[]>>(cmd));
    return ret.Select(a => (a.Item1, a.Item2, a.Item4 == null ? default(decimal) : a.Item4[0], a.Item4 == null ? default(decimal) : a.Item4[1], a.Item3)).ToArray();
}
/// <summary>
/// Query members of a geo set within the given radius of an existing member (GEORADIUSBYMEMBER),
/// returning members as raw byte arrays
/// </summary>
/// <param name="key">Geo set key</param>
/// <param name="member">Existing member used as the search center</param>
/// <param name="radius">Radius length</param>
/// <param name="unit">Radius unit</param>
/// <param name="count">Limit the number of results (COUNT clause)</param>
/// <param name="sorting">Sort results by distance (ASC/DESC)</param>
/// <param name="withCoord">Also return each member's coordinates</param>
/// <param name="withDist">Also return each member's distance from the center</param>
/// <param name="withHash">Also return each member's raw geohash value</param>
/// <returns>Matched members; dist/longitude/latitude/hash are default(0) unless the corresponding with* flag was set</returns>
async public Task<(byte[] member, decimal dist, decimal longitude, decimal latitude, long hash)[]> GeoRadiusBytesByMemberAsync(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null, bool withCoord = false, bool withDist = false, bool withHash = false)
{
    var args = new List<object>(new object[] { key, member, radius, unit });
    if (withCoord) args.Add("WITHCOORD");
    if (withDist) args.Add("WITHDIST");
    if (withHash) args.Add("WITHHASH");
    // Bug fix: GEORADIUSBYMEMBER requires the COUNT keyword before the count value;
    // previously only the bare number was appended, which Redis rejects as a syntax error.
    if (count.HasValue)
    {
        args.Add("COUNT");
        args.Add(count);
    }
    if (sorting.HasValue) args.Add(sorting);
    // Same parsing scheme as GeoRadiusByMemberAsync, but the member field is read as bytes.
    var cmd = new RedisTuple.Generic<byte[], decimal, long, decimal[]>.Single(
        new RedisBytes(null),
        withDist == false ? null : new RedisFloat(null),
        withHash == false ? null : new RedisInt(null),
        withCoord == false ? null : new RedisArray.Generic<decimal>(new RedisFloat(null)), "GEORADIUSBYMEMBER", args.ToArray());
    var ret = await WriteAsync(new RedisArray.Generic<Tuple<byte[], decimal, long, decimal[]>>(cmd));
    return ret.Select(a => (a.Item1, a.Item2, a.Item4 == null ? default(decimal) : a.Item4[0], a.Item4 == null ? default(decimal) : a.Item4[1], a.Item3)).ToArray();
}
#endregion
}
}
#endif |
2881099/csredis | 6,003 | src/CSRedisCore/RedisSentinelClient.Async.cs | using CSRedis.Internal.Commands;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
#if net40
#else
namespace CSRedis
{
public partial class RedisSentinelClient
{
    /// <summary>
    /// Connect to the remote host
    /// </summary>
    /// <returns>True if connected</returns>
    public Task<bool> ConnectAsync() => _connector.ConnectAsync();

    /// <summary>
    /// Call arbitrary Sentinel command (e.g. for a command not yet implemented in this library)
    /// </summary>
    /// <param name="command">The name of the command</param>
    /// <param name="args">Array of arguments to the command</param>
    /// <returns>Redis unified response</returns>
    public Task<object> CallAsync(string command, params string[] args)
        => WriteAsync(new RedisObject(command, args));

    // All public async methods funnel through this single connector entry point.
    Task<T> WriteAsync<T>(RedisCommand<T> command) => _connector.CallAsync(command);

    #region sentinel
    /// <summary>
    /// Ping the Sentinel server
    /// </summary>
    /// <returns>Status code</returns>
    public Task<string> PingAsync() => WriteAsync(RedisCommands.Ping());

    /// <summary>
    /// Get a list of monitored Redis masters
    /// </summary>
    /// <returns>Redis master info</returns>
    public Task<RedisMasterInfo[]> MastersAsync()
        => WriteAsync(RedisCommands.Sentinel.Masters());

    /// <summary>
    /// Get information on the specified Redis master
    /// </summary>
    /// <param name="masterName">Name of the Redis master</param>
    /// <returns>Master information</returns>
    public Task<RedisMasterInfo> MasterAsync(string masterName)
        => WriteAsync(RedisCommands.Sentinel.Master(masterName));

    /// <summary>
    /// Get a list of other Sentinels known to the current Sentinel
    /// </summary>
    /// <param name="masterName">Name of monitored master</param>
    /// <returns>Sentinel hosts and ports</returns>
    public Task<RedisSentinelInfo[]> SentinelsAsync(string masterName)
        => WriteAsync(RedisCommands.Sentinel.Sentinels(masterName));

    /// <summary>
    /// Get a list of monitored Redis slaves to the given master
    /// </summary>
    /// <param name="masterName">Name of monitored master</param>
    /// <returns>Redis slave info</returns>
    public Task<RedisSlaveInfo[]> SlavesAsync(string masterName)
        => WriteAsync(RedisCommands.Sentinel.Slaves(masterName));

    /// <summary>
    /// Get the IP and port of the current master Redis server
    /// </summary>
    /// <param name="masterName">Name of monitored master</param>
    /// <returns>IP and port of master Redis server</returns>
    public Task<Tuple<string, int>> GetMasterAddrByNameAsync(string masterName)
        => WriteAsync(RedisCommands.Sentinel.GetMasterAddrByName(masterName));

    /// <summary>
    /// Get master state information
    /// </summary>
    /// <param name="ip">Host IP</param>
    /// <param name="port">Host port</param>
    /// <param name="currentEpoch">Current epoch</param>
    /// <param name="runId">Run ID</param>
    /// <returns>Master state</returns>
    public Task<RedisMasterState> IsMasterDownByAddrAsync(string ip, int port, long currentEpoch, string runId)
        => WriteAsync(RedisCommands.Sentinel.IsMasterDownByAddr(ip, port, currentEpoch, runId));

    /// <summary>
    /// Clear state in all masters with matching name
    /// </summary>
    /// <param name="pattern">Master name pattern</param>
    /// <returns>Number of masters that were reset</returns>
    public Task<long> ResetAsync(string pattern)
        => WriteAsync(RedisCommands.Sentinel.Reset(pattern));

    /// <summary>
    /// Force a failover as if the master was not reachable, and without asking for agreement from other sentinels
    /// </summary>
    /// <param name="masterName">Master name</param>
    /// <returns>Status code</returns>
    public Task<string> FailoverAsync(string masterName)
        => WriteAsync(RedisCommands.Sentinel.Failover(masterName));

    /// <summary>
    /// Start monitoring a new master
    /// </summary>
    /// <param name="name">Master name</param>
    /// <param name="port">Master port</param>
    /// <param name="quorum">Quorum count</param>
    /// <returns>Status code</returns>
    public Task<string> MonitorAsync(string name, int port, int quorum)
        => WriteAsync(RedisCommands.Sentinel.Monitor(name, port, quorum));

    /// <summary>
    /// Remove the specified master
    /// </summary>
    /// <param name="name">Master name</param>
    /// <returns>Status code</returns>
    public Task<string> RemoveAsync(string name)
        => WriteAsync(RedisCommands.Sentinel.Remove(name));

    /// <summary>
    /// Change configuration parameters of a specific master
    /// </summary>
    /// <param name="masterName">Master name</param>
    /// <param name="option">Config option name</param>
    /// <param name="value">Config option value</param>
    /// <returns>Status code</returns>
    public Task<string> SetAsync(string masterName, string option, string value)
        => WriteAsync(RedisCommands.Sentinel.Set(masterName, option, value));
    #endregion
}
}
#endif |
2881099/csredis | 15,548 | src/CSRedisCore/RedisClientPool.cs | using CSRedis.Internal.ObjectPool;
using System;
using System.Collections.Generic;
using System.Collections.Concurrent;
using System.Net;
using System.Text;
using System.Text.RegularExpressions;
using System.Threading.Tasks;
using System.Threading;
using System.Diagnostics;
using System.Linq;
using System.Net.Sockets;
namespace CSRedis
{
public class RedisClientPool : ObjectPool<RedisClient>
{
public RedisClientPool(string connectionString, Action<RedisClient> onConnected) : base(null)
{
_policy = new RedisClientPoolPolicy
{
_pool = this
};
_policy.Connected += (s, o) =>
{
RedisClient rc = s as RedisClient;
try
{
rc.ReceiveTimeout = _policy._syncTimeout;
rc.SendTimeout = _policy._syncTimeout;
}
catch { }
if (!string.IsNullOrEmpty(_policy._password))
{
try
{
rc.Auth(_policy._user, _policy._password);
}
catch (Exception authEx)
{
if (authEx.Message != "ERR Client sent AUTH, but no password is set")
throw authEx;
}
}
if (_policy._database > 0) rc.Select(_policy._database);
onConnected(s as RedisClient);
};
this.Policy = _policy;
_policy.ConnectionString = connectionString;
}
public void Return(Object<RedisClient> obj, Exception exception, bool isRecreate = false)
{
if (exception != null)
{
try
{
try
{
if (!obj.Value.IsConnected) obj.Value.Connect(_policy._connectTimeout);
obj.Value.Ping();
var fcolor = Console.ForegroundColor;
Console.WriteLine($"");
Console.ForegroundColor = ConsoleColor.DarkYellow;
Console.WriteLine($"csreids 错误【{Policy.Name}】:{exception.Message} {exception.StackTrace}");
Console.ForegroundColor = fcolor;
Console.WriteLine($"");
}
catch
{
obj.ResetValue();
if (!obj.Value.IsConnected) obj.Value.Connect(_policy._connectTimeout);
obj.Value.Ping();
}
}
catch (Exception ex)
{
base.SetUnavailable(ex, obj.LastGetTimeCopy);
}
}
base.Return(obj, isRecreate);
}
internal bool CheckAvailable() => base.LiveCheckAvailable();
internal RedisClientPoolPolicy _policy;
public string Key => _policy.Key;
public string Prefix => _policy.Prefix;
public Encoding Encoding { get; set; } = new UTF8Encoding(false);
}
public class RedisClientPoolPolicy : IPolicy<RedisClient>
{
internal RedisClientPool _pool;
internal int _port = 6379, _database = 0, _tryit = 0, _connectTimeout = 5000, _syncTimeout = 10000;
internal string _ip = "127.0.0.1", _user = "", _password = "", _clientname = "";
internal bool _ssl = false, _testCluster = true, _asyncPipeline = false;
internal int _preheat = 5;
internal string Key => $"{_ip}:{_port}/{_database}";
internal string Prefix { get; set; }
public event EventHandler Connected;
public string Name { get => Key; set { throw new Exception("RedisClientPoolPolicy 不提供设置 Name 属性值。"); } }
public int PoolSize { get; set; } = 50;
public TimeSpan SyncGetTimeout { get; set; } = TimeSpan.FromSeconds(10);
public TimeSpan IdleTimeout { get; set; } = TimeSpan.FromSeconds(20);
public int AsyncGetCapacity { get; set; } = 100000;
public bool IsThrowGetTimeoutException { get; set; } = true;
public bool IsAutoDisposeWithSystem { get; set; } = true;
public int CheckAvailableInterval { get; set; } = 2;
internal string BuildConnectionString(string endpoint)
{
return $"{endpoint}{(string.IsNullOrEmpty(_user) ? "" : $"user={_user}")},password={_password},defaultDatabase={_database},poolsize={PoolSize}," +
$"connectTimeout={_connectTimeout},syncTimeout={_syncTimeout},idletimeout={(int)IdleTimeout.TotalMilliseconds}," +
$"preheat=false,ssl={(_ssl ? "true" : "false")},tryit={_tryit},name={_clientname},prefix={Prefix}," +
$"autodispose={(IsAutoDisposeWithSystem ? "true" : "false")},asyncpipeline={(_asyncPipeline ? "true" : "false")}";
}
internal void SetHost(string host)
{
if (string.IsNullOrEmpty(host?.Trim())) {
_ip = "127.0.0.1";
_port = 6379;
return;
}
host = host.Trim();
var ipv6 = Regex.Match(host, @"^\[([^\]]+)\]\s*(:\s*(\d+))?$");
if (ipv6.Success) //ipv6+port 格式: [fe80::b164:55b3:4b4f:7ce6%15]:6379
{
_ip = ipv6.Groups[1].Value.Trim();
_port = int.TryParse(ipv6.Groups[3].Value, out var tryint) && tryint > 0 ? tryint : 6379;
return;
}
var spt = (host ?? "").Split(':');
if (spt.Length == 1) //ipv4 or domain
{
_ip = string.IsNullOrEmpty(spt[0].Trim()) == false ? spt[0].Trim() : "127.0.0.1";
_port = 6379;
return;
}
if (spt.Length == 2) //ipv4:port or domain:port
{
if (int.TryParse(spt.Last().Trim(), out var testPort2))
{
_ip = string.IsNullOrEmpty(spt[0].Trim()) == false ? spt[0].Trim() : "127.0.0.1";
_port = testPort2;
return;
}
_ip = host;
_port = 6379;
return;
}
if (IPAddress.TryParse(host, out var tryip) && tryip.AddressFamily == AddressFamily.InterNetworkV6) //test ipv6
{
_ip = host;
_port = 6379;
return;
}
if (int.TryParse(spt.Last().Trim(), out var testPort)) //test ipv6:port
{
var testHost = string.Join(":", spt.Where((a, b) => b < spt.Length - 1));
if (IPAddress.TryParse(testHost, out tryip) && tryip.AddressFamily == AddressFamily.InterNetworkV6)
{
_ip = testHost;
_port = 6379;
return;
}
}
_ip = host;
_port = 6379;
}
private string _connectionString;
public string ConnectionString
{
get => _connectionString;
set
{
_connectionString = value;
if (string.IsNullOrEmpty(_connectionString)) return;
//支持密码中带有逗号,将原有 split(',') 改成以下处理方式
var vs = Regex.Split(_connectionString, @"\,([\w \t\r\n]+)=", RegexOptions.Multiline);
this.SetHost(vs[0]);
for (var a = 1; a < vs.Length; a += 2)
{
var kv = new[] { vs[a].ToLower().Trim(), vs[a + 1] };
switch (kv[0])
{
case "user":
_user = kv.Length > 1 ? kv[1] : "";
break;
case "password":
_password = kv.Length > 1 ? kv[1] : "";
break;
case "prefix":
Prefix = kv.Length > 1 ? kv[1] : "";
break;
case "defaultdatabase":
_database = int.TryParse(kv.Length > 1 ? kv[1].Trim() : "0", out _database) ? _database : 0;
break;
case "poolsize":
PoolSize = int.TryParse(kv.Length > 1 ? kv[1].Trim() : "0", out var poolsize) == false || poolsize <= 0 ? 50 : poolsize;
break;
case "ssl":
_ssl = kv.Length > 1 ? kv[1].ToLower().Trim() == "true" : false;
break;
case "preheat":
var kvtrim = kv.Length > 1 ? kv[1].ToLower().Trim() : null;
_preheat = kvtrim == "true" ? -1 : (int.TryParse(kvtrim, out _preheat) ? _preheat : 0);
break;
case "name":
_clientname = kv.Length > 1 ? kv[1] : "";
break;
case "tryit":
_tryit = int.TryParse(kv.Length > 1 ? kv[1].Trim() : "0", out _tryit) ? _tryit : 0;
break;
case "connecttimeout":
_connectTimeout = int.TryParse(kv.Length > 1 ? kv[1].Trim() : "5000", out var connectTimeout) == false || connectTimeout <= 0 ? 5000 : connectTimeout;
break;
case "synctimeout":
_syncTimeout = int.TryParse(kv.Length > 1 ? kv[1].Trim() : "10000", out var syncTimeout) == false || syncTimeout <= 0 ? 10000 : syncTimeout;
break;
case "idletimeout":
IdleTimeout = TimeSpan.FromMilliseconds(int.TryParse(kv.Length > 1 ? kv[1].Trim() : "0", out var idleTimeout) == false || idleTimeout <= 0 ? 0 : idleTimeout);
break;
case "testcluster":
_testCluster = kv.Length > 1 ? kv[1].ToLower().Trim() == "true" : true;
break;
case "autodispose":
IsAutoDisposeWithSystem = kv.Length > 1 ? kv[1].ToLower().Trim() == "true" : true;
break;
case "asyncpipeline":
_asyncPipeline = kv.Length > 1 ? kv[1].ToLower().Trim() == "true" : true;
break;
}
}
if (_preheat < 0) _preheat = PoolSize;
if (_preheat > 0)
PrevReheatConnectionPool(_pool, _preheat);
}
}
public bool OnCheckAvailable(Object<RedisClient> obj)
{
obj.ResetValue();
if (!obj.Value.IsConnected) obj.Value.Connect(_connectTimeout);
return obj.Value.Ping() == "PONG";
}
public RedisClient OnCreate()
{
RedisClient client = null;
if (IPAddress.TryParse(_ip, out var tryip))
{
client = new RedisClient(new IPEndPoint(tryip, _port), _ssl);
}
else
{
var ips = Dns.GetHostAddresses(_ip);
if (ips.Length == 0) throw new Exception($"无法解析“{_ip}”");
client = new RedisClient(_ip, _port, _ssl);
}
client.Connected += (s, o) =>
{
Connected(s, o);
if (!string.IsNullOrEmpty(_clientname)) client.ClientSetName(_clientname);
};
return client;
}
public void OnDestroy(RedisClient obj)
{
if (obj != null)
{
//if (obj.IsConnected) try { obj.Quit(); } catch { } 此行会导致,服务器主动断开后,执行该命令超时停留10-20秒
try { obj.Dispose(); } catch { }
}
}
public void OnGet(Object<RedisClient> obj)
{
if (_pool.Encoding != obj.Value.Encoding) obj.Value.Encoding = _pool.Encoding;
if (_pool.IsAvailable)
{
if (DateTime.Now.Subtract(obj.LastReturnTime).TotalSeconds > 60 || obj.Value.IsConnected == false)
{
try
{
if (!obj.Value.IsConnected) obj.Value.Connect(_connectTimeout);
obj.Value.Ping();
}
catch
{
obj.ResetValue();
}
}
}
}
#if net40
#else
async public Task OnGetAsync(Object<RedisClient> obj)
{
if (_pool.Encoding != obj.Value.Encoding) obj.Value.Encoding = _pool.Encoding;
if (_pool.IsAvailable)
{
if (DateTime.Now.Subtract(obj.LastReturnTime).TotalSeconds > 60 || obj.Value.IsConnected == false)
{
try
{
if (!obj.Value.IsConnected) obj.Value.Connect(_connectTimeout);
await obj.Value.PingAsync();
}
catch
{
obj.ResetValue();
}
}
}
}
#endif
public void OnGetTimeout()
{
}
public void OnReturn(Object<RedisClient> obj)
{
}
public void OnAvailable()
{
}
public void OnUnavailable()
{
}
public static void PrevReheatConnectionPool(ObjectPool<RedisClient> pool, int minPoolSize)
{
if (minPoolSize <= 0) minPoolSize = Math.Min(5, pool.Policy.PoolSize);
if (minPoolSize > pool.Policy.PoolSize) minPoolSize = pool.Policy.PoolSize;
var initTestOk = true;
var initStartTime = DateTime.Now;
var initConns = new ConcurrentBag<Object<RedisClient>>();
try
{
var conn = pool.Get();
initConns.Add(conn);
pool.Policy.OnCheckAvailable(conn);
}
catch (Exception ex)
{
initTestOk = false; //预热一次失败,后面将不进行
pool.SetUnavailable(ex, DateTime.Now);
}
for (var a = 1; initTestOk && a < minPoolSize; a += 10)
{
if (initStartTime.Subtract(DateTime.Now).TotalSeconds > 3) break; //预热耗时超过3秒,退出
var b = Math.Min(minPoolSize - a, 10); //每10个预热
var initTasks = new Task[b];
for (var c = 0; c < b; c++)
{
initTasks[c] = TaskEx.Run(() =>
{
try
{
var conn = pool.Get();
initConns.Add(conn);
pool.Policy.OnCheckAvailable(conn);
}
catch
{
initTestOk = false; //有失败,下一组退出预热
}
});
}
Task.WaitAll(initTasks);
}
while (initConns.TryTake(out var conn)) pool.Return(conn);
}
}
}
|
2877025939/PlanADScrollView | 2,038 | PlanADScrollView/PlanADScrollView/AppDelegate.m | //
// AppDelegate.m
// PlanADScrollView
//
// Created by anan on 2017/10/18.
// Copyright © 2017年 Plan. All rights reserved.
//
#import "AppDelegate.h"
@interface AppDelegate ()
@end

@implementation AppDelegate

#pragma mark - Launch

- (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions {
    // No custom launch configuration is needed; the storyboard builds the UI.
    return YES;
}

#pragma mark - UIApplicationDelegate lifecycle (intentionally empty)

// This sample keeps no state that must be paused, saved, or restored, so the
// remaining lifecycle callbacks are deliberate no-ops.

- (void)applicationWillResignActive:(UIApplication *)application {
}

- (void)applicationDidEnterBackground:(UIApplication *)application {
}

- (void)applicationWillEnterForeground:(UIApplication *)application {
}

- (void)applicationDidBecomeActive:(UIApplication *)application {
}

- (void)applicationWillTerminate:(UIApplication *)application {
}

@end
|
2881099/csredis | 7,296 | src/CSRedisCore/RedisSentinelClient.Sync.cs | using CSRedis.Internal.Commands;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace CSRedis
{
public partial class RedisSentinelClient
{
/// <summary>
/// Connect to the remote host
/// </summary>
/// <param name="timeout">Connection timeout in milliseconds</param>
/// <returns>True if connected</returns>
public bool Connect(int timeout)
{
return _connector.Connect(timeout);
}
/// <summary>
/// Call arbitrary Sentinel command (e.g. for a command not yet implemented in this library)
/// </summary>
/// <param name="command">The name of the command</param>
/// <param name="args">Array of arguments to the command</param>
/// <returns>Redis unified response</returns>
public object Call(string command, params string[] args)
{
return Write(RedisCommands.Call(command, args));
}
T Write<T>(RedisCommand<T> command)
{
return _connector.Call(command);
}
#region sentinel
/// <summary>
/// Ping the Sentinel server
/// </summary>
/// <returns>Status code</returns>
public string Ping()
{
return Write(RedisCommands.Ping());
}
/// <summary>
/// Get a list of monitored Redis masters
/// </summary>
/// <returns>Redis master info</returns>
public RedisMasterInfo[] Masters()
{
return Write(RedisCommands.Sentinel.Masters());
}
/// <summary>
/// Get information on the specified Redis master
/// </summary>
/// <param name="masterName">Name of the Redis master</param>
/// <returns>Master information</returns>
public RedisMasterInfo Master(string masterName)
{
return Write(RedisCommands.Sentinel.Master(masterName));
}
/// <summary>
/// Get a list of other Sentinels known to the current Sentinel
/// </summary>
/// <param name="masterName">Name of monitored master</param>
/// <returns>Sentinel hosts and ports</returns>
public RedisSentinelInfo[] Sentinels(string masterName)
{
return Write(RedisCommands.Sentinel.Sentinels(masterName));
}
/// <summary>
/// Get a list of monitored Redis slaves to the given master
/// </summary>
/// <param name="masterName">Name of monitored master</param>
/// <returns>Redis slave info</returns>
public RedisSlaveInfo[] Slaves(string masterName)
{
return Write(RedisCommands.Sentinel.Slaves(masterName));
}
/// <summary>
/// Get the IP and port of the current master Redis server
/// </summary>
/// <param name="masterName">Name of monitored master</param>
/// <returns>IP and port of master Redis server</returns>
public Tuple<string, int> GetMasterAddrByName(string masterName)
{
return Write(RedisCommands.Sentinel.GetMasterAddrByName(masterName));
}
/// <summary>
/// Open one or more subscription channels to Redis Sentinel server
/// </summary>
/// <param name="channels">Name of channels to open (refer to http://redis.io/ for channel names)</param>
public void Subscribe(params string[] channels)
{
_subscription.Send(RedisCommands.Subscribe(channels));
}
/// <summary>
/// Close one or more subscription channels to Redis Sentinel server
/// </summary>
/// <param name="channels">Name of channels to close</param>
public void Unsubscribe(params string[] channels)
{
_subscription.Send(RedisCommands.Unsubscribe(channels));
}
/// <summary>
/// Open one or more subscription channels to Redis Sentinel server
/// </summary>
/// <param name="channelPatterns">Pattern of channels to open (refer to http://redis.io/ for channel names)</param>
public void PSubscribe(params string[] channelPatterns)
{
_subscription.Send(RedisCommands.PSubscribe(channelPatterns));
}
/// <summary>
/// Close one or more subscription channels to Redis Sentinel server
/// </summary>
/// <param name="channelPatterns">Pattern of channels to close</param>
public void PUnsubscribe(params string[] channelPatterns)
{
_subscription.Send(RedisCommands.PUnsubscribe(channelPatterns));
}
/// <summary>
/// Get master state information
/// </summary>
/// <param name="ip">Host IP</param>
/// <param name="port">Host port</param>
/// <param name="currentEpoch">Current epoch</param>
/// <param name="runId">Run ID</param>
/// <returns>Master state</returns>
public RedisMasterState IsMasterDownByAddr(string ip, int port, long currentEpoch, string runId)
{
return Write(RedisCommands.Sentinel.IsMasterDownByAddr(ip, port, currentEpoch, runId));
}
/// <summary>
/// Clear state in all masters with matching name
/// </summary>
/// <param name="pattern">Master name pattern</param>
/// <returns>Number of masters that were reset</returns>
public long Reset(string pattern)
{
return Write(RedisCommands.Sentinel.Reset(pattern));
}
/// <summary>
/// Force a failover as if the master was not reachable, and without asking for agreement from other sentinels
/// </summary>
/// <param name="masterName">Master name</param>
/// <returns>Status code</returns>
public string Failover(string masterName)
{
return Write(RedisCommands.Sentinel.Failover(masterName));
}
/// <summary>
/// Start monitoring a new master
/// </summary>
/// <param name="name">Master name</param>
/// <param name="port">Master port</param>
/// <param name="quorum">Quorum count</param>
/// <returns>Status code</returns>
public string Monitor(string name, int port, int quorum)
{
return Write(RedisCommands.Sentinel.Monitor(name, port, quorum));
}
/// <summary>
/// Remove the specified master
/// </summary>
/// <param name="name">Master name</param>
/// <returns>Status code</returns>
public string Remove(string name)
{
return Write(RedisCommands.Sentinel.Remove(name));
}
/// <summary>
/// Change configuration parameters of a specific master
/// </summary>
/// <param name="masterName">Master name</param>
/// <param name="option">Config option name</param>
/// <param name="value">Config option value</param>
/// <returns>Status code</returns>
public string Set(string masterName, string option, string value)
{
return Write(RedisCommands.Sentinel.Set(masterName, option, value));
}
#endregion
}
}
|
2881099/csredis | 1,781 | src/CSRedisCore/Net40.cs | using System;
using System.Collections.Generic;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
namespace CSRedis
{
class TaskEx
{
public static Task<T> FromResult<T>(T value)
{
#if net40
return new Task<T>(() => value);
#else
return Task.FromResult(value);
#endif
}
public static Task Run(Action action)
{
#if net40
var tcs = new TaskCompletionSource<object>();
new Thread(() =>
{
try
{
action();
tcs.SetResult(null);
}
catch (Exception ex)
{
tcs.SetException(ex);
}
})
{ IsBackground = true }.Start();
return tcs.Task;
#else
return Task.Run(action);
#endif
}
public static Task<TResult> Run<TResult>(Func<TResult> function)
{
var tcs = new TaskCompletionSource<TResult>();
new Thread(() =>
{
try
{
tcs.SetResult(function());
}
catch (Exception ex)
{
tcs.SetException(ex);
}
})
{ IsBackground = true }.Start();
return tcs.Task;
}
public static Task Delay(TimeSpan timeout)
{
var tcs = new TaskCompletionSource<object>();
var timer = new System.Timers.Timer(timeout.TotalMilliseconds) { AutoReset = false };
timer.Elapsed += delegate { timer.Dispose(); tcs.SetResult(null); };
timer.Start();
return tcs.Task;
}
}
} |
2881099/csredis | 78,812 | src/CSRedisCore/IRedisClientSync.cs | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace CSRedis
{
/// <summary>
/// Interface for syncronous RedisClient methods
/// </summary>
public interface IRedisClientSync : IRedisClient
{
/// <summary>
/// Connect to the remote host
/// </summary>
/// <param name="timeout">Connection timeout in milliseconds</param>
/// <returns>True if connected</returns>
bool Connect(int timeout);
/// <summary>
/// Call arbitrary Redis command
/// </summary>
/// <param name="command">Command name</param>
/// <param name="args">Command arguments</param>
/// <returns>Redis object</returns>
object Call(string command, params string[] args);
#region Connection
/// <summary>
/// Authenticate to the server
/// </summary>
/// <param name="password">Redis server password</param>
/// <returns>Status message</returns>
string Auth(string password);
/// <summary>
/// Echo the given string
/// </summary>
/// <param name="message">Message to echo</param>
/// <returns>Message</returns>
string Echo(string message);
/// <summary>
/// Ping the server
/// </summary>
/// <returns>Status message</returns>
string Ping();
/// <summary>
/// Close the connection
/// </summary>
/// <returns>Status message</returns>
string Quit();
/// <summary>
/// Change the selected database for the current connection
/// </summary>
/// <param name="index">Zero-based database index</param>
/// <returns>Status message</returns>
string Select(int index);
#endregion
#region Keys
/// <summary>
/// Delete a key
/// </summary>
/// <param name="keys">Keys to delete</param>
/// <returns>Number of keys removed</returns>
long Del(params string[] keys);
/// <summary>
/// Return a serialized version of the value stored at the specified key
/// </summary>
/// <param name="key">Key to dump</param>
/// <returns>Serialized value</returns>
byte[] Dump(string key);
/// <summary>
/// Determine if a key exists
/// </summary>
/// <param name="key">Key to check</param>
/// <returns>True if key exists</returns>
bool Exists(string key);
/// <summary>
/// Set a key's time to live in seconds
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="expiration">Expiration (nearest second);</param>
/// <returns>True if timeout was set; false if key does not exist or timeout could not be set</returns>
bool Expire(string key, TimeSpan expiration);
/// <summary>
/// Set a key's time to live in seconds
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="seconds">Expiration in seconds</param>
/// <returns>True if timeout was set; false if key does not exist or timeout could not be set</returns>
bool Expire(string key, int seconds);
/// <summary>
/// Set the expiration for a key (nearest second);
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="expirationDate">Date of expiration, to nearest second</param>
/// <returns>True if timeout was set; false if key does not exist or timeout could not be set</returns>
bool ExpireAt(string key, DateTime expirationDate);
/// <summary>
/// Set the expiration for a key as a UNIX timestamp
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="timestamp">UNIX timestamp</param>
/// <returns>True if timeout was set; false if key does not exist or timeout could not be set</returns>
bool ExpireAt(string key, int timestamp);
/// <summary>
/// Find all keys matching the given pattern
/// </summary>
/// <param name="pattern">Pattern to match</param>
/// <returns>Array of keys matching pattern</returns>
string[] Keys(string pattern);
/// <summary>
/// Atomically transfer a key from a Redis instance to another one
/// </summary>
/// <param name="host">Remote Redis host</param>
/// <param name="port">Remote Redis port</param>
/// <param name="key">Key to migrate</param>
/// <param name="destinationDb">Remote database ID</param>
/// <param name="timeoutMilliseconds">Timeout in milliseconds</param>
/// <returns>Status message</returns>
string Migrate(string host, int port, string key, int destinationDb, int timeoutMilliseconds);
/// <summary>
/// Atomically transfer a key from a Redis instance to another one
/// </summary>
/// <param name="host">Remote Redis host</param>
/// <param name="port">Remote Redis port</param>
/// <param name="key">Key to migrate</param>
/// <param name="destinationDb">Remote database ID</param>
/// <param name="timeout">Timeout in milliseconds</param>
/// <returns>Status message</returns>
string Migrate(string host, int port, string key, int destinationDb, TimeSpan timeout);
/// <summary>
/// Move a key to another database
/// </summary>
/// <param name="key">Key to move</param>
/// <param name="database">Database destination ID</param>
/// <returns>True if key was moved</returns>
bool Move(string key, int database);
/// <summary>
/// Get the number of references of the value associated with the specified key
/// </summary>
/// <param name="arguments">Subcommand arguments</param>
/// <returns>The type of internal representation used to store the value at the specified key</returns>
string ObjectEncoding(params string[] arguments);
/// <summary>
/// Inspect the internals of Redis objects
/// </summary>
/// <param name="subCommand">Type of Object command to send</param>
/// <param name="arguments">Subcommand arguments</param>
/// <returns>Varies depending on subCommand</returns>
long? Object(RedisObjectSubCommand subCommand, params string[] arguments);
/// <summary>
/// Remove the expiration from a key
/// </summary>
/// <param name="key">Key to modify</param>
/// <returns>True if timeout was removed</returns>
bool Persist(string key);
/// <summary>
/// Set a key's time to live in milliseconds
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="expiration">Expiration (nearest millisecond);</param>
/// <returns>True if timeout was set</returns>
bool PExpire(string key, TimeSpan expiration);
/// <summary>
/// Set a key's time to live in milliseconds
/// </summary>
/// <param name="key">Key</param>
/// <param name="milliseconds">Expiration in milliseconds</param>
/// <returns>True if timeout was set</returns>
bool PExpire(string key, long milliseconds);
/// <summary>
/// Set the expiration for a key (nearest millisecond);
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="date">Expiration date</param>
/// <returns>True if timeout was set</returns>
bool PExpireAt(string key, DateTime date);
/// <summary>
/// Set the expiration for a key as a UNIX timestamp specified in milliseconds
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="timestamp">Expiration timestamp (milliseconds);</param>
/// <returns>True if timeout was set</returns>
bool PExpireAt(string key, long timestamp);
/// <summary>
/// Get the time to live for a key in milliseconds
/// </summary>
/// <param name="key">Key to check</param>
/// <returns>Time-to-live in milliseconds</returns>
long PTtl(string key);
/// <summary>
/// Return a random key from the keyspace
/// </summary>
/// <returns>A random key</returns>
string RandomKey();
/// <summary>
/// Rename a key
/// </summary>
/// <param name="key">Key to rename</param>
/// <param name="newKey">New key name</param>
/// <returns>Status code</returns>
string Rename(string key, string newKey);
/// <summary>
/// Rename a key, only if the new key does not exist
/// </summary>
/// <param name="key">Key to rename</param>
/// <param name="newKey">New key name</param>
/// <returns>True if key was renamed</returns>
bool RenameNx(string key, string newKey);
/// <summary>
/// Create a key using the provided serialized value, previously obtained using dump
/// </summary>
/// <param name="key">Key to restore</param>
/// <param name="ttlMilliseconds">Time-to-live in milliseconds</param>
/// <param name="serializedValue">Serialized value from DUMP</param>
/// <returns>Status code</returns>
string Restore(string key, long ttlMilliseconds, byte[] serializedValue);
/// <summary>
/// Sort the elements in a list, set or sorted set
/// </summary>
/// <param name="key">Key to sort</param>
/// <param name="offset">Number of elements to skip</param>
/// <param name="count">Number of elements to return</param>
/// <param name="by">Sort by external key</param>
/// <param name="dir">Sort direction</param>
/// <param name="isAlpha">Sort lexicographically</param>
/// <param name="get">Retrieve external keys</param>
/// <returns>The sorted list</returns>
string[] Sort(string key, long? offset = null, long? count = null, string by = null, RedisSortDir? dir = null, bool? isAlpha = null, params string[] get);
/// <summary>
/// Sort the elements in a list, set or sorted set, then store the result in a new list
/// </summary>
/// <param name="key">Key to sort</param>
/// <param name="destination">Destination key name of stored sort</param>
/// <param name="offset">Number of elements to skip</param>
/// <param name="count">Number of elements to return</param>
/// <param name="by">Sort by external key</param>
/// <param name="dir">Sort direction</param>
/// <param name="isAlpha">Sort lexicographically</param>
/// <param name="get">Retrieve external keys</param>
/// <returns>Number of elements stored</returns>
long SortAndStore(string key, string destination, long? offset = null, long? count = null, string by = null, RedisSortDir? dir = null, bool? isAlpha = false, params string[] get);
/// <summary>
/// Get the time to live for a key
/// </summary>
/// <param name="key">Key to check</param>
/// <returns>Time-to-live in seconds</returns>
long Ttl(string key);
/// <summary>
/// Determine the type stored at key
/// </summary>
/// <param name="key">Key to check</param>
/// <returns>Type of key</returns>
string Type(string key);
/// <summary>
/// Iterate the set of keys in the currently selected Redis database
/// </summary>
/// <param name="cursor">The cursor returned by the server in the previous call, or 0 if this is the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Set the maximum number of elements to return</param>
/// <returns>Updated cursor and result set</returns>
RedisScan<string> Scan(long cursor, string pattern = null, long? count = null);
RedisScan<byte[]> ScanBytes(long cursor, string pattern = null, long? count = null);
#endregion
#region Hashes
/// <summary>
/// Delete one or more hash fields
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="fields">Fields to delete</param>
/// <returns>Number of fields removed from hash</returns>
long HDel(string key, params string[] fields);
/// <summary>
/// Determine if a hash field exists
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field to check</param>
/// <returns>True if hash field exists</returns>
bool HExists(string key, string field);
/// <summary>
/// Get the value of a hash field
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field to get</param>
/// <returns>Value of hash field</returns>
string HGet(string key, string field);
/// <summary>Get the value of a hash field, as raw bytes</summary>
byte[] HGetBytes(string key, string field);
/// <summary>
/// Get all the fields and values in a hash
/// </summary>
/// <typeparam name="T">Object to map hash</typeparam>
/// <param name="key">Hash key</param>
/// <returns>Strongly typed object mapped from hash</returns>
T HGetAll<T>(string key)
where T : class;
/// <summary>
/// Get all the fields and values in a hash
/// </summary>
/// <param name="key">Hash key</param>
/// <returns>Dictionary mapped from string</returns>
Dictionary<string, string> HGetAll(string key);
/// <summary>Get all the fields and values in a hash, with values as raw bytes</summary>
Dictionary<string, byte[]> HGetAllBytes(string key);
/// <summary>
/// Increment the integer value of a hash field by the given number
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field to increment</param>
/// <param name="increment">Increment value</param>
/// <returns>Value of field after increment</returns>
long HIncrBy(string key, string field, long increment);
/// <summary>
/// Increment the float value of a hash field by the given number
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field to increment</param>
/// <param name="increment">Increment value</param>
/// <returns>Value of field after increment</returns>
decimal HIncrByFloat(string key, string field, decimal increment);
/// <summary>
/// Get all the fields in a hash
/// </summary>
/// <param name="key">Hash key</param>
/// <returns>All hash field names</returns>
string[] HKeys(string key);
/// <summary>
/// Get the number of fields in a hash
/// </summary>
/// <param name="key">Hash key</param>
/// <returns>Number of fields in hash</returns>
long HLen(string key);
/// <summary>
/// Get the values of all the given hash fields
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="fields">Fields to return</param>
/// <returns>Values of given fields</returns>
string[] HMGet(string key, params string[] fields);
/// <summary>Get the values of all the given hash fields, as raw bytes</summary>
byte[][] HMGetBytes(string key, params string[] fields);
/// <summary>
/// Set multiple hash fields to multiple values
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="dict">Dictionary mapping of hash</param>
/// <returns>Status code</returns>
string HMSet(string key, Dictionary<string, object> dict);
/// <summary>
/// Set multiple hash fields to multiple values
/// </summary>
/// <typeparam name="T">Type of object to map hash</typeparam>
/// <param name="key">Hash key</param>
/// <param name="obj">Object mapping of hash</param>
/// <returns>Status code</returns>
string HMSet<T>(string key, T obj)
where T : class;
/// <summary>
/// Set multiple hash fields to multiple values
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="keyValues">Array of [key,value,key,value,..]</param>
/// <returns>Status code</returns>
string HMSet(string key, params object[] keyValues);
/// <summary>
/// Set the value of a hash field
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Hash field to set</param>
/// <param name="value">Value to set</param>
/// <returns>True if field is new</returns>
bool HSet(string key, string field, object value);
/// <summary>
/// Set the value of a hash field, only if the field does not exist
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Hash field to set</param>
/// <param name="value">Value to set</param>
/// <returns>True if field was set to value</returns>
bool HSetNx(string key, string field, object value);
/// <summary>
/// Get all the values in a hash
/// </summary>
/// <param name="key">Hash key</param>
/// <returns>Array of all values in hash</returns>
string[] HVals(string key);
/// <summary>Get all the values in a hash, as raw bytes</summary>
byte[][] HValsBytes(string key);
/// <summary>
/// Iterate the keys and values of a hash field
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="cursor">The cursor returned by the server in the previous call, or 0 if this is the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Maximum number of elements to return</param>
/// <returns>Updated cursor and result set</returns>
RedisScan<Tuple<string, string>> HScan(string key, long cursor, string pattern = null, long? count = null);
/// <summary>Iterate the keys and values of a hash field, with values as raw bytes</summary>
RedisScan<Tuple<string, byte[]>> HScanBytes(string key, long cursor, string pattern = null, long? count = null);
#endregion
#region Lists
/// <summary>
/// Remove and get the first element and key in a list, or block until one is available
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">List keys</param>
/// <returns>List key and list value</returns>
Tuple<string, string> BLPopWithKey(int timeout, params string[] keys);
/// <summary>Remove and get the first element and key in a list as raw bytes, or block until one is available</summary>
Tuple<string, byte[]> BLPopBytesWithKey(int timeout, params string[] keys);
/// <summary>
/// Remove and get the first element and key in a list, or block until one is available
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">List keys</param>
/// <returns>List key and list value</returns>
Tuple<string, string> BLPopWithKey(TimeSpan timeout, params string[] keys);
/// <summary>Remove and get the first element and key in a list as raw bytes, or block until one is available</summary>
Tuple<string, byte[]> BLPopBytesWithKey(TimeSpan timeout, params string[] keys);
/// <summary>
/// Remove and get the first element value in a list, or block until one is available
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">List keys</param>
/// <returns>List value</returns>
string BLPop(int timeout, params string[] keys);
/// <summary>Remove and get the first element value in a list as raw bytes, or block until one is available</summary>
byte[] BLPopBytes(int timeout, params string[] keys);
/// <summary>
/// Remove and get the first element value in a list, or block until one is available
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">List keys</param>
/// <returns>List value</returns>
string BLPop(TimeSpan timeout, params string[] keys);
/// <summary>Remove and get the first element value in a list as raw bytes, or block until one is available</summary>
byte[] BLPopBytes(TimeSpan timeout, params string[] keys);
/// <summary>
/// Remove and get the last element and key in a list, or block until one is available
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">List keys</param>
/// <returns>List key and list value</returns>
Tuple<string, string> BRPopWithKey(int timeout, params string[] keys);
/// <summary>Remove and get the last element and key in a list as raw bytes, or block until one is available</summary>
Tuple<string, byte[]> BRPopBytesWithKey(int timeout, params string[] keys);
/// <summary>
/// Remove and get the last element and key in a list, or block until one is available
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">List keys</param>
/// <returns>List key and list value</returns>
Tuple<string, string> BRPopWithKey(TimeSpan timeout, params string[] keys);
/// <summary>Remove and get the last element and key in a list as raw bytes, or block until one is available</summary>
Tuple<string, byte[]> BRPopBytesWithKey(TimeSpan timeout, params string[] keys);
/// <summary>
/// Remove and get the last element value in a list, or block until one is available
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">List keys</param>
/// <returns>List value</returns>
string BRPop(int timeout, params string[] keys);
/// <summary>Remove and get the last element value in a list as raw bytes, or block until one is available</summary>
byte[] BRPopBytes(int timeout, params string[] keys);
/// <summary>
/// Remove and get the last element value in a list, or block until one is available
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">List keys</param>
/// <returns>List value</returns>
string BRPop(TimeSpan timeout, params string[] keys);
/// <summary>Remove and get the last element value in a list as raw bytes, or block until one is available</summary>
byte[] BRPopBytes(TimeSpan timeout, params string[] keys);
/// <summary>
/// Pop a value from a list, push it to another list and return it; or block until one is available
/// </summary>
/// <param name="source">Source list key</param>
/// <param name="destination">Destination key</param>
/// <param name="timeout">Timeout in seconds</param>
/// <returns>Element popped</returns>
string BRPopLPush(string source, string destination, int timeout);
/// <summary>Pop a value from a list as raw bytes, push it to another list and return it; or block until one is available</summary>
byte[] BRPopBytesLPush(string source, string destination, int timeout);
/// <summary>
/// Pop a value from a list, push it to another list and return it; or block until one is available
/// </summary>
/// <param name="source">Source list key</param>
/// <param name="destination">Destination key</param>
/// <param name="timeout">Timeout in seconds</param>
/// <returns>Element popped</returns>
string BRPopLPush(string source, string destination, TimeSpan timeout);
/// <summary>Pop a value from a list as raw bytes, push it to another list and return it; or block until one is available</summary>
byte[] BRPopBytesLPush(string source, string destination, TimeSpan timeout);
/// <summary>
/// Get an element from a list by its index
/// </summary>
/// <param name="key">List key</param>
/// <param name="index">Zero-based index of item to return</param>
/// <returns>Element at index</returns>
string LIndex(string key, long index);
/// <summary>Get an element from a list by its index, as raw bytes</summary>
byte[] LIndexBytes(string key, long index);
/// <summary>
/// Insert an element before or after another element in a list
/// </summary>
/// <param name="key">List key</param>
/// <param name="insertType">Relative position</param>
/// <param name="pivot">Relative element</param>
/// <param name="value">Element to insert</param>
/// <returns>Length of list after insert or -1 if pivot not found</returns>
long LInsert(string key, RedisInsert insertType, object pivot, object value);
/// <summary>
/// Get the length of a list
/// </summary>
/// <param name="key">List key</param>
/// <returns>Length of list at key</returns>
long LLen(string key);
/// <summary>
/// Remove and get the first element in a list
/// </summary>
/// <param name="key">List key</param>
/// <returns>First element in list</returns>
string LPop(string key);
/// <summary>Remove and get the first element in a list, as raw bytes</summary>
byte[] LPopBytes(string key);
/// <summary>
/// Prepend one or multiple values to a list
/// </summary>
/// <param name="key">List key</param>
/// <param name="values">Values to push</param>
/// <returns>Length of list after push</returns>
long LPush(string key, params object[] values);
/// <summary>
/// Prepend a value to a list, only if the list exists
/// </summary>
/// <param name="key">List key</param>
/// <param name="value">Value to push</param>
/// <returns>Length of list after push</returns>
long LPushX(string key, object value);
/// <summary>
/// Get a range of elements from a list
/// </summary>
/// <param name="key">List key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>List of elements in range</returns>
string[] LRange(string key, long start, long stop);
/// <summary>Get a range of elements from a list, as raw bytes</summary>
byte[][] LRangeBytes(string key, long start, long stop);
/// <summary>
/// Remove elements from a list
/// </summary>
/// <param name="key">List key</param>
/// <param name="count">count &gt; 0: remove N elements from head to tail; count &lt; 0: remove N elements from tail to head; count = 0: remove all elements</param>
/// <param name="value">Remove elements equal to value</param>
/// <returns>Number of removed elements</returns>
long LRem(string key, long count, object value);
/// <summary>
/// Set the value of an element in a list by its index
/// </summary>
/// <param name="key">List key</param>
/// <param name="index">List index to modify</param>
/// <param name="value">New element value</param>
/// <returns>Status code</returns>
string LSet(string key, long index, object value);
/// <summary>
/// Trim a list to the specified range
/// </summary>
/// <param name="key">List key</param>
/// <param name="start">Zero-based start index</param>
/// <param name="stop">Zero-based stop index</param>
/// <returns>Status code</returns>
string LTrim(string key, long start, long stop);
/// <summary>
/// Remove and get the last element in a list
/// </summary>
/// <param name="key">List key</param>
/// <returns>Value of last list element</returns>
string RPop(string key);
/// <summary>Remove and get the last element in a list, as raw bytes</summary>
byte[] RPopBytes(string key);
/// <summary>
/// Remove the last element in a list, append it to another list and return it
/// </summary>
/// <param name="source">List source key</param>
/// <param name="destination">Destination key</param>
/// <returns>Element being popped and pushed</returns>
string RPopLPush(string source, string destination);
/// <summary>Remove the last element in a list as raw bytes, append it to another list and return it</summary>
byte[] RPopBytesLPush(string source, string destination);
/// <summary>
/// Append one or multiple values to a list
/// </summary>
/// <param name="key">List key</param>
/// <param name="values">Values to push</param>
/// <returns>Length of list after push</returns>
long RPush(string key, params object[] values);
/// <summary>
/// Append a value to a list, only if the list exists
/// </summary>
/// <param name="key">List key</param>
/// <param name="value">Value to push</param>
/// <returns>Length of list after push</returns>
long RPushX(string key, object value);
#endregion
#region Sets
/// <summary>
/// Add one or more members to a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="members">Members to add to set</param>
/// <returns>Number of elements added to set</returns>
long SAdd(string key, params object[] members);
/// <summary>
/// Get the number of members in a set
/// </summary>
/// <param name="key">Set key</param>
/// <returns>Number of elements in set</returns>
long SCard(string key);
/// <summary>
/// Subtract multiple sets
/// </summary>
/// <param name="keys">Set keys to subtract</param>
/// <returns>Array of elements in resulting set</returns>
string[] SDiff(params string[] keys);
/// <summary>Subtract multiple sets, returning elements as raw bytes</summary>
byte[][] SDiffBytes(params string[] keys);
/// <summary>
/// Subtract multiple sets and store the resulting set in a key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="keys">Set keys to subtract</param>
/// <returns>Number of elements in the resulting set</returns>
long SDiffStore(string destination, params string[] keys);
/// <summary>
/// Intersect multiple sets
/// </summary>
/// <param name="keys">Set keys to intersect</param>
/// <returns>Array of elements in resulting set</returns>
string[] SInter(params string[] keys);
/// <summary>Intersect multiple sets, returning elements as raw bytes</summary>
byte[][] SInterBytes(params string[] keys);
/// <summary>
/// Intersect multiple sets and store the resulting set in a key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="keys">Set keys to intersect</param>
/// <returns>Number of elements in resulting set</returns>
long SInterStore(string destination, params string[] keys);
/// <summary>
/// Determine if a given value is a member of a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="member">Member to lookup</param>
/// <returns>True if member exists in set</returns>
bool SIsMember(string key, object member);
/// <summary>
/// Get all the members in a set
/// </summary>
/// <param name="key">Set key</param>
/// <returns>All elements in the set</returns>
string[] SMembers(string key);
/// <summary>Get all the members in a set, as raw bytes</summary>
byte[][] SMembersBytes(string key);
/// <summary>
/// Move a member from one set to another
/// </summary>
/// <param name="source">Source key</param>
/// <param name="destination">Destination key</param>
/// <param name="member">Member to move</param>
/// <returns>True if element was moved</returns>
bool SMove(string source, string destination, object member);
/// <summary>
/// Remove and return a random member from a set
/// </summary>
/// <param name="key">Set key</param>
/// <returns>The removed element</returns>
string SPop(string key);
/// <summary>Remove and return a random member from a set, as raw bytes</summary>
byte[] SPopBytes(string key);
/// <summary>
/// Remove and return one or more random members from a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="count">Number of elements to remove and return</param>
/// <returns>The removed elements</returns>
string[] SPop(string key, long count);
/// <summary>Remove and return one or more random members from a set, as raw bytes</summary>
byte[][] SPopBytes(string key, long count);
/// <summary>
/// Get a random member from a set
/// </summary>
/// <param name="key">Set key</param>
/// <returns>One random element from set</returns>
string SRandMember(string key);
/// <summary>Get a random member from a set, as raw bytes</summary>
byte[] SRandMemberBytes(string key);
/// <summary>
/// Get one or more random members from a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="count">Number of elements to return</param>
/// <returns>One or more random elements from set</returns>
string[] SRandMembers(string key, long count);
/// <summary>Get one or more random members from a set, as raw bytes</summary>
byte[][] SRandMembersBytes(string key, long count);
/// <summary>
/// Remove one or more members from a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="members">Set members to remove</param>
/// <returns>Number of elements removed from set</returns>
long SRem(string key, params object[] members);
/// <summary>
/// Add multiple sets
/// </summary>
/// <param name="keys">Set keys to union</param>
/// <returns>Array of elements in resulting set</returns>
string[] SUnion(params string[] keys);
/// <summary>Add multiple sets, returning elements as raw bytes</summary>
byte[][] SUnionBytes(params string[] keys);
/// <summary>
/// Add multiple sets and store the resulting set in a key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="keys">Set keys to union</param>
/// <returns>Number of elements in resulting set</returns>
long SUnionStore(string destination, params string[] keys);
/// <summary>
/// Iterate the elements of a set field
/// </summary>
/// <param name="key">Set key</param>
/// <param name="cursor">The cursor returned by the server in the previous call, or 0 if this is the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Maximum number of elements to return</param>
/// <returns>Updated cursor and result set</returns>
RedisScan<string> SScan(string key, long cursor, string pattern = null, long? count = null);
/// <summary>Iterate the elements of a set field, as raw bytes</summary>
RedisScan<byte[]> SScanBytes(string key, long cursor, string pattern = null, long? count = null);
#endregion
#region Sorted Sets
/// <summary>
/// Add one or more members to a sorted set, or update its score if it already exists
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="scoreMembers">Array of member scores to add to sorted set</param>
/// <returns>Number of elements added to the sorted set (not including member updates);</returns>
long ZAdd<TScore, TMember>(string key, params Tuple<TScore, TMember>[] scoreMembers);
/// <summary>
/// Add one or more members to a sorted set, or update its score if it already exists
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="scoreMembers">Array of member scores [s1, m1, s2, m2, ..]</param>
/// <returns>Number of elements added to the sorted set (not including member updates);</returns>
long ZAdd(string key, params object[] scoreMembers);
/// <summary>
/// Get the number of members in a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <returns>Number of elements in the sorted set</returns>
long ZCard(string key);
/// <summary>
/// Count the members in a sorted set with scores within the given values
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <returns>Number of elements in the specified score range</returns>
long ZCount(string key, decimal min, decimal max, bool exclusiveMin = false, bool exclusiveMax = false);
/// <summary>
/// Count the members in a sorted set with scores within the given values
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <returns>Number of elements in the specified score range</returns>
long ZCount(string key, string min, string max);
/// <summary>
/// Increment the score of a member in a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="increment">Increment by value</param>
/// <param name="member">Sorted set member to increment</param>
/// <returns>New score of member</returns>
decimal ZIncrBy(string key, decimal increment, object member);
/// <summary>
/// Intersect multiple sorted sets and store the resulting set in a new key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="weights">Multiplication factor for each input set</param>
/// <param name="aggregate">Aggregation function of resulting set</param>
/// <param name="keys">Sorted set keys to intersect</param>
/// <returns>Number of elements in the resulting sorted set</returns>
long ZInterStore(string destination, decimal[] weights = null, RedisAggregate? aggregate = null, params string[] keys);
/// <summary>
/// Intersect multiple sorted sets and store the resulting set in a new key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="keys">Sorted set keys to intersect</param>
/// <returns>Number of elements in the resulting sorted set</returns>
long ZInterStore(string destination, params string[] keys);
/// <summary>
/// Return a range of members in a sorted set, by index
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <param name="withScores">Include scores in result</param>
/// <returns>Array of elements in the specified range (with optional scores);</returns>
string[] ZRange(string key, long start, long stop, bool withScores = false);
/// <summary>Return a range of members in a sorted set, by index, as raw bytes</summary>
byte[][] ZRangeBytes(string key, long start, long stop, bool withScores = false);
/// <summary>
/// Return a range of members in a sorted set, by index, together with their scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>Array of elements in the specified range with scores</returns>
Tuple<string, decimal>[] ZRangeWithScores(string key, long start, long stop);
/// <summary>Return a range of members in a sorted set, by index, as raw bytes together with their scores</summary>
Tuple<byte[], decimal>[] ZRangeBytesWithScores(string key, long start, long stop);
/// <summary>
/// Return a range of members in a sorted set, by score
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="withScores">Include scores in result</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified range (with optional scores);</returns>
string[] ZRangeByScore(string key, decimal min, decimal max, bool withScores = false, bool exclusiveMin = false, bool exclusiveMax = false, long? offset = null, long? count = null);
/// <summary>Return a range of members in a sorted set, by score, as raw bytes</summary>
byte[][] ZRangeBytesByScore(string key, decimal min, decimal max, bool withScores = false, bool exclusiveMin = false, bool exclusiveMax = false, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score, with min/max given as strings
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="withScores">Include scores in result</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified range (with optional scores);</returns>
string[] ZRangeByScore(string key, string min, string max, bool withScores = false, long? offset = null, long? count = null);
/// <summary>Return a range of members in a sorted set, by score, as raw bytes, with min/max given as strings</summary>
byte[][] ZRangeBytesByScore(string key, string min, string max, bool withScores = false, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score, together with their scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified range (with optional scores);</returns>
Tuple<string, decimal>[] ZRangeByScoreWithScores(string key, decimal min, decimal max, bool exclusiveMin = false, bool exclusiveMax = false, long? offset = null, long? count = null);
/// <summary>Return a range of members in a sorted set, by score, as raw bytes together with their scores</summary>
Tuple<byte[], decimal>[] ZRangeBytesByScoreWithScores(string key, decimal min, decimal max, bool exclusiveMin = false, bool exclusiveMax = false, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score, together with their scores, with min/max given as strings
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified range (with optional scores);</returns>
Tuple<string, decimal>[] ZRangeByScoreWithScores(string key, string min, string max, long? offset = null, long? count = null);
/// <summary>Return a range of members in a sorted set, by score, as raw bytes together with their scores, with min/max given as strings</summary>
Tuple<byte[], decimal>[] ZRangeBytesByScoreWithScores(string key, string min, string max, long? offset = null, long? count = null);
/// <summary>
/// Determine the index of a member in a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="member">Member to lookup</param>
/// <returns>Rank of member or null if key does not exist</returns>
long? ZRank(string key, object member);
/// <summary>
/// Remove one or more members from a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="members">Members to remove</param>
/// <returns>Number of elements removed</returns>
long ZRem(string key, params object[] members);
/// <summary>
/// Remove all members in a sorted set within the given indexes
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>Number of elements removed</returns>
long ZRemRangeByRank(string key, long start, long stop);
/// <summary>
/// Remove all members in a sorted set within the given scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <returns>Number of elements removed</returns>
long ZRemRangeByScore(string key, decimal min, decimal max, bool exclusiveMin = false, bool exclusiveMax = false);
/// <summary>Remove all members in a sorted set within the given scores, with min/max given as strings</summary>
long ZRemRangeByScore(string key, string min, string max);
/// <summary>
/// Return a range of members in a sorted set, by index, with scores ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <param name="withScores">Include scores in result</param>
/// <returns>List of elements in the specified range (with optional scores);</returns>
string[] ZRevRange(string key, long start, long stop, bool withScores = false);
/// <summary>Return a range of members in a sorted set, by index, as raw bytes, with scores ordered from high to low</summary>
byte[][] ZRevRangeBytes(string key, long start, long stop, bool withScores = false);
/// <summary>
/// Return a range of members in a sorted set, by index, together with their scores, ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>List of elements in the specified range (with optional scores);</returns>
Tuple<string, decimal>[] ZRevRangeWithScores(string key, long start, long stop);
/// <summary>Return a range of members in a sorted set, by index, as raw bytes together with their scores, ordered from high to low</summary>
Tuple<byte[], decimal>[] ZRevRangeBytesWithScores(string key, long start, long stop);
/// <summary>
/// Return a range of members in a sorted set, by score, with scores ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="withScores">Include scores in result</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified score range (with optional scores);</returns>
string[] ZRevRangeByScore(string key, decimal max, decimal min, bool withScores = false, bool exclusiveMax = false, bool exclusiveMin = false, long? offset = null, long? count = null);
/// <summary>Return a range of members in a sorted set, by score, as raw bytes, with scores ordered from high to low</summary>
byte[][] ZRevRangeBytesByScore(string key, decimal max, decimal min, bool withScores = false, bool exclusiveMax = false, bool exclusiveMin = false, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score, with scores ordered from high to low, with max/min given as strings
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="withScores">Include scores in result</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified score range (with optional scores);</returns>
string[] ZRevRangeByScore(string key, string max, string min, bool withScores = false, long? offset = null, long? count = null);
/// <summary>Return a range of members in a sorted set, by score, as raw bytes, with scores ordered from high to low, with max/min given as strings</summary>
byte[][] ZRevRangeBytesByScore(string key, string max, string min, bool withScores = false, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score, together with their scores, ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified score range (with optional scores);</returns>
Tuple<string, decimal>[] ZRevRangeByScoreWithScores(string key, decimal max, decimal min, bool exclusiveMax = false, bool exclusiveMin = false, long? offset = null, long? count = null);
/// <summary>Return a range of members in a sorted set, by score, as raw bytes together with their scores, ordered from high to low</summary>
Tuple<byte[], decimal>[] ZRevRangeBytesByScoreWithScores(string key, decimal max, decimal min, bool exclusiveMax = false, bool exclusiveMin = false, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score, together with their scores, ordered from high to low, with max/min given as strings
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified score range (with optional scores);</returns>
Tuple<string, decimal>[] ZRevRangeByScoreWithScores(string key, string max, string min, long? offset = null, long? count = null);
/// <summary>Return a range of members in a sorted set, by score, as raw bytes together with their scores, ordered from high to low, with max/min given as strings</summary>
Tuple<byte[], decimal>[] ZRevRangeBytesByScoreWithScores(string key, string max, string min, long? offset = null, long? count = null);
/// <summary>
/// Determine the index of a member in a sorted set, with scores ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="member">Member to lookup</param>
/// <returns>Rank of member, or null if member does not exist</returns>
long? ZRevRank(string key, object member);
/// <summary>
/// Get the score associated with the given member in a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="member">Member to lookup</param>
/// <returns>Score of member, or null if member does not exist</returns>
decimal? ZScore(string key, object member);
/// <summary>
/// Add multiple sorted sets and store the resulting sorted set in a new key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="weights">Multiplication factor for each input set</param>
/// <param name="aggregate">Aggregation function of resulting set</param>
/// <param name="keys">Sorted set keys to union</param>
/// <returns>Number of elements in the resulting sorted set</returns>
long ZUnionStore(string destination, decimal[] weights = null, RedisAggregate? aggregate = null, params string[] keys);
/// <summary>
/// Add multiple sorted sets and store the resulting sorted set in a new key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="keys">Sorted set keys to union</param>
/// <returns>Number of elements in the resulting sorted set</returns>
long ZUnionStore(string destination, params string[] keys);
/// <summary>
/// Iterate the scores and elements of a sorted set field
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="cursor">The cursor returned by the server in the previous call, or 0 if this is the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Maximum number of elements to return</param>
/// <returns>Updated cursor and result set</returns>
RedisScan<Tuple<string, decimal>> ZScan(string key, long cursor, string pattern = null, long? count = null);
/// <summary>Iterate the scores and elements of a sorted set field, with members as raw bytes</summary>
RedisScan<Tuple<byte[], decimal>> ZScanBytes(string key, long cursor, string pattern = null, long? count = null);
/// <summary>
/// Retrieve all the elements in a sorted set with a value between min and max
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Lexicographic start value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <param name="max">Lexicographic stop value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <param name="offset">Limit result set by offset</param>
/// <param name="count">Limit result set by size</param>
/// <returns>List of elements in the specified range</returns>
string[] ZRangeByLex(string key, string min, string max, long? offset = null, long? count = null);
/// <summary>Retrieve all the elements in a sorted set with a value between min and max, as raw bytes</summary>
byte[][] ZRangeBytesByLex(string key, string min, string max, long? offset = null, long? count = null);
/// <summary>
/// Remove all elements in the sorted set with a value between min and max
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Lexicographic start value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <param name="max">Lexicographic stop value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <returns>Number of elements removed</returns>
long ZRemRangeByLex(string key, string min, string max);
/// <summary>
/// Returns the number of elements in the sorted set with a value between min and max.
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Lexicographic start value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <param name="max">Lexicographic stop value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <returns>Number of elements in the specified score range</returns>
long ZLexCount(string key, string min, string max);
#endregion
#region Pub/Sub
/// <summary>
/// Listen for messages published to channels matching the given patterns
/// </summary>
/// <param name="channelPatterns">Patterns to subscribe</param>
void PSubscribe(params string[] channelPatterns);
/// <summary>
/// Post a message to a channel
/// </summary>
/// <param name="channel">Channel to post message</param>
/// <param name="message">Message to send</param>
/// <returns>Number of clients that received the message</returns>
long Publish(string channel, string message);
/// <summary>
/// List the currently active channels
/// </summary>
/// <param name="pattern">Return only channels matching this pattern</param>
/// <returns>Array of channel names</returns>
string[] PubSubChannels(string pattern = null);
/// <summary>
/// Return the number of subscribers (exclusive of clients subscribed to patterns) for the specified channels
/// </summary>
/// <param name="channels">Channel names</param>
/// <returns>Array of channel/count tuples</returns>
Tuple<string, long>[] PubSubNumSub(params string[] channels);
/// <summary>
/// Return the number of pattern subscriptions held across all clients
/// </summary>
/// <returns>Number of patterns all clients are subscribed to</returns>
long PubSubNumPat();
/// <summary>
/// Stop listening for messages posted to channels matching the given patterns
/// </summary>
/// <param name="channelPatterns">Patterns to unsubscribe</param>
void PUnsubscribe(params string[] channelPatterns);
/// <summary>
/// Listen for messages published to the given channels
/// </summary>
/// <param name="channels">Channels to subscribe</param>
void Subscribe(params string[] channels);
/// <summary>
/// Stop listening for messages posted to the given channels
/// </summary>
/// <param name="channels">Channels to unsubscribe</param>
void Unsubscribe(params string[] channels);
#endregion
#region Scripting
/// <summary>
/// Execute a Lua script server side
/// </summary>
/// <param name="script">Script to run on server</param>
/// <param name="keys">Keys used by script</param>
/// <param name="arguments">Arguments to pass to script</param>
/// <returns>Redis object</returns>
object Eval(string script, string[] keys, params object[] arguments);
/// <summary>
/// Execute a Lua script server side, sending only the script's cached SHA hash
/// </summary>
/// <param name="sha1">SHA1 hash of script</param>
/// <param name="keys">Keys used by script</param>
/// <param name="arguments">Arguments to pass to script</param>
/// <returns>Redis object</returns>
object EvalSHA(string sha1, string[] keys, params object[] arguments);
/// <summary>
/// Check existence of script SHA hashes in the script cache
/// </summary>
/// <param name="sha1s">SHA1 script hashes</param>
/// <returns>Array of boolean values indicating script existence on server</returns>
bool[] ScriptExists(params string[] sha1s);
/// <summary>
/// Remove all scripts from the script cache
/// </summary>
/// <returns>Status code</returns>
string ScriptFlush();
/// <summary>
/// Kill the script currently in execution
/// </summary>
/// <returns>Status code</returns>
string ScriptKill();
/// <summary>
/// Load the specified Lua script into the script cache
/// </summary>
/// <param name="script">Lua script to load</param>
/// <returns>SHA1 hash of script</returns>
string ScriptLoad(string script);
#endregion
#region Strings
/// <summary>
/// Append a value to a key
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to append to key</param>
/// <returns>Length of string after append</returns>
long Append(string key, object value);
/// <summary>
/// Count set bits in a string
/// </summary>
/// <param name="key">Key to check</param>
/// <param name="start">Start offset</param>
/// <param name="end">Stop offset</param>
/// <returns>Number of bits set to 1</returns>
long BitCount(string key, long? start = null, long? end = null);
/// <summary>
/// Perform bitwise operations between strings
/// </summary>
/// <param name="operation">Bit command to execute</param>
/// <param name="destKey">Store result in destination key</param>
/// <param name="keys">Keys to operate</param>
/// <returns>Size of string stored in the destination key</returns>
long BitOp(RedisBitOp operation, string destKey, params string[] keys);
/// <summary>
/// Find first bit set or clear in a string
/// </summary>
/// <param name="key">Key to examine</param>
/// <param name="bit">Bit value (1 or 0)</param>
/// <param name="start">Examine string at specified byte offset</param>
/// <param name="end">Examine string to specified byte offset</param>
/// <returns>Position of the first bit set to the specified value</returns>
long BitPos(string key, bool bit, long? start = null, long? end = null);
/// <summary>
/// Decrement the integer value of a key by one
/// </summary>
/// <param name="key">Key to modify</param>
/// <returns>Value of key after decrement</returns>
long Decr(string key);
/// <summary>
/// Decrement the integer value of a key by the given number
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="decrement">Decrement value</param>
/// <returns>Value of key after decrement</returns>
long DecrBy(string key, long decrement);
/// <summary>
/// Get the value of a key
/// </summary>
/// <param name="key">Key to lookup</param>
/// <returns>Value of key</returns>
string Get(string key);
byte[] GetBytes(string key);
/// <summary>
/// Returns the bit value at offset in the string value stored at key
/// </summary>
/// <param name="key">Key to lookup</param>
/// <param name="offset">Offset of key to check</param>
/// <returns>Bit value stored at offset</returns>
bool GetBit(string key, uint offset);
/// <summary>
/// Get a substring of the string stored at a key
/// </summary>
/// <param name="key">Key to lookup</param>
/// <param name="start">Start offset</param>
/// <param name="end">End offset</param>
/// <returns>Substring in the specified range</returns>
string GetRange(string key, long start, long end);
byte[] GetRangeBytes(string key, long start, long end);
/// <summary>
/// Set the string value of a key and return its old value
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <returns>Old value stored at key, or null if key did not exist</returns>
string GetSet(string key, object value);
byte[] GetSetBytes(string key, object value);
/// <summary>
/// Increment the integer value of a key by one
/// </summary>
/// <param name="key">Key to modify</param>
/// <returns>Value of key after increment</returns>
long Incr(string key);
/// <summary>
/// Increment the integer value of a key by the given amount
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="increment">Increment amount</param>
/// <returns>Value of key after increment</returns>
long IncrBy(string key, long increment);
/// <summary>
/// Increment the float value of a key by the given amount
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="increment">Increment amount</param>
/// <returns>Value of key after increment</returns>
decimal IncrByFloat(string key, decimal increment);
/// <summary>
/// Get the values of all the given keys
/// </summary>
/// <param name="keys">Keys to lookup</param>
/// <returns>Array of values at the specified keys</returns>
string[] MGet(params string[] keys);
byte[][] MGetBytes(params string[] keys);
/// <summary>
/// Set multiple keys to multiple values
/// </summary>
/// <param name="keyValues">Key values to set</param>
/// <returns>Status code</returns>
string MSet(params Tuple<string, object>[] keyValues);
/// <summary>
/// Set multiple keys to multiple values
/// </summary>
/// <param name="keyValues">Key values to set [k1, v1, k2, v2, ..]</param>
/// <returns>Status code</returns>
string MSet(params object[] keyValues);
/// <summary>
/// Set multiple keys to multiple values, only if none of the keys exist
/// </summary>
/// <param name="keyValues">Key values to set</param>
/// <returns>True if all keys were set</returns>
bool MSetNx(params Tuple<string, object>[] keyValues);
/// <summary>
/// Set multiple keys to multiple values, only if none of the keys exist
/// </summary>
/// <param name="keyValues">Key values to set [k1, v1, k2, v2, ..]</param>
/// <returns>True if all keys were set</returns>
bool MSetNx(params object[] keyValues);
/// <summary>
/// Set the value and expiration in milliseconds of a key
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="milliseconds">Expiration in milliseconds</param>
/// <param name="value">Value to set</param>
/// <returns>Status code</returns>
string PSetEx(string key, long milliseconds, object value);
/// <summary>
/// Set the string value of a key
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <returns>Status code</returns>
string Set(string key, object value);
/// <summary>
/// Set the string value of a key with atomic expiration and existence condition
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <param name="expiration">Set expiration to nearest millisecond</param>
/// <param name="condition">Set key if existence condition</param>
/// <returns>Status code, or null if condition not met</returns>
string Set(string key, object value, TimeSpan expiration, RedisExistence? condition = null);
/// <summary>
/// Set the string value of a key with atomic expiration and existence condition
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <param name="expirationSeconds">Set expiration to nearest second</param>
/// <param name="condition">Set key if existence condition</param>
/// <returns>Status code, or null if condition not met</returns>
string Set(string key, object value, int? expirationSeconds = null, RedisExistence? condition = null);
/// <summary>
/// Set the string value of a key with atomic expiration and existence condition
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <param name="expirationMilliseconds">Set expiration to nearest millisecond</param>
/// <param name="condition">Set key if existence condition</param>
/// <returns>Status code, or null if condition not met</returns>
string Set(string key, object value, long? expirationMilliseconds = null, RedisExistence? condition = null);
/// <summary>
/// Sets or clears the bit at offset in the string value stored at key
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="offset">Modify key at offset</param>
/// <param name="value">Value to set (on or off)</param>
/// <returns>Original bit stored at offset</returns>
bool SetBit(string key, uint offset, bool value);
/// <summary>
/// Set the value and expiration of a key
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="seconds">Expiration in seconds</param>
/// <param name="value">Value to set</param>
/// <returns>Status code</returns>
string SetEx(string key, long seconds, object value);
/// <summary>
/// Set the value of a key, only if the key does not exist
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <returns>True if key was set</returns>
bool SetNx(string key, object value);
/// <summary>
/// Overwrite part of a string at key starting at the specified offset
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="offset">Start offset</param>
/// <param name="value">Value to write at offset</param>
/// <returns>Length of string after operation</returns>
long SetRange(string key, uint offset, object value);
/// <summary>
/// Get the length of the value stored in a key
/// </summary>
/// <param name="key">Key to lookup</param>
/// <returns>Length of string at key</returns>
long StrLen(string key);
#endregion
#region Server
/// <summary>
/// Asynchronously rewrite the append-only file
/// </summary>
/// <returns>Status code</returns>
string BgRewriteAof();
/// <summary>
/// Asynchronously save the dataset to disk
/// </summary>
/// <returns>Status code</returns>
string BgSave();
/// <summary>
/// Kill the connection of a client
/// </summary>
/// <param name="ip">Client IP returned from CLIENT LIST</param>
/// <param name="port">Client port returned from CLIENT LIST</param>
/// <returns>Status code</returns>
string ClientKill(string ip, int port);
/// <summary>
/// Kill the connection of a client
/// </summary>
/// <param name="addr">client's ip:port</param>
/// <param name="id">client's unique ID</param>
/// <param name="type">client type (normal|slave|pubsub)</param>
/// <param name="skipMe">do not kill the calling client</param>
/// <returns>Number of clients killed</returns>
long ClientKill(string addr = null, string id = null, string type = null, bool? skipMe = null);
/// <summary>
/// Get the list of client connections
/// </summary>
/// <returns>Formatted string of clients</returns>
string ClientList();
/// <summary>
/// Suspend all Redis clients for the specified amount of time
/// </summary>
/// <param name="milliseconds">Time to pause in milliseconds</param>
/// <returns>Status code</returns>
string ClientPause(int milliseconds);
/// <summary>
/// Suspend all Redis clients for the specified amount of time
/// </summary>
/// <param name="timeout">Time to pause</param>
/// <returns>Status code</returns>
string ClientPause(TimeSpan timeout);
/// <summary>
/// Get the current connection name
/// </summary>
/// <returns>Connection name</returns>
string ClientGetName();
/// <summary>
/// Set the current connection name
/// </summary>
/// <param name="connectionName">Name of connection (no spaces)</param>
/// <returns>Status code</returns>
string ClientSetName(string connectionName);
/// <summary>
/// Get the value of a configuration parameter
/// </summary>
/// <param name="parameter">Configuration parameter to lookup</param>
/// <returns>Configuration value</returns>
Tuple<string, string>[] ConfigGet(string parameter);
/// <summary>
/// Reset the stats returned by INFO
/// </summary>
/// <returns>Status code</returns>
string ConfigResetStat();
/// <summary>
/// Rewrite the redis.conf file the server was started with, applying the minimal changes needed to make it reflect current configuration
/// </summary>
/// <returns>Status code</returns>
string ConfigRewrite();
/// <summary>
/// Set a configuration parameter to the given value
/// </summary>
/// <param name="parameter">Parameter to set</param>
/// <param name="value">Value to set</param>
/// <returns>Status code</returns>
string ConfigSet(string parameter, string value);
/// <summary>
/// Return the number of keys in the currently-selected database
/// </summary>
/// <returns>Number of keys</returns>
long DbSize();
/// <summary>
/// Make the server crash :(
/// </summary>
/// <returns>Status code</returns>
string DebugSegFault();
/// <summary>
/// Remove all keys from all databases
/// </summary>
/// <returns>Status code</returns>
string FlushAll();
/// <summary>
/// Remove all keys from the current database
/// </summary>
/// <returns>Status code</returns>
string FlushDb();
/// <summary>
/// Get information and statistics about the server
/// </summary>
/// <param name="section">all|default|server|clients|memory|persistence|stats|replication|cpu|commandstats|cluster|keyspace</param>
/// <returns>Formatted string</returns>
string Info(string section = null);
/// <summary>
/// Get the timestamp of the last successful save to disk
/// </summary>
/// <returns>Date of last save</returns>
DateTime LastSave();
/// <summary>
/// Listen for all requests received by the server in real time
/// </summary>
/// <returns>Status code</returns>
string Monitor();
/// <summary>
/// Get role information for the current Redis instance
/// </summary>
/// <returns>RedisMasterRole|RedisSlaveRole|RedisSentinelRole</returns>
RedisRole Role();
/// <summary>
/// Synchronously save the dataset to disk
/// </summary>
/// <returns>Status code</returns>
string Save();
/// <summary>
/// Synchronously save the dataset to disk and then shut down the server
/// </summary>
/// <param name="save">Force a DB saving operation even if no save points are configured</param>
/// <returns>Status code</returns>
string Shutdown(bool? save = null);
/// <summary>
/// Make the server a slave of another instance or promote it as master
/// </summary>
/// <param name="host">Master host</param>
/// <param name="port">Master port</param>
/// <returns>Status code</returns>
string SlaveOf(string host, int port);
/// <summary>
/// Turn off replication, turning the Redis server into a master
/// </summary>
/// <returns>Status code</returns>
string SlaveOfNoOne();
/// <summary>
/// Get latest entries from the slow log
/// </summary>
/// <param name="count">Limit entries returned</param>
/// <returns>Slow log entries</returns>
RedisSlowLogEntry[] SlowLogGet(long? count = null);
/// <summary>
/// Get the length of the slow log
/// </summary>
/// <returns>Slow log length</returns>
long SlowLogLen();
/// <summary>
/// Reset the slow log
/// </summary>
/// <returns>Status code</returns>
string SlowLogReset();
/// <summary>
/// Internal command used for replication
/// </summary>
/// <returns>Byte array of Redis sync data</returns>
byte[] Sync();
/// <summary>
/// Return the current server time
/// </summary>
/// <returns>Server time</returns>
DateTime Time();
#endregion
#region Transactions
/// <summary>
/// Discard all commands issued after MULTI, aborting the current transaction
/// </summary>
/// <returns>Status code</returns>
string Discard();
/// <summary>
/// Execute all commands issued after MULTI
/// </summary>
/// <returns>Array of output from all transaction commands</returns>
object[] Exec();
/// <summary>
/// Mark the start of a transaction block
/// </summary>
/// <returns>Status code</returns>
string Multi();
/// <summary>
/// Forget about all watched keys
/// </summary>
/// <returns>Status code</returns>
string Unwatch();
/// <summary>
/// Watch the given keys to determine execution of the MULTI/EXEC block
/// </summary>
/// <param name="keys">Keys to watch</param>
/// <returns>Status code</returns>
string Watch(params string[] keys);
#endregion
#region HyperLogLog
/// <summary>
/// Adds the specified elements to the specified HyperLogLog.
/// </summary>
/// <param name="key">Key to update</param>
/// <param name="elements">Elements to add</param>
/// <returns>1 if at least 1 HyperLogLog internal register was altered. 0 otherwise.</returns>
bool PfAdd(string key, params object[] elements);
/// <summary>
/// Return the approximated cardinality of the set(s) observed by the HyperLogLog(s) at the specified key(s).
/// </summary>
/// <param name="keys">One or more HyperLogLog keys to examine</param>
/// <returns>Approximated number of unique elements observed via PFADD</returns>
long PfCount(params string[] keys);
/// <summary>
/// Merge N different HyperLogLogs into a single key.
/// </summary>
/// <param name="destKey">Where to store the merged HyperLogLogs</param>
/// <param name="sourceKeys">The HyperLogLogs keys that will be combined</param>
/// <returns>Status code</returns>
string PfMerge(string destKey, params string[] sourceKeys);
#endregion
#region Geo redis-server 3.2
/// <summary>
/// Add the specified geospatial items (longitude, latitude, member) to the index stored at key (GEOADD)
/// </summary>
long GeoAdd(string key, params (decimal longitude, decimal latitude, object member)[] values);
/// <summary>
/// Return the distance between two members of a geospatial index in the given unit, or null if a member is missing (GEODIST)
/// </summary>
decimal? GeoDist(string key, object member1, object member2, GeoUnit unit = GeoUnit.m);
/// <summary>
/// Return standard Geohash strings representing the positions of the given members (GEOHASH)
/// </summary>
string[] GeoHash(string key, object[] members);
/// <summary>
/// Return the (longitude, latitude) positions of the given members; entries are null for missing members (GEOPOS)
/// </summary>
(decimal longitude, decimal latitude)?[] GeoPos(string key, object[] members);
/// <summary>
/// Return the members within the given radius of the specified point (GEORADIUS); dist/coord/hash fields are populated only when the matching with* flag is set
/// </summary>
(string member, decimal dist, decimal longitude, decimal latitude, long hash)[] GeoRadius(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null, bool withCoord = false, bool withDist = false, bool withHash = false);
(byte[] member, decimal dist, decimal longitude, decimal latitude, long hash)[] GeoRadiusBytes(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null, bool withCoord = false, bool withDist = false, bool withHash = false);
/// <summary>
/// Return the members within the given radius of another member of the index (GEORADIUSBYMEMBER)
/// </summary>
(string member, decimal dist, decimal longitude, decimal latitude, long hash)[] GeoRadiusByMember(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null, bool withCoord = false, bool withDist = false, bool withHash = false);
(byte[] member, decimal dist, decimal longitude, decimal latitude, long hash)[] GeoRadiusBytesByMember(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null, bool withCoord = false, bool withDist = false, bool withHash = false);
#endregion
}
/// <summary>Distance unit used by the GEO commands: meters, kilometers, miles, feet</summary>
public enum GeoUnit { m, km, mi, ft }
/// <summary>Sort order for GEORADIUS results: ASC is nearest-first, DESC is farthest-first</summary>
public enum GeoOrderBy { ASC, DESC }
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 13,560 | src/transformers/models/m2m_100/configuration_m2m_100.py | # coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" M2M100 model configuration"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
# Maps canonical checkpoint names to the URL of their hosted config files.
M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/config.json",
    # See all M2M100 models at https://huggingface.co/models?filter=m2m_100
}
class M2M100Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`M2M100Model`]. It is used to instantiate an
    M2M100 model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the M2M100
    [facebook/m2m100_418M](https://huggingface.co/facebook/m2m100_418M) architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 128112):
            Vocabulary size of the M2M100 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`M2M100Model`].
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        classifier_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for classifier.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.05):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.05):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        scale_embedding (`bool`, *optional*, defaults to `True`):
            If `True`, token embeddings are scaled by sqrt(d_model).
    Example:
    ```python
    >>> from transformers import M2M100Model, M2M100Config
    >>> # Initializing a M2M100 facebook/m2m100_418M style configuration
    >>> configuration = M2M100Config()
    >>> # Initializing a model from the facebook/m2m100_418M style configuration
    >>> model = M2M100Model(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "m2m_100"
    # Keys excluded from inference-time output comparison by the base class.
    keys_to_ignore_at_inference = ["past_key_values"]
    # Aliases so generic code can read `num_attention_heads` / `hidden_size`.
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        # Token ids and encoder-decoder flags are handled by the base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
class M2M100OnnxConfig(OnnxSeq2SeqConfigWithPast):
    """ONNX export configuration for M2M100: declares dynamic input axes and builds dummy inputs for tracing."""
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Encoder inputs always carry dynamic batch and sequence axes.
        common_inputs = OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            # With cached past key/values the decoder consumes one new token per step,
            # so decoder_input_ids has no sequence axis.
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            # Registers the past_key_values.* tensors as additional model inputs.
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs
    # Copied from BartOnnxConfig._generate_dummy_inputs_for_sequence_classification_and_question_answering
    # A better name would be _generate_dummy_inputs_for_encoder_and_decoder because sequence classification and question
    # answering are not supported for M2M100, but this name is preserved to be able to check that the copy matches what
    # was done for BART so that it can be updated if need be.
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig._generate_dummy_inputs_for_default_and_seq2seq_lm
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs; with cached past the decoder sees a single new token.
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            # Per-layer past tensors: (batch, heads, seq, head_dim).
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            # NOTE(review): the "+ 3" padding of the past length mirrors BartOnnxConfig; the exact value appears arbitrary.
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            # Extend the decoder mask so it also covers the dummy past positions.
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                # Each layer caches (decoder self-attn key, value, encoder cross-attn key, value).
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    # The base-class entry point is wired to the seq2seq dummy-input builder above.
    generate_dummy_inputs = _generate_dummy_inputs_for_default_and_seq2seq_lm
|
2877025939/PlanADScrollView | 1,540 | PlanADScrollView/PlanADScrollView/Info.plist | <?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>NSAppTransportSecurity</key>
<dict>
<key>NSAllowsArbitraryLoads</key>
<true/>
</dict>
<key>CFBundleDevelopmentRegion</key>
<string>en</string>
<key>CFBundleExecutable</key>
<string>$(EXECUTABLE_NAME)</string>
<key>CFBundleIdentifier</key>
<string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>$(PRODUCT_NAME)</string>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleShortVersionString</key>
<string>1.0</string>
<key>CFBundleVersion</key>
<string>1</string>
<key>LSRequiresIPhoneOS</key>
<true/>
<key>UILaunchStoryboardName</key>
<string>LaunchScreen</string>
<key>UIMainStoryboardFile</key>
<string>Main</string>
<key>UIRequiredDeviceCapabilities</key>
<array>
<string>armv7</string>
</array>
<key>UISupportedInterfaceOrientations</key>
<array>
<string>UIInterfaceOrientationPortrait</string>
<string>UIInterfaceOrientationLandscapeLeft</string>
<string>UIInterfaceOrientationLandscapeRight</string>
</array>
<key>UISupportedInterfaceOrientations~ipad</key>
<array>
<string>UIInterfaceOrientationPortrait</string>
<string>UIInterfaceOrientationPortraitUpsideDown</string>
<string>UIInterfaceOrientationLandscapeLeft</string>
<string>UIInterfaceOrientationLandscapeRight</string>
</array>
</dict>
</plist>
|
2877025939/PlanADScrollView | 2,089 | PlanADScrollView/PlanADScrollView/ViewController.m | //
// ViewController.m
// PlanADScrollView
//
// Created by anan on 2017/10/18.
// Copyright © 2017年 Plan. All rights reserved.
//
#import "ViewController.h"
#import "UIImageView+WebCache.h"
#import "PlanADScrollView.h"
@interface ViewController () <PlanADScrollViewDelegate>

@end

@implementation ViewController

#pragma mark - Lifecycle

- (void)viewDidLoad {
    [super viewDidLoad];
    self.view.backgroundColor = [UIColor whiteColor];
    [self setupADScrollViews];
}

- (void)didReceiveMemoryWarning {
    [super didReceiveMemoryWarning];
    // Dispose of any resources that can be recreated.
}

#pragma mark - Private

/// Builds two demo ad banners: one fed by remote image URLs and one fed by
/// images bundled with the app. (Renamed from -PlanADScrollView, which
/// shadowed the PlanADScrollView class name.)
- (void)setupADScrollViews {
    // PlanADScrollView supports both local image names and remote URLs.
    NSArray<NSString *> *remoteImageURLs = @[@"http://pic1.win4000.com/wallpaper/1/58194457a834e.jpg",
                                             @"http://pic31.photophoto.cn/20140622/0005018377654922_b.jpg",
                                             @"http://p1.gexing.com/shaitu/20130128/0005/51055060557b3.jpg",
                                             @"http://pic31.photophoto.cn/20140419/0005018307715498_b.jpg",
                                             @"http://img.wanyx.com/Uploads/ueditor/image/20170418/1492498061368610.jpg",
                                             @"http://img.wanyx.com/Uploads/ueditor/image/20170405/1491381650725820.jpg"];
    // Bundled (local) image demo.
    NSArray<NSString *> *localImageNames = @[@"1.jpg", @"2.jpg", @"3.jpg", @"4.jpg", @"5.jpg"];

    // Banner backed by remote URLs, with a rectangle-style page control.
    PlanADScrollView *remoteBanner =
        [[PlanADScrollView alloc] initWithFrame:CGRectMake(0, 100, self.view.frame.size.width, 200)
                                      imageUrls:remoteImageURLs
                               placeholderimage:[UIImage imageNamed:@"placeholderimage"]];
    remoteBanner.delegate = self;
    remoteBanner.pageContolStyle = PlanPageContolStyleRectangle;
    [self.view addSubview:remoteBanner];

    // Banner backed by bundled images, with image-based page indicators.
    PlanADScrollView *localBanner =
        [[PlanADScrollView alloc] initWithFrame:CGRectMake(0, 400, self.view.frame.size.width, 200)
                                      imageUrls:localImageNames
                               placeholderimage:nil];
    localBanner.delegate = self;
    localBanner.pageContolStyle = PlanPageContolStyleImage;
    [localBanner currentImage:[UIImage imageNamed:@"check"] pageImage:[UIImage imageNamed:@"check1"]];
    [self.view addSubview:localBanner];
}

#pragma mark - PlanADScrollViewDelegate

- (void)PlanADScrollViewdidSelectAtIndex:(NSInteger)index {
    // %ld expects a long; NSInteger is int on 32-bit (armv7 is a required
    // device capability), so the explicit cast is needed for a correct format.
    NSLog(@"点击了第%ld图片", (long)index);
}

@end
|
2877025939/PlanADScrollView | 36,919 | PlanADScrollView/PlanADScrollView.xcodeproj/project.pbxproj | // !$*UTF8*$!
{
archiveVersion = 1;
classes = {
};
objectVersion = 46;
objects = {
/* Begin PBXBuildFile section */
27F117121F974A4600F9CA18 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F117111F974A4600F9CA18 /* main.m */; };
27F117151F974A4600F9CA18 /* AppDelegate.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F117141F974A4600F9CA18 /* AppDelegate.m */; };
27F117181F974A4600F9CA18 /* ViewController.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F117171F974A4600F9CA18 /* ViewController.m */; };
27F1171B1F974A4600F9CA18 /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 27F117191F974A4600F9CA18 /* Main.storyboard */; };
27F1171D1F974A4600F9CA18 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 27F1171C1F974A4600F9CA18 /* Assets.xcassets */; };
27F117201F974A4600F9CA18 /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 27F1171E1F974A4600F9CA18 /* LaunchScreen.storyboard */; };
27F1172B1F974A4700F9CA18 /* PlanADScrollViewTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F1172A1F974A4700F9CA18 /* PlanADScrollViewTests.m */; };
27F117361F974A4700F9CA18 /* PlanADScrollViewUITests.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F117351F974A4700F9CA18 /* PlanADScrollViewUITests.m */; };
27F1177B1F975AF400F9CA18 /* NSData+ImageContentType.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F117571F975AF400F9CA18 /* NSData+ImageContentType.m */; };
27F1177C1F975AF400F9CA18 /* NSImage+WebCache.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F117591F975AF400F9CA18 /* NSImage+WebCache.m */; };
27F1177D1F975AF400F9CA18 /* SDImageCache.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F1175B1F975AF400F9CA18 /* SDImageCache.m */; };
27F1177E1F975AF400F9CA18 /* SDImageCacheConfig.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F1175D1F975AF400F9CA18 /* SDImageCacheConfig.m */; };
27F1177F1F975AF400F9CA18 /* SDWebImageCompat.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F1175F1F975AF400F9CA18 /* SDWebImageCompat.m */; };
27F117801F975AF400F9CA18 /* SDWebImageDecoder.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F117611F975AF400F9CA18 /* SDWebImageDecoder.m */; };
27F117811F975AF400F9CA18 /* SDWebImageDownloader.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F117631F975AF400F9CA18 /* SDWebImageDownloader.m */; };
27F117821F975AF400F9CA18 /* SDWebImageDownloaderOperation.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F117651F975AF400F9CA18 /* SDWebImageDownloaderOperation.m */; };
27F117831F975AF400F9CA18 /* SDWebImageManager.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F117671F975AF400F9CA18 /* SDWebImageManager.m */; };
27F117841F975AF400F9CA18 /* SDWebImagePrefetcher.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F1176A1F975AF400F9CA18 /* SDWebImagePrefetcher.m */; };
27F117851F975AF400F9CA18 /* UIButton+WebCache.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F1176C1F975AF400F9CA18 /* UIButton+WebCache.m */; };
27F117861F975AF400F9CA18 /* UIImage+GIF.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F1176E1F975AF400F9CA18 /* UIImage+GIF.m */; };
27F117871F975AF400F9CA18 /* UIImage+MultiFormat.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F117701F975AF400F9CA18 /* UIImage+MultiFormat.m */; };
27F117881F975AF400F9CA18 /* UIImageView+HighlightedWebCache.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F117721F975AF400F9CA18 /* UIImageView+HighlightedWebCache.m */; };
27F117891F975AF400F9CA18 /* UIImageView+WebCache.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F117741F975AF400F9CA18 /* UIImageView+WebCache.m */; };
27F1178A1F975AF400F9CA18 /* UIView+WebCache.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F117761F975AF400F9CA18 /* UIView+WebCache.m */; };
27F1178B1F975AF400F9CA18 /* UIView+WebCacheOperation.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F117781F975AF400F9CA18 /* UIView+WebCacheOperation.m */; };
27F117AA1F98E8AD00F9CA18 /* PlanADCollectionViewCell.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F117A51F98E8AD00F9CA18 /* PlanADCollectionViewCell.m */; };
27F117AB1F98E8AD00F9CA18 /* PlanADScrollView.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F117A71F98E8AD00F9CA18 /* PlanADScrollView.m */; };
27F117AC1F98E8AD00F9CA18 /* PlanPageControl.m in Sources */ = {isa = PBXBuildFile; fileRef = 27F117A91F98E8AD00F9CA18 /* PlanPageControl.m */; };
/* End PBXBuildFile section */
/* Begin PBXContainerItemProxy section */
27F117271F974A4700F9CA18 /* PBXContainerItemProxy */ = {
isa = PBXContainerItemProxy;
containerPortal = 27F117051F974A4600F9CA18 /* Project object */;
proxyType = 1;
remoteGlobalIDString = 27F1170C1F974A4600F9CA18;
remoteInfo = PlanADScrollView;
};
27F117321F974A4700F9CA18 /* PBXContainerItemProxy */ = {
isa = PBXContainerItemProxy;
containerPortal = 27F117051F974A4600F9CA18 /* Project object */;
proxyType = 1;
remoteGlobalIDString = 27F1170C1F974A4600F9CA18;
remoteInfo = PlanADScrollView;
};
/* End PBXContainerItemProxy section */
/* Begin PBXFileReference section */
27F1170D1F974A4600F9CA18 /* PlanADScrollView.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = PlanADScrollView.app; sourceTree = BUILT_PRODUCTS_DIR; };
27F117111F974A4600F9CA18 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; };
27F117131F974A4600F9CA18 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = "<group>"; };
27F117141F974A4600F9CA18 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = "<group>"; };
27F117161F974A4600F9CA18 /* ViewController.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ViewController.h; sourceTree = "<group>"; };
27F117171F974A4600F9CA18 /* ViewController.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = ViewController.m; sourceTree = "<group>"; };
27F1171A1F974A4600F9CA18 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = "<group>"; };
27F1171C1F974A4600F9CA18 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = "<group>"; };
27F1171F1F974A4600F9CA18 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/LaunchScreen.storyboard; sourceTree = "<group>"; };
27F117211F974A4600F9CA18 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
27F117261F974A4700F9CA18 /* PlanADScrollViewTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = PlanADScrollViewTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; };
27F1172A1F974A4700F9CA18 /* PlanADScrollViewTests.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = PlanADScrollViewTests.m; sourceTree = "<group>"; };
27F1172C1F974A4700F9CA18 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
27F117311F974A4700F9CA18 /* PlanADScrollViewUITests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = PlanADScrollViewUITests.xctest; sourceTree = BUILT_PRODUCTS_DIR; };
27F117351F974A4700F9CA18 /* PlanADScrollViewUITests.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = PlanADScrollViewUITests.m; sourceTree = "<group>"; };
27F117371F974A4700F9CA18 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
27F117561F975AF400F9CA18 /* NSData+ImageContentType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "NSData+ImageContentType.h"; sourceTree = "<group>"; };
27F117571F975AF400F9CA18 /* NSData+ImageContentType.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = "NSData+ImageContentType.m"; sourceTree = "<group>"; };
27F117581F975AF400F9CA18 /* NSImage+WebCache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "NSImage+WebCache.h"; sourceTree = "<group>"; };
27F117591F975AF400F9CA18 /* NSImage+WebCache.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = "NSImage+WebCache.m"; sourceTree = "<group>"; };
27F1175A1F975AF400F9CA18 /* SDImageCache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SDImageCache.h; sourceTree = "<group>"; };
27F1175B1F975AF400F9CA18 /* SDImageCache.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SDImageCache.m; sourceTree = "<group>"; };
27F1175C1F975AF400F9CA18 /* SDImageCacheConfig.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SDImageCacheConfig.h; sourceTree = "<group>"; };
27F1175D1F975AF400F9CA18 /* SDImageCacheConfig.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SDImageCacheConfig.m; sourceTree = "<group>"; };
27F1175E1F975AF400F9CA18 /* SDWebImageCompat.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SDWebImageCompat.h; sourceTree = "<group>"; };
27F1175F1F975AF400F9CA18 /* SDWebImageCompat.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SDWebImageCompat.m; sourceTree = "<group>"; };
27F117601F975AF400F9CA18 /* SDWebImageDecoder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SDWebImageDecoder.h; sourceTree = "<group>"; };
27F117611F975AF400F9CA18 /* SDWebImageDecoder.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SDWebImageDecoder.m; sourceTree = "<group>"; };
27F117621F975AF400F9CA18 /* SDWebImageDownloader.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SDWebImageDownloader.h; sourceTree = "<group>"; };
27F117631F975AF400F9CA18 /* SDWebImageDownloader.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SDWebImageDownloader.m; sourceTree = "<group>"; };
27F117641F975AF400F9CA18 /* SDWebImageDownloaderOperation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SDWebImageDownloaderOperation.h; sourceTree = "<group>"; };
27F117651F975AF400F9CA18 /* SDWebImageDownloaderOperation.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SDWebImageDownloaderOperation.m; sourceTree = "<group>"; };
27F117661F975AF400F9CA18 /* SDWebImageManager.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SDWebImageManager.h; sourceTree = "<group>"; };
27F117671F975AF400F9CA18 /* SDWebImageManager.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SDWebImageManager.m; sourceTree = "<group>"; };
27F117681F975AF400F9CA18 /* SDWebImageOperation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SDWebImageOperation.h; sourceTree = "<group>"; };
27F117691F975AF400F9CA18 /* SDWebImagePrefetcher.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SDWebImagePrefetcher.h; sourceTree = "<group>"; };
27F1176A1F975AF400F9CA18 /* SDWebImagePrefetcher.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SDWebImagePrefetcher.m; sourceTree = "<group>"; };
27F1176B1F975AF400F9CA18 /* UIButton+WebCache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "UIButton+WebCache.h"; sourceTree = "<group>"; };
27F1176C1F975AF400F9CA18 /* UIButton+WebCache.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = "UIButton+WebCache.m"; sourceTree = "<group>"; };
27F1176D1F975AF400F9CA18 /* UIImage+GIF.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "UIImage+GIF.h"; sourceTree = "<group>"; };
27F1176E1F975AF400F9CA18 /* UIImage+GIF.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = "UIImage+GIF.m"; sourceTree = "<group>"; };
27F1176F1F975AF400F9CA18 /* UIImage+MultiFormat.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "UIImage+MultiFormat.h"; sourceTree = "<group>"; };
27F117701F975AF400F9CA18 /* UIImage+MultiFormat.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = "UIImage+MultiFormat.m"; sourceTree = "<group>"; };
27F117711F975AF400F9CA18 /* UIImageView+HighlightedWebCache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "UIImageView+HighlightedWebCache.h"; sourceTree = "<group>"; };
27F117721F975AF400F9CA18 /* UIImageView+HighlightedWebCache.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = "UIImageView+HighlightedWebCache.m"; sourceTree = "<group>"; };
27F117731F975AF400F9CA18 /* UIImageView+WebCache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "UIImageView+WebCache.h"; sourceTree = "<group>"; };
27F117741F975AF400F9CA18 /* UIImageView+WebCache.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = "UIImageView+WebCache.m"; sourceTree = "<group>"; };
27F117751F975AF400F9CA18 /* UIView+WebCache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "UIView+WebCache.h"; sourceTree = "<group>"; };
27F117761F975AF400F9CA18 /* UIView+WebCache.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = "UIView+WebCache.m"; sourceTree = "<group>"; };
27F117771F975AF400F9CA18 /* UIView+WebCacheOperation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "UIView+WebCacheOperation.h"; sourceTree = "<group>"; };
27F117781F975AF400F9CA18 /* UIView+WebCacheOperation.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = "UIView+WebCacheOperation.m"; sourceTree = "<group>"; };
27F117A41F98E8AD00F9CA18 /* PlanADCollectionViewCell.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PlanADCollectionViewCell.h; sourceTree = "<group>"; };
27F117A51F98E8AD00F9CA18 /* PlanADCollectionViewCell.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = PlanADCollectionViewCell.m; sourceTree = "<group>"; };
27F117A61F98E8AD00F9CA18 /* PlanADScrollView.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PlanADScrollView.h; sourceTree = "<group>"; };
27F117A71F98E8AD00F9CA18 /* PlanADScrollView.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = PlanADScrollView.m; sourceTree = "<group>"; };
27F117A81F98E8AD00F9CA18 /* PlanPageControl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PlanPageControl.h; sourceTree = "<group>"; };
27F117A91F98E8AD00F9CA18 /* PlanPageControl.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = PlanPageControl.m; sourceTree = "<group>"; };
/* End PBXFileReference section */
/* Begin PBXFrameworksBuildPhase section */
27F1170A1F974A4600F9CA18 /* Frameworks */ = {
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
);
runOnlyForDeploymentPostprocessing = 0;
};
27F117231F974A4700F9CA18 /* Frameworks */ = {
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
);
runOnlyForDeploymentPostprocessing = 0;
};
27F1172E1F974A4700F9CA18 /* Frameworks */ = {
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXFrameworksBuildPhase section */
/* Begin PBXGroup section */
27F117041F974A4600F9CA18 = {
isa = PBXGroup;
children = (
27F1170F1F974A4600F9CA18 /* PlanADScrollView */,
27F117291F974A4700F9CA18 /* PlanADScrollViewTests */,
27F117341F974A4700F9CA18 /* PlanADScrollViewUITests */,
27F1170E1F974A4600F9CA18 /* Products */,
);
sourceTree = "<group>";
};
27F1170E1F974A4600F9CA18 /* Products */ = {
isa = PBXGroup;
children = (
27F1170D1F974A4600F9CA18 /* PlanADScrollView.app */,
27F117261F974A4700F9CA18 /* PlanADScrollViewTests.xctest */,
27F117311F974A4700F9CA18 /* PlanADScrollViewUITests.xctest */,
);
name = Products;
sourceTree = "<group>";
};
27F1170F1F974A4600F9CA18 /* PlanADScrollView */ = {
isa = PBXGroup;
children = (
27F117A31F98E8AD00F9CA18 /* PlanADScrollView */,
27F117131F974A4600F9CA18 /* AppDelegate.h */,
27F117141F974A4600F9CA18 /* AppDelegate.m */,
27F117161F974A4600F9CA18 /* ViewController.h */,
27F117171F974A4600F9CA18 /* ViewController.m */,
27F117551F975AF400F9CA18 /* SDWebImage */,
27F117191F974A4600F9CA18 /* Main.storyboard */,
27F1171C1F974A4600F9CA18 /* Assets.xcassets */,
27F1171E1F974A4600F9CA18 /* LaunchScreen.storyboard */,
27F117211F974A4600F9CA18 /* Info.plist */,
27F117101F974A4600F9CA18 /* Supporting Files */,
);
path = PlanADScrollView;
sourceTree = "<group>";
};
27F117101F974A4600F9CA18 /* Supporting Files */ = {
isa = PBXGroup;
children = (
27F117111F974A4600F9CA18 /* main.m */,
);
name = "Supporting Files";
sourceTree = "<group>";
};
27F117291F974A4700F9CA18 /* PlanADScrollViewTests */ = {
isa = PBXGroup;
children = (
27F1172A1F974A4700F9CA18 /* PlanADScrollViewTests.m */,
27F1172C1F974A4700F9CA18 /* Info.plist */,
);
path = PlanADScrollViewTests;
sourceTree = "<group>";
};
27F117341F974A4700F9CA18 /* PlanADScrollViewUITests */ = {
isa = PBXGroup;
children = (
27F117351F974A4700F9CA18 /* PlanADScrollViewUITests.m */,
27F117371F974A4700F9CA18 /* Info.plist */,
);
path = PlanADScrollViewUITests;
sourceTree = "<group>";
};
27F117551F975AF400F9CA18 /* SDWebImage */ = {
isa = PBXGroup;
children = (
27F117561F975AF400F9CA18 /* NSData+ImageContentType.h */,
27F117571F975AF400F9CA18 /* NSData+ImageContentType.m */,
27F117581F975AF400F9CA18 /* NSImage+WebCache.h */,
27F117591F975AF400F9CA18 /* NSImage+WebCache.m */,
27F1175A1F975AF400F9CA18 /* SDImageCache.h */,
27F1175B1F975AF400F9CA18 /* SDImageCache.m */,
27F1175C1F975AF400F9CA18 /* SDImageCacheConfig.h */,
27F1175D1F975AF400F9CA18 /* SDImageCacheConfig.m */,
27F1175E1F975AF400F9CA18 /* SDWebImageCompat.h */,
27F1175F1F975AF400F9CA18 /* SDWebImageCompat.m */,
27F117601F975AF400F9CA18 /* SDWebImageDecoder.h */,
27F117611F975AF400F9CA18 /* SDWebImageDecoder.m */,
27F117621F975AF400F9CA18 /* SDWebImageDownloader.h */,
27F117631F975AF400F9CA18 /* SDWebImageDownloader.m */,
27F117641F975AF400F9CA18 /* SDWebImageDownloaderOperation.h */,
27F117651F975AF400F9CA18 /* SDWebImageDownloaderOperation.m */,
27F117661F975AF400F9CA18 /* SDWebImageManager.h */,
27F117671F975AF400F9CA18 /* SDWebImageManager.m */,
27F117681F975AF400F9CA18 /* SDWebImageOperation.h */,
27F117691F975AF400F9CA18 /* SDWebImagePrefetcher.h */,
27F1176A1F975AF400F9CA18 /* SDWebImagePrefetcher.m */,
27F1176B1F975AF400F9CA18 /* UIButton+WebCache.h */,
27F1176C1F975AF400F9CA18 /* UIButton+WebCache.m */,
27F1176D1F975AF400F9CA18 /* UIImage+GIF.h */,
27F1176E1F975AF400F9CA18 /* UIImage+GIF.m */,
27F1176F1F975AF400F9CA18 /* UIImage+MultiFormat.h */,
27F117701F975AF400F9CA18 /* UIImage+MultiFormat.m */,
27F117711F975AF400F9CA18 /* UIImageView+HighlightedWebCache.h */,
27F117721F975AF400F9CA18 /* UIImageView+HighlightedWebCache.m */,
27F117731F975AF400F9CA18 /* UIImageView+WebCache.h */,
27F117741F975AF400F9CA18 /* UIImageView+WebCache.m */,
27F117751F975AF400F9CA18 /* UIView+WebCache.h */,
27F117761F975AF400F9CA18 /* UIView+WebCache.m */,
27F117771F975AF400F9CA18 /* UIView+WebCacheOperation.h */,
27F117781F975AF400F9CA18 /* UIView+WebCacheOperation.m */,
);
name = SDWebImage;
path = SDWebImage/SDWebImage;
sourceTree = "<group>";
};
27F117A31F98E8AD00F9CA18 /* PlanADScrollView */ = {
isa = PBXGroup;
children = (
27F117A61F98E8AD00F9CA18 /* PlanADScrollView.h */,
27F117A71F98E8AD00F9CA18 /* PlanADScrollView.m */,
27F117A41F98E8AD00F9CA18 /* PlanADCollectionViewCell.h */,
27F117A51F98E8AD00F9CA18 /* PlanADCollectionViewCell.m */,
27F117A81F98E8AD00F9CA18 /* PlanPageControl.h */,
27F117A91F98E8AD00F9CA18 /* PlanPageControl.m */,
);
path = PlanADScrollView;
sourceTree = "<group>";
};
/* End PBXGroup section */
/* Begin PBXNativeTarget section */
27F1170C1F974A4600F9CA18 /* PlanADScrollView */ = {
isa = PBXNativeTarget;
buildConfigurationList = 27F1173A1F974A4700F9CA18 /* Build configuration list for PBXNativeTarget "PlanADScrollView" */;
buildPhases = (
27F117091F974A4600F9CA18 /* Sources */,
27F1170A1F974A4600F9CA18 /* Frameworks */,
27F1170B1F974A4600F9CA18 /* Resources */,
);
buildRules = (
);
dependencies = (
);
name = PlanADScrollView;
productName = PlanADScrollView;
productReference = 27F1170D1F974A4600F9CA18 /* PlanADScrollView.app */;
productType = "com.apple.product-type.application";
};
27F117251F974A4700F9CA18 /* PlanADScrollViewTests */ = {
isa = PBXNativeTarget;
buildConfigurationList = 27F1173D1F974A4700F9CA18 /* Build configuration list for PBXNativeTarget "PlanADScrollViewTests" */;
buildPhases = (
27F117221F974A4700F9CA18 /* Sources */,
27F117231F974A4700F9CA18 /* Frameworks */,
27F117241F974A4700F9CA18 /* Resources */,
);
buildRules = (
);
dependencies = (
27F117281F974A4700F9CA18 /* PBXTargetDependency */,
);
name = PlanADScrollViewTests;
productName = PlanADScrollViewTests;
productReference = 27F117261F974A4700F9CA18 /* PlanADScrollViewTests.xctest */;
productType = "com.apple.product-type.bundle.unit-test";
};
27F117301F974A4700F9CA18 /* PlanADScrollViewUITests */ = {
isa = PBXNativeTarget;
buildConfigurationList = 27F117401F974A4700F9CA18 /* Build configuration list for PBXNativeTarget "PlanADScrollViewUITests" */;
buildPhases = (
27F1172D1F974A4700F9CA18 /* Sources */,
27F1172E1F974A4700F9CA18 /* Frameworks */,
27F1172F1F974A4700F9CA18 /* Resources */,
);
buildRules = (
);
dependencies = (
27F117331F974A4700F9CA18 /* PBXTargetDependency */,
);
name = PlanADScrollViewUITests;
productName = PlanADScrollViewUITests;
productReference = 27F117311F974A4700F9CA18 /* PlanADScrollViewUITests.xctest */;
productType = "com.apple.product-type.bundle.ui-testing";
};
/* End PBXNativeTarget section */
/* Begin PBXProject section */
27F117051F974A4600F9CA18 /* Project object */ = {
isa = PBXProject;
attributes = {
LastUpgradeCheck = 0830;
ORGANIZATIONNAME = Plan;
TargetAttributes = {
27F1170C1F974A4600F9CA18 = {
CreatedOnToolsVersion = 8.3.1;
DevelopmentTeam = HWQF636ATC;
ProvisioningStyle = Automatic;
};
27F117251F974A4700F9CA18 = {
CreatedOnToolsVersion = 8.3.1;
DevelopmentTeam = HWQF636ATC;
ProvisioningStyle = Automatic;
TestTargetID = 27F1170C1F974A4600F9CA18;
};
27F117301F974A4700F9CA18 = {
CreatedOnToolsVersion = 8.3.1;
DevelopmentTeam = HWQF636ATC;
ProvisioningStyle = Automatic;
TestTargetID = 27F1170C1F974A4600F9CA18;
};
};
};
buildConfigurationList = 27F117081F974A4600F9CA18 /* Build configuration list for PBXProject "PlanADScrollView" */;
compatibilityVersion = "Xcode 3.2";
developmentRegion = English;
hasScannedForEncodings = 0;
knownRegions = (
en,
Base,
);
mainGroup = 27F117041F974A4600F9CA18;
productRefGroup = 27F1170E1F974A4600F9CA18 /* Products */;
projectDirPath = "";
projectRoot = "";
targets = (
27F1170C1F974A4600F9CA18 /* PlanADScrollView */,
27F117251F974A4700F9CA18 /* PlanADScrollViewTests */,
27F117301F974A4700F9CA18 /* PlanADScrollViewUITests */,
);
};
/* End PBXProject section */
/* Begin PBXResourcesBuildPhase section */
27F1170B1F974A4600F9CA18 /* Resources */ = {
isa = PBXResourcesBuildPhase;
buildActionMask = 2147483647;
files = (
27F117201F974A4600F9CA18 /* LaunchScreen.storyboard in Resources */,
27F1171D1F974A4600F9CA18 /* Assets.xcassets in Resources */,
27F1171B1F974A4600F9CA18 /* Main.storyboard in Resources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
27F117241F974A4700F9CA18 /* Resources */ = {
isa = PBXResourcesBuildPhase;
buildActionMask = 2147483647;
files = (
);
runOnlyForDeploymentPostprocessing = 0;
};
27F1172F1F974A4700F9CA18 /* Resources */ = {
isa = PBXResourcesBuildPhase;
buildActionMask = 2147483647;
files = (
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXResourcesBuildPhase section */
/* Begin PBXSourcesBuildPhase section */
27F117091F974A4600F9CA18 /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
27F117891F975AF400F9CA18 /* UIImageView+WebCache.m in Sources */,
27F117801F975AF400F9CA18 /* SDWebImageDecoder.m in Sources */,
27F117831F975AF400F9CA18 /* SDWebImageManager.m in Sources */,
27F1177E1F975AF400F9CA18 /* SDImageCacheConfig.m in Sources */,
27F1178B1F975AF400F9CA18 /* UIView+WebCacheOperation.m in Sources */,
27F1177F1F975AF400F9CA18 /* SDWebImageCompat.m in Sources */,
27F117AA1F98E8AD00F9CA18 /* PlanADCollectionViewCell.m in Sources */,
27F117181F974A4600F9CA18 /* ViewController.m in Sources */,
27F1177B1F975AF400F9CA18 /* NSData+ImageContentType.m in Sources */,
27F117851F975AF400F9CA18 /* UIButton+WebCache.m in Sources */,
27F117841F975AF400F9CA18 /* SDWebImagePrefetcher.m in Sources */,
27F1178A1F975AF400F9CA18 /* UIView+WebCache.m in Sources */,
27F117821F975AF400F9CA18 /* SDWebImageDownloaderOperation.m in Sources */,
27F117881F975AF400F9CA18 /* UIImageView+HighlightedWebCache.m in Sources */,
27F117871F975AF400F9CA18 /* UIImage+MultiFormat.m in Sources */,
27F117AB1F98E8AD00F9CA18 /* PlanADScrollView.m in Sources */,
27F117151F974A4600F9CA18 /* AppDelegate.m in Sources */,
27F117AC1F98E8AD00F9CA18 /* PlanPageControl.m in Sources */,
27F117121F974A4600F9CA18 /* main.m in Sources */,
27F1177C1F975AF400F9CA18 /* NSImage+WebCache.m in Sources */,
27F117861F975AF400F9CA18 /* UIImage+GIF.m in Sources */,
27F117811F975AF400F9CA18 /* SDWebImageDownloader.m in Sources */,
27F1177D1F975AF400F9CA18 /* SDImageCache.m in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
27F117221F974A4700F9CA18 /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
27F1172B1F974A4700F9CA18 /* PlanADScrollViewTests.m in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
27F1172D1F974A4700F9CA18 /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
27F117361F974A4700F9CA18 /* PlanADScrollViewUITests.m in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXSourcesBuildPhase section */
/* Begin PBXTargetDependency section */
27F117281F974A4700F9CA18 /* PBXTargetDependency */ = {
isa = PBXTargetDependency;
target = 27F1170C1F974A4600F9CA18 /* PlanADScrollView */;
targetProxy = 27F117271F974A4700F9CA18 /* PBXContainerItemProxy */;
};
27F117331F974A4700F9CA18 /* PBXTargetDependency */ = {
isa = PBXTargetDependency;
target = 27F1170C1F974A4600F9CA18 /* PlanADScrollView */;
targetProxy = 27F117321F974A4700F9CA18 /* PBXContainerItemProxy */;
};
/* End PBXTargetDependency section */
/* Begin PBXVariantGroup section */
27F117191F974A4600F9CA18 /* Main.storyboard */ = {
isa = PBXVariantGroup;
children = (
27F1171A1F974A4600F9CA18 /* Base */,
);
name = Main.storyboard;
sourceTree = "<group>";
};
27F1171E1F974A4600F9CA18 /* LaunchScreen.storyboard */ = {
isa = PBXVariantGroup;
children = (
27F1171F1F974A4600F9CA18 /* Base */,
);
name = LaunchScreen.storyboard;
sourceTree = "<group>";
};
/* End PBXVariantGroup section */
/* Begin XCBuildConfiguration section */
27F117381F974A4700F9CA18 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
ALWAYS_SEARCH_USER_PATHS = NO;
CLANG_ANALYZER_NONNULL = YES;
CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
CLANG_CXX_LIBRARY = "libc++";
CLANG_ENABLE_MODULES = YES;
CLANG_ENABLE_OBJC_ARC = YES;
CLANG_WARN_BOOL_CONVERSION = YES;
CLANG_WARN_CONSTANT_CONVERSION = YES;
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
CLANG_WARN_EMPTY_BODY = YES;
CLANG_WARN_ENUM_CONVERSION = YES;
CLANG_WARN_INFINITE_RECURSION = YES;
CLANG_WARN_INT_CONVERSION = YES;
CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
CLANG_WARN_SUSPICIOUS_MOVE = YES;
CLANG_WARN_UNREACHABLE_CODE = YES;
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
"CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
COPY_PHASE_STRIP = NO;
DEBUG_INFORMATION_FORMAT = dwarf;
ENABLE_STRICT_OBJC_MSGSEND = YES;
ENABLE_TESTABILITY = YES;
GCC_C_LANGUAGE_STANDARD = gnu99;
GCC_DYNAMIC_NO_PIC = NO;
GCC_NO_COMMON_BLOCKS = YES;
GCC_OPTIMIZATION_LEVEL = 0;
GCC_PREPROCESSOR_DEFINITIONS = (
"DEBUG=1",
"$(inherited)",
);
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
GCC_WARN_UNDECLARED_SELECTOR = YES;
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
GCC_WARN_UNUSED_FUNCTION = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
IPHONEOS_DEPLOYMENT_TARGET = 10.3;
MTL_ENABLE_DEBUG_INFO = YES;
ONLY_ACTIVE_ARCH = YES;
SDKROOT = iphoneos;
TARGETED_DEVICE_FAMILY = "1,2";
};
name = Debug;
};
27F117391F974A4700F9CA18 /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
ALWAYS_SEARCH_USER_PATHS = NO;
CLANG_ANALYZER_NONNULL = YES;
CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
CLANG_CXX_LIBRARY = "libc++";
CLANG_ENABLE_MODULES = YES;
CLANG_ENABLE_OBJC_ARC = YES;
CLANG_WARN_BOOL_CONVERSION = YES;
CLANG_WARN_CONSTANT_CONVERSION = YES;
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
CLANG_WARN_EMPTY_BODY = YES;
CLANG_WARN_ENUM_CONVERSION = YES;
CLANG_WARN_INFINITE_RECURSION = YES;
CLANG_WARN_INT_CONVERSION = YES;
CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
CLANG_WARN_SUSPICIOUS_MOVE = YES;
CLANG_WARN_UNREACHABLE_CODE = YES;
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
"CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
COPY_PHASE_STRIP = NO;
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
ENABLE_NS_ASSERTIONS = NO;
ENABLE_STRICT_OBJC_MSGSEND = YES;
GCC_C_LANGUAGE_STANDARD = gnu99;
GCC_NO_COMMON_BLOCKS = YES;
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
GCC_WARN_UNDECLARED_SELECTOR = YES;
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
GCC_WARN_UNUSED_FUNCTION = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
IPHONEOS_DEPLOYMENT_TARGET = 10.3;
MTL_ENABLE_DEBUG_INFO = NO;
SDKROOT = iphoneos;
TARGETED_DEVICE_FAMILY = "1,2";
VALIDATE_PRODUCT = YES;
};
name = Release;
};
27F1173B1F974A4700F9CA18 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
DEVELOPMENT_TEAM = HWQF636ATC;
INFOPLIST_FILE = PlanADScrollView/Info.plist;
IPHONEOS_DEPLOYMENT_TARGET = 8.0;
LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks";
PRODUCT_BUNDLE_IDENTIFIER = com.Plan.PlanADScrollView;
PRODUCT_NAME = "$(TARGET_NAME)";
};
name = Debug;
};
27F1173C1F974A4700F9CA18 /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
DEVELOPMENT_TEAM = HWQF636ATC;
INFOPLIST_FILE = PlanADScrollView/Info.plist;
IPHONEOS_DEPLOYMENT_TARGET = 8.0;
LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks";
PRODUCT_BUNDLE_IDENTIFIER = com.Plan.PlanADScrollView;
PRODUCT_NAME = "$(TARGET_NAME)";
};
name = Release;
};
27F1173E1F974A4700F9CA18 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
BUNDLE_LOADER = "$(TEST_HOST)";
DEVELOPMENT_TEAM = HWQF636ATC;
INFOPLIST_FILE = PlanADScrollViewTests/Info.plist;
LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks";
PRODUCT_BUNDLE_IDENTIFIER = com.Plan.PlanADScrollViewTests;
PRODUCT_NAME = "$(TARGET_NAME)";
TEST_HOST = "$(BUILT_PRODUCTS_DIR)/PlanADScrollView.app/PlanADScrollView";
};
name = Debug;
};
27F1173F1F974A4700F9CA18 /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
BUNDLE_LOADER = "$(TEST_HOST)";
DEVELOPMENT_TEAM = HWQF636ATC;
INFOPLIST_FILE = PlanADScrollViewTests/Info.plist;
LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks";
PRODUCT_BUNDLE_IDENTIFIER = com.Plan.PlanADScrollViewTests;
PRODUCT_NAME = "$(TARGET_NAME)";
TEST_HOST = "$(BUILT_PRODUCTS_DIR)/PlanADScrollView.app/PlanADScrollView";
};
name = Release;
};
27F117411F974A4700F9CA18 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
DEVELOPMENT_TEAM = HWQF636ATC;
INFOPLIST_FILE = PlanADScrollViewUITests/Info.plist;
LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks";
PRODUCT_BUNDLE_IDENTIFIER = com.Plan.PlanADScrollViewUITests;
PRODUCT_NAME = "$(TARGET_NAME)";
TEST_TARGET_NAME = PlanADScrollView;
};
name = Debug;
};
27F117421F974A4700F9CA18 /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
DEVELOPMENT_TEAM = HWQF636ATC;
INFOPLIST_FILE = PlanADScrollViewUITests/Info.plist;
LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks";
PRODUCT_BUNDLE_IDENTIFIER = com.Plan.PlanADScrollViewUITests;
PRODUCT_NAME = "$(TARGET_NAME)";
TEST_TARGET_NAME = PlanADScrollView;
};
name = Release;
};
/* End XCBuildConfiguration section */
/* Begin XCConfigurationList section */
27F117081F974A4600F9CA18 /* Build configuration list for PBXProject "PlanADScrollView" */ = {
isa = XCConfigurationList;
buildConfigurations = (
27F117381F974A4700F9CA18 /* Debug */,
27F117391F974A4700F9CA18 /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
27F1173A1F974A4700F9CA18 /* Build configuration list for PBXNativeTarget "PlanADScrollView" */ = {
isa = XCConfigurationList;
buildConfigurations = (
27F1173B1F974A4700F9CA18 /* Debug */,
27F1173C1F974A4700F9CA18 /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
27F1173D1F974A4700F9CA18 /* Build configuration list for PBXNativeTarget "PlanADScrollViewTests" */ = {
isa = XCConfigurationList;
buildConfigurations = (
27F1173E1F974A4700F9CA18 /* Debug */,
27F1173F1F974A4700F9CA18 /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
27F117401F974A4700F9CA18 /* Build configuration list for PBXNativeTarget "PlanADScrollViewUITests" */ = {
isa = XCConfigurationList;
buildConfigurations = (
27F117411F974A4700F9CA18 /* Debug */,
27F117421F974A4700F9CA18 /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
/* End XCConfigurationList section */
};
rootObject = 27F117051F974A4600F9CA18 /* Project object */;
}
|
2881099/csredis | 230,658 | src/CSRedisCore/CSRedisClient.cs | using CSRedis.Internal.ObjectPool;
using Newtonsoft.Json;
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Diagnostics;
using System.Globalization;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading;
namespace CSRedis
{
public partial class CSRedisClient : IDisposable
{
/// <summary>
/// 按 key 规则分区存储
/// </summary>
public ConcurrentDictionary<string, RedisClientPool> Nodes { get; } = new ConcurrentDictionary<string, RedisClientPool>();
private int NodesIndexIncrement = -1;
public ConcurrentDictionary<int, string> NodesIndex { get; } = new ConcurrentDictionary<int, string>();
private ConcurrentDictionary<string, int> NodesKey { get; } = new ConcurrentDictionary<string, int>();
internal Func<string, string> NodeRuleRaw;
internal Func<string, string> NodeRuleExternal;
internal RedisSentinelManager SentinelManager;
internal string SentinelMasterName;
internal string SentinelMasterValue;
internal bool IsMultiNode => Nodes.Count > 1 && SentinelManager == null;
private object NodesLock = new object();
public ConcurrentDictionary<ushort, ushort> SlotCache = new ConcurrentDictionary<ushort, ushort>();
private Func<JsonSerializerSettings> JsonSerializerSettings = () =>
{
var st = new JsonSerializerSettings();
st.Converters.Add(new Newtonsoft.Json.Converters.StringEnumConverter());
st.DateFormatHandling = DateFormatHandling.IsoDateFormat;
st.DateTimeZoneHandling = DateTimeZoneHandling.RoundtripKind;
return st;
};
/// <summary>
/// 自定义序列化(全局默认)
/// </summary>
public static Func<object, string> Serialize;
/// <summary>
/// 自定义反序列化(全局默认)
/// </summary>
public static Func<string, Type, object> Deserialize;
/// <summary>
/// 自定义序列化
/// </summary>
public Func<object, string> CurrentSerialize;
/// <summary>
/// 自定义反序列化
/// </summary>
public Func<string, Type, object> CurrentDeserialize;
DateTime _dt1970 = new DateTime(1970, 1, 1);
Random _rnd = new Random();
#region 序列化写入,反序列化
/// <summary>
/// Serializes a value to its string form, preferring the per-instance hook,
/// then the global hook, and finally Json.NET with this client's settings.
/// </summary>
internal string SerializeObject(object value)
{
    // Per-instance serializer wins over the static/global one.
    var custom = CurrentSerialize ?? Serialize;
    if (custom != null) return custom(value);
    return JsonConvert.SerializeObject(value, this.JsonSerializerSettings());
}
/// <summary>
/// Deserializes a string into <typeparamref name="T"/>, preferring the per-instance hook,
/// then the global hook, and finally Json.NET with this client's settings.
/// </summary>
internal T DeserializeObject<T>(string value)
{
    // Per-instance deserializer wins over the static/global one.
    var custom = CurrentDeserialize ?? Deserialize;
    if (custom != null) return (T)custom(value, typeof(T));
    return JsonConvert.DeserializeObject<T>(value, this.JsonSerializerSettings());
}
/// <summary>
/// Converts a value to the raw form sent to redis: byte[]/string pass through untouched,
/// common value types are rendered as plain strings, and everything else is serialized
/// via <see cref="SerializeObject"/>.
/// </summary>
internal object SerializeRedisValueInternal(object value)
{
    if (value == null) return null;
    var type = value.GetType();
    // TrimEnd(']') lets "System.Byte[]" and "System.Nullable`1[...]" be matched by prefix below.
    var typename = type.ToString().TrimEnd(']');
    if (typename == "System.Byte[" ||
        typename == "System.String") return value;

    if (type.IsValueType)
    {
        bool isNullable = typename.StartsWith("System.Nullable`1[");
        // Substring(18) strips the "System.Nullable`1[" wrapper to reach the underlying type name.
        var basename = isNullable ? typename.Substring(18) : typename;
        switch (basename)
        {
            case "System.Boolean": return value.ToString() == "True" ? "1" : "0"; // stored as "1"/"0"
            case "System.Byte": return value.ToString();
            case "System.Char": return value.ToString()[0];
            case "System.Decimal":
            case "System.Double":
            case "System.Single":
            case "System.Int32":
            case "System.Int64":
            case "System.SByte":
            case "System.Int16":
            case "System.UInt32":
            case "System.UInt64":
            case "System.UInt16": return value.ToString();
            // Round-trippable ISO-ish format with offset, invariant culture.
            case "System.DateTime": return ((DateTime)value).ToString("yyyy-MM-ddTHH:mm:sszzzz", System.Globalization.DateTimeFormatInfo.InvariantInfo);
            case "System.DateTimeOffset": return value.ToString();
            // TimeSpan is stored as raw ticks so DeserializeRedisValueInternal can rebuild it losslessly.
            case "System.TimeSpan": return ((TimeSpan)value).Ticks;
            case "System.Guid": return value.ToString();
        }
    }
    // Reference types and unhandled structs fall back to the configured serializer.
    return this.SerializeObject(value);
}
/// <summary>
/// Converts a raw redis reply into <typeparamref name="T"/>: byte[]/string/bool[] are handled
/// directly, common value types via culture-invariant TryParse, everything else via
/// <see cref="DeserializeObject{T}"/>. Null or empty payloads yield default(T).
/// </summary>
internal T DeserializeRedisValueInternal<T>(byte[] value)
{
    if (value == null) return default(T);
    var type = typeof(T);
    // TrimEnd(']') lets "System.Byte[]" and "System.Nullable`1[...]" be matched by prefix below.
    var typename = type.ToString().TrimEnd(']');
    if (typename == "System.Byte[") return (T)Convert.ChangeType(value, type);
    // Decoding uses the first node's configured encoding.
    if (typename == "System.String") return (T)Convert.ChangeType(Nodes.First().Value.Encoding.GetString(value), type);
    // 49 is ASCII '1': each byte of the reply maps to one bool.
    if (typename == "System.Boolean[") return (T)Convert.ChangeType(value.Select(a => a == 49).ToArray(), type);

    var valueStr = Nodes.First().Value.Encoding.GetString(value);
    if (string.IsNullOrEmpty(valueStr)) return default(T);
    if (type.IsValueType)
    {
        bool isNullable = typename.StartsWith("System.Nullable`1[");
        // Substring(18) strips the "System.Nullable`1[" wrapper to reach the underlying type name.
        var basename = isNullable ? typename.Substring(18) : typename;

        bool isElse = false;
        object obj = null;
        switch (basename)
        {
            case "System.Boolean":
                // Mirrors the "1"/"0" encoding written by SerializeRedisValueInternal.
                if (valueStr == "1") obj = true;
                else if (valueStr == "0") obj = false;
                break;
            case "System.Byte":
                if (byte.TryParse(valueStr, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var trybyte)) obj = trybyte;
                break;
            case "System.Char":
                if (valueStr.Length > 0) obj = valueStr[0];
                break;
            case "System.Decimal":
                if (Decimal.TryParse(valueStr, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var trydec)) obj = trydec;
                break;
            case "System.Double":
                if (Double.TryParse(valueStr, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var trydb)) obj = trydb;
                break;
            case "System.Single":
                if (Single.TryParse(valueStr, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var trysg)) obj = trysg;
                break;
            case "System.Int32":
                if (Int32.TryParse(valueStr, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryint32)) obj = tryint32;
                break;
            case "System.Int64":
                if (Int64.TryParse(valueStr, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryint64)) obj = tryint64;
                break;
            case "System.SByte":
                if (SByte.TryParse(valueStr, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var trysb)) obj = trysb;
                break;
            case "System.Int16":
                if (Int16.TryParse(valueStr, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryint16)) obj = tryint16;
                break;
            case "System.UInt32":
                if (UInt32.TryParse(valueStr, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryuint32)) obj = tryuint32;
                break;
            case "System.UInt64":
                if (UInt64.TryParse(valueStr, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryuint64)) obj = tryuint64;
                break;
            case "System.UInt16":
                if (UInt16.TryParse(valueStr, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryuint16)) obj = tryuint16;
                break;
            case "System.DateTime":
                if (DateTime.TryParse(valueStr, out var trydt)) obj = trydt;
                break;
            case "System.DateTimeOffset":
                if (DateTimeOffset.TryParse(valueStr, out var trydtos)) obj = trydtos;
                break;
            case "System.TimeSpan":
                // Stored as raw ticks (see SerializeRedisValueInternal); note: reuses tryint64 from the Int64 case above.
                if (Int64.TryParse(valueStr, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out tryint64)) obj = new TimeSpan(tryint64);
                break;
            case "System.Guid":
                if (Guid.TryParse(valueStr, out var tryguid)) obj = tryguid;
                break;
            default:
                isElse = true;
                break;
        }
        if (isElse == false)
        {
            // Parse failure on a recognized value type yields default(T) rather than throwing.
            if (obj == null) return default(T);
            return (T)obj;
            //return (T)Convert.ChangeType(obj, typeof(T));
        }
    }
    // Reference types and unhandled structs fall back to the configured deserializer.
    return this.DeserializeObject<T>(valueStr);
}
/// <summary>
/// Element-wise deserialization of a raw reply array; a null input maps to a null output.
/// </summary>
internal T[] DeserializeRedisValueArrayInternal<T>(byte[][] value)
{
    if (value == null) return null;
    return value.Select(raw => this.DeserializeRedisValueInternal<T>(raw)).ToArray();
}
/// <summary>
/// Deserializes Item1 of each pair into <typeparamref name="T1"/>; Item2 passes through untouched.
/// A null input maps to a null output.
/// </summary>
internal (T1, T2)[] DeserializeRedisValueTuple1Internal<T1, T2>(Tuple<byte[], T2>[] value)
{
    if (value == null) return null;
    return value.Select(pair => (this.DeserializeRedisValueInternal<T1>(pair.Item1), pair.Item2)).ToArray();
}
/// <summary>
/// Deserializes Item2 of each pair into <typeparamref name="T1"/>; Item1 passes through untouched.
/// A null input maps to a null output.
/// </summary>
internal (T2, T1)[] DeserializeRedisValueTuple2Internal<T2, T1>(Tuple<T2, byte[]>[] value)
{
    if (value == null) return null;
    return value.Select(pair => (pair.Item1, this.DeserializeRedisValueInternal<T1>(pair.Item2))).ToArray();
}
/// <summary>
/// Rebuilds the map with each raw value deserialized into <typeparamref name="TValue"/>.
/// A null input maps to a null output.
/// </summary>
internal Dictionary<TKey, TValue> DeserializeRedisValueDictionaryInternal<TKey, TValue>(Dictionary<TKey, byte[]> value)
{
    if (value == null) return null;
    // ToDictionary throws on duplicate keys, matching the original Add semantics.
    return value.ToDictionary(kv => kv.Key, kv => this.DeserializeRedisValueInternal<TValue>(kv.Value));
}
#endregion
/// <summary>
/// Creates a redis client (standalone or multi-node partitioning).
/// </summary>
/// <param name="connectionString">127.0.0.1[:6379],password=123456,defaultDatabase=13,poolsize=50,ssl=false,writeBuffer=10240,prefix=key prefix</param>
public CSRedisClient(string connectionString) : this(null, new string[0], false, null, connectionString) { }
/// <summary>
/// Creates a redis sentinel client (Redis Sentinel).
/// </summary>
/// <param name="connectionString">mymaster,password=123456,poolsize=50,connectTimeout=200,ssl=false</param>
/// <param name="sentinels">Sentinel nodes, e.g. ip1:26379, ip2:26379</param>
/// <param name="readOnly">false: use the master node for read/write operations<para></para>true: use only available slave nodes for read-only operations</param>
public CSRedisClient(string connectionString, string[] sentinels, bool readOnly = false) : this(null, sentinels, readOnly, null, connectionString) { }
/// <summary>
/// Creates a redis sentinel client (Redis Sentinel) <see cref="CSRedisClient"/>
/// </summary>
/// <param name="connectionString">mymaster,password=123456,poolsize=50,connectTimeout=200,ssl=false</param>
/// <param name="sentinels">Sentinel nodes, e.g. ip1:26379, ip2:26379</param>
/// <param name="readOnly">false: use the master node for read/write operations<para></para>true: use only available slave nodes for read-only operations</param>
/// <param name="convert">Conversion rule for the sentinel-reported master host</param>
public CSRedisClient(string connectionString, string[] sentinels, bool readOnly, SentinelMasterConverter convert) : this(null, sentinels, readOnly, convert, connectionString) { }
/// <summary>
/// Creates a partitioned redis client: keys are routed by KeyRule to the matching connectionString.
/// </summary>
/// <param name="NodeRule">Partitioning rule by key; return value format: 127.0.0.1:6379/13. Default (null): key hash modulo node count</param>
/// <param name="connectionStrings">127.0.0.1[:6379],password=123456,defaultDatabase=13,poolsize=50,ssl=false,writeBuffer=10240,prefix=key prefix</param>
public CSRedisClient(Func<string, string> NodeRule, params string[] connectionStrings) : this(NodeRule, null, false, null, connectionStrings) { }
/// <summary>
/// Core constructor shared by all public overloads: builds one connection pool per
/// connection string, wires the sentinel manager when sentinels are supplied, and installs
/// the key-to-node routing rule (slot cache / modulo fallback / user-supplied NodeRule).
/// </summary>
protected CSRedisClient(Func<string, string> NodeRule, string[] sentinels, bool readOnly, SentinelMasterConverter convert = null, params string[] connectionStrings)
{
    if (connectionStrings == null || !connectionStrings.Any()) throw new Exception("Redis ConnectionString 未设置");
    // Temporary policy used only to parse password/connectTimeout out of the first connection string.
    var tmppoolPolicy = new RedisClientPoolPolicy();
    tmppoolPolicy.ConnectionString = connectionStrings.First() + ",preheat=false";

    if (sentinels?.Any() == true)
    {
        if (connectionStrings.Length > 1) throw new Exception("Redis Sentinel 不可设置多个 ConnectionString");
        SentinelManager = new RedisSentinelManager(readOnly, sentinels);
        SentinelManager.SentinelMasterConverter = convert;
        SentinelManager.Connected += (s, e) =>
        {
            // Authenticate against the sentinel with the configured password, if any.
            if (!string.IsNullOrEmpty(tmppoolPolicy._password))
            {
                try
                {
                    SentinelManager.Call(c => c.Auth(tmppoolPolicy._password));
                }
                catch (Exception authEx)
                {
                    // Tolerate sentinels that themselves have no password configured.
                    if (authEx.Message != "ERR Client sent AUTH, but no password is set")
                        throw;
                }
            }
        };
        // In sentinel mode the first token of the connection string is the master name.
        SentinelMasterName = connectionStrings.First().Split(',').FirstOrDefault() ?? "mymaster";
        try
        {
            SentinelMasterValue = SentinelManager.Connect(SentinelMasterName, tmppoolPolicy._connectTimeout);
        }
        catch
        {
            // No master currently available; the pool below starts as a "waiting for recovery" placeholder.
        }
    }
    RedisClientPool firstPool = null;
    this.NodeRuleRaw = key =>
    {
        if (Nodes.Count <= 1) return NodesIndex[0];

        var prefix = firstPool?.Prefix;
        // redis-cluster mode: compute the slot from the first connectionString's prefix + key.
        var slot = GetClusterSlot(string.Concat(prefix, key));
        if (SlotCache.TryGetValue(slot, out var slotIndex) && NodesIndex.TryGetValue(slotIndex, out var slotKey))
        {
            if (Nodes.TryGetValue(slotKey, out var b) && b.IsAvailable == false)
            {
                var availableNode = Nodes.Where(a => a.Value.IsAvailable).FirstOrDefault();
                if (string.IsNullOrEmpty(availableNode.Key) == false) return availableNode.Key; // fall over to any available node
            }
            return slotKey; // route by the node recorded from the last MOVED redirect
        }
        if (this.NodeRuleExternal == null)
        {
            if (string.IsNullOrEmpty(prefix) == false) slot = GetClusterSlot(key ?? string.Empty);
            // Fallback routing: slot modulo node count.
            var idx = slot % NodesIndex.Count;
            slotKey = idx < 0 || idx >= NodesIndex.Count ? NodesIndex[0] : NodesIndex[idx];
            if (Nodes.TryGetValue(slotKey, out var b) && b.IsAvailable == false)
            {
                var availableNode = Nodes.Where(a => a.Value.IsAvailable).FirstOrDefault();
                if (string.IsNullOrEmpty(availableNode.Key) == false) return availableNode.Key; // fall over to any available node
            }
            return slotKey;
        }
        return this.NodeRuleExternal(key);
    };
    this.NodeRuleExternal = NodeRule;

    foreach (var connectionString in connectionStrings)
    {
        var connStr = connectionString;
        if (SentinelManager != null)
        {
            // Replace the master-name token with the resolved master endpoint.
            var startIdx = connStr.IndexOf(',');
            connStr = startIdx == -1 ? "" : connStr.Substring(startIdx);
            if (string.IsNullOrEmpty(SentinelMasterValue))
                connStr = $"255.255.255.255:19736{connStr},preheat=false"; // placeholder pool waiting for the master to recover
            else
                connStr = $"{SentinelMasterValue}{connStr}";
        }

        var pool = new RedisClientPool(connStr, client => { });
        var nodeKey = SentinelMasterName ?? pool.Key;
        if (Nodes.ContainsKey(nodeKey)) throw new Exception($"Node: {nodeKey} 重复,请检查");
        if (this.TryAddNode(nodeKey, pool) == false)
        {
            pool.Dispose();
            pool = null;
            throw new Exception($"Node: {nodeKey} 无法添加");
        }
        if (firstPool == null) firstPool = pool;
    }
    this.NodesServerManager = new NodesServerManagerProvider(this);
    if (firstPool._policy._testCluster)
    {
        // Try to discover the other cluster nodes and pre-populate the slot cache.
        try
        {
            byte[] cnret = null;
            using (var obj = firstPool.Get())
            {
                cnret = obj.Value.Call("cluster nodes") as byte[];
            }
            if (cnret != null)
            {
                // Parse the "cluster nodes" reply: one node per line, space-separated fields.
                var cnodes = firstPool.Encoding.GetString(cnret).Split('\n');
                foreach (var cnode in cnodes)
                {
                    if (string.IsNullOrEmpty(cnode)) continue;
                    var dt = cnode.Trim().Split(' ');
                    if (dt.Length >= 9)
                    {
                        // dt[2] is the flags field; accept "master" and "myself,master".
                        if (dt[2].StartsWith("master") || dt[2].EndsWith("master"))
                        {
                            if (dt[7] == "connected")
                            {
                                var endpoint = dt[1];
                                // Strip the "@cluster-bus-port" suffix if present.
                                var at40 = endpoint.IndexOf('@');
                                if (at40 != -1) endpoint = endpoint.Remove(at40);

                                // Fields 8+ are slot ranges like "0-5460".
                                for (var slotIndex = 8; slotIndex < dt.Length; slotIndex++)
                                {
                                    var slots = dt[slotIndex].Split('-');
                                    if (ushort.TryParse(slots[0], out var tryslotStart) &&
                                        ushort.TryParse(slots[1], out var tryslotEnd))
                                    {
                                        for (var slot = tryslotStart; slot <= tryslotEnd; slot++)
                                        {
                                            // Registering as a MOVED target caches slot -> node ownership.
                                            GetRedirectPool((true, false, slot, endpoint), firstPool);
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        catch { }
    }
}
/// <summary>
/// Tears down every node's connection pool, then the sentinel manager (if any).
/// </summary>
public void Dispose()
{
    foreach (var node in this.Nodes)
        node.Value.Dispose();
    SentinelManager?.Dispose();
}
bool BackgroundGetSentinelMasterValueIng = false;
object BackgroundGetSentinelMasterValueIngLock = new object();
/// <summary>
/// Starts (at most one) background poller that re-resolves the sentinel-elected master
/// every second and restores the pool once the new master becomes reachable.
/// </summary>
/// <returns>true if this call started the recovery poller; false if sentinel mode is off,
/// multiple nodes exist, or a poller is already running.</returns>
bool BackgroundGetSentinelMasterValue()
{
    if (SentinelManager == null) return false;
    if (Nodes.Count > 1) return false;

    // Double-checked flag: only one recovery thread may exist at a time.
    var ing = false;
    if (BackgroundGetSentinelMasterValueIng == false)
    {
        lock (BackgroundGetSentinelMasterValueIngLock)
        {
            if (BackgroundGetSentinelMasterValueIng == false)
            {
                BackgroundGetSentinelMasterValueIng = ing = true;
            }
        }
    }

    if (ing)
    {
        var pool = Nodes.First().Value;
        var recoverThread = new Thread(() =>
        {
            while (true)
            {
                Thread.CurrentThread.Join(1000); // poll once per second
                try
                {
                    // Ask the sentinels for the current master and point the pool at it.
                    SentinelMasterValue = SentinelManager.Connect(SentinelMasterName, pool._policy._connectTimeout);
                    pool._policy.SetHost(SentinelMasterValue);
                    if (pool.CheckAvailable())
                    {
                        var bgcolor = Console.BackgroundColor;
                        var forecolor = Console.ForegroundColor;
                        Console.BackgroundColor = ConsoleColor.DarkGreen;
                        Console.ForegroundColor = ConsoleColor.White;
                        Console.Write($"Redis Sentinel Pool 已切换至 {SentinelMasterValue}");
                        Console.BackgroundColor = bgcolor;
                        Console.ForegroundColor = forecolor;
                        Console.WriteLine();
                        BackgroundGetSentinelMasterValueIng = false;
                        return;
                    }
                }
                catch (Exception ex21)
                {
                    Trace.WriteLine($"Redis Sentinel: {ex21.Message}");
                }
            }
        });
        // FIX: mark the poller as a background thread. The loop above can run indefinitely
        // when no master recovers, and a foreground thread would keep the process alive
        // after all application threads have exited.
        recoverThread.IsBackground = true;
        recoverThread.Start();
    }
    return ing;
}
/// <summary>
/// Acquires a connection from <paramref name="pool"/> and runs <paramref name="handler"/>,
/// retrying transient network failures and following redis-cluster MOVED/ASK redirects.
/// </summary>
/// <param name="jump">Remaining number of cluster redirects this call may still follow.</param>
/// <param name="errtimes">Network-error retries already consumed.</param>
T GetAndExecute<T>(RedisClientPool pool, Func<Object<RedisClient>, T> handler, int jump = 100, int errtimes = 0)
{
    Object<RedisClient> obj = null;
    Exception ex = null;
    var redirect = ParseClusterRedirect(null);
    try
    {
        obj = pool.Get();
        while (true)
        { // retry loop for transient network errors (default 1 retry)
            try
            {
                var ret = handler(obj);
                return ret;
            }
            catch (RedisException ex3)
            {
                redirect = ParseClusterRedirect(ex3); // official cluster redirection (MOVED/ASK)
                if (redirect == null || jump <= 0)
                {
                    ex = ex3;
                    if (SentinelManager != null && ex.Message.Contains("READONLY"))
                    { // sentinel failover: this node has become a read-only replica
                        if (pool.SetUnavailable(ex, obj.LastGetTimeCopy) == true)
                            BackgroundGetSentinelMasterValue();
                    }
                    throw ex;
                }
                // Redirect present and jumps remain: leave the loop and follow it below.
                break;
            }
            catch (Exception ex2)
            {
                ex = ex2;
                if (pool.UnavailableException != null) throw ex;
                // Probe the connection; replace it when even PING fails.
                var isPong = false;
                try
                {
                    obj.Value.Ping();
                    isPong = true;
                }
                catch
                {
                    obj.ResetValue();
                }
                if (isPong == false || ++errtimes > pool._policy._tryit)
                {
                    if (SentinelManager != null)
                    { // sentinel mode: mark unavailable and start master-recovery polling
                        if (pool.SetUnavailable(ex, obj.LastGetTimeCopy) == true)
                            BackgroundGetSentinelMasterValue();
                        throw new Exception($"Redis Sentinel Master is switching:{ex.Message}");
                    }
                    throw ex; // retries exhausted
                }
                else
                {
                    // Clear ex so the connection is returned to the pool as healthy before retrying.
                    ex = null;
                    Trace.WriteLine($"csredis tryit ({errtimes}) ...");
                }
            }
        }
    }
    finally
    {
        pool.Return(obj, ex);
    }
    if (redirect == null)
        return GetAndExecute(pool, handler, jump - 1, errtimes);

    // ASK redirects require sending ASKING on the target connection before retrying the command.
    var redirectHander = redirect.Value.isMoved ? handler : redirectObj =>
    {
        redirectObj.Value.Call("ASKING");
        return handler(redirectObj);
    };
    return GetAndExecute<T>(GetRedirectPool(redirect.Value, pool), redirectHander, jump - 1);
}
/// <summary>
/// Registers a node pool under <paramref name="nodeKey"/> across the three node maps
/// (Nodes, NodesIndex, NodesKey). On failure, every partially created mapping is rolled back.
/// </summary>
/// <returns>true when the node was fully registered in all three maps.</returns>
bool TryAddNode(string nodeKey, RedisClientPool pool)
{
    if (Nodes.TryAdd(nodeKey, pool))
    {
        var nodeIndex = Interlocked.Increment(ref NodesIndexIncrement);
        if (NodesIndex.TryAdd(nodeIndex, nodeKey) && NodesKey.TryAdd(nodeKey, nodeIndex)) return true;
        // FIX: roll back ALL partial registrations. Previously a successful NodesIndex.TryAdd
        // followed by a failed NodesKey.TryAdd left a stale index -> nodeKey mapping behind.
        NodesIndex.TryRemove(nodeIndex, out var remIndexKey);
        NodesKey.TryRemove(nodeKey, out var remNodeIndex);
        Nodes.TryRemove(nodeKey, out var rempool);
        Interlocked.Decrement(ref NodesIndexIncrement);
    }
    return false;
}
/// <summary>
/// Resolves (and lazily creates) the node pool targeted by a MOVED/ASK redirect,
/// and for MOVED redirects caches the slot's new owner in <see cref="SlotCache"/>.
/// </summary>
RedisClientPool GetRedirectPool((bool isMoved, bool isAsk, ushort slot, string endpoint) redirect, RedisClientPool pool)
{
    // A node that reports itself as loopback is rewritten to the host we originally dialed.
    // NOTE(review): Substring(10) relies on "127.0.0.1:" and "localhost:" both being 10 chars.
    if (redirect.endpoint.StartsWith("127.0.0.1"))
        redirect.endpoint = $"{pool._policy._ip}:{redirect.endpoint.Substring(10)}";
    else if (redirect.endpoint.StartsWith("localhost", StringComparison.CurrentCultureIgnoreCase))
        redirect.endpoint = $"{pool._policy._ip}:{redirect.endpoint.Substring(10)}";

    var nodeKey = $"{redirect.endpoint}/{pool._policy._database}";
    if (Nodes.TryGetValue(nodeKey, out var movedPool) == false)
    {
        lock (NodesLock)
        { // double-checked creation of the redirected node's pool
            if (Nodes.TryGetValue(nodeKey, out movedPool) == false)
            {
                var connectionString = pool._policy.BuildConnectionString(redirect.endpoint);
                movedPool = new RedisClientPool(connectionString, client => { });
                if (this.TryAddNode(nodeKey, movedPool) == false)
                {
                    movedPool.Dispose();
                    movedPool = null;
                }
            }
        }
        if (movedPool == null)
            throw new Exception($"{(redirect.isMoved ? "MOVED" : "ASK")} {redirect.slot} {redirect.endpoint}");
    }
    // MOVED is a permanent redirect (cache slot ownership); ASK is a one-shot temporary redirect.
    if (redirect.isMoved && NodesKey.TryGetValue(nodeKey, out var nodeIndex2))
    {
        SlotCache.AddOrUpdate(redirect.slot, (ushort)nodeIndex2, (oldkey, oldvalue) => (ushort)nodeIndex2);
    }
    return movedPool;
}
/// <summary>
/// Recognizes redis-cluster redirection errors of the form
/// "MOVED &lt;slot&gt; &lt;host:port&gt;" or "ASK &lt;slot&gt; &lt;host:port&gt;".
/// </summary>
/// <returns>The parsed redirect, or null when <paramref name="ex"/> is not a redirect error.</returns>
(bool isMoved, bool isAsk, ushort slot, string endpoint)? ParseClusterRedirect(Exception ex)
{
    if (ex == null) return null;
    var message = ex.Message;
    var moved = message.StartsWith("MOVED ");
    var ask = message.StartsWith("ASK ");
    if (!moved && !ask) return null;
    // Only the first line of the message carries the redirect; split into at most 3 tokens.
    var firstLine = message.Split(new string[] { "\r\n" }, StringSplitOptions.None).FirstOrDefault();
    var tokens = firstLine.Split(new[] { ' ' }, 3);
    if (tokens.Length != 3) return null;
    if (!ushort.TryParse(tokens[1], out var slot)) return null;
    return (moved, ask, slot, tokens[2]);
}
/// <summary>
/// Runs a multi-key command that cannot span nodes: every key must resolve to one
/// and the same node, otherwise partitioned mode rejects the call.
/// </summary>
T NodesNotSupport<T>(string[] keys, T defaultValue, Func<Object<RedisClient>, string[], T> callback)
{
    if (keys == null || keys.Length == 0) return defaultValue;
    // Resolve the target node of each key; more than one distinct node is an error.
    var nodeKeys = Nodes.Count > 1 ? keys.Select(k => NodeRuleRaw(k)).Distinct() : new[] { Nodes.FirstOrDefault().Key };
    if (nodeKeys.Count() > 1) throw new Exception("由于开启了分区模式,keys 分散在多个节点,无法使用此功能");
    var pool = Nodes.TryGetValue(nodeKeys.First(), out var hit) ? hit : Nodes.First().Value;
    // Apply the pool's key prefix before dispatch.
    var prefixedKeys = keys.Select(k => string.Concat(pool.Prefix, k)).ToArray();
    if (prefixedKeys.Length == 0) return defaultValue;
    return GetAndExecute(pool, conn => callback(conn, prefixedKeys));
}
/// <summary>
/// Single-key variant of NodesNotSupport: only valid when one node serves all keys.
/// </summary>
T NodesNotSupport<T>(string key, Func<Object<RedisClient>, string, T> callback)
{
    if (!IsMultiNode) return ExecuteScalar<T>(key, callback);
    throw new Exception("由于开启了分区模式,无法使用此功能");
}
/// <summary>
/// Returns the pool registered under <paramref name="nodeKey"/>; with a single node the
/// key is ignored and that node is returned.
/// </summary>
/// <exception cref="Exception">Thrown when no node is registered under <paramref name="nodeKey"/>.</exception>
RedisClientPool GetNodeOrThrowNotFound(string nodeKey)
{
    if (Nodes.Count == 1) return Nodes.First().Value;
    // FIX: single TryGetValue instead of ContainsKey + indexer. On a ConcurrentDictionary the
    // two-step form is a TOCTOU race: the node could be removed between the check and the read.
    if (Nodes.TryGetValue(nodeKey, out var pool)) return pool;
    throw new Exception($"找不到群集节点:{nodeKey}");
}
#region 缓存壳
/// <summary>
/// Cache-aside helper: returns the cached value for <paramref name="key"/>, or invokes
/// <paramref name="getData"/> and stores its result for <paramref name="timeoutSeconds"/> seconds.
/// </summary>
/// <typeparam name="T">Cached value type</typeparam>
/// <param name="key">Key without the configured prefix</param>
/// <param name="timeoutSeconds">Cache lifetime in seconds; 0 bypasses the cache entirely</param>
/// <param name="getData">Factory producing the value on a cache miss</param>
/// <returns>The cached or freshly produced value</returns>
public T CacheShell<T>(string key, int timeoutSeconds, Func<T> getData)
{
    // A timeout of 0 means "no caching": always hit the source.
    if (timeoutSeconds == 0) return getData();

    var cached = Get(key);
    if (cached != null)
    {
        try
        {
            return this.DeserializeObject<T>(cached);
        }
        catch
        {
            // Corrupt entry: purge it before rethrowing so the next call can repopulate.
            Del(key);
            throw;
        }
    }

    var fresh = getData();
    Set(key, this.SerializeObject(fresh), timeoutSeconds);
    return fresh;
}
/// <summary>
/// Cache-aside helper over a hash field: returns the cached value of <paramref name="field"/>
/// in hash <paramref name="key"/>, or invokes <paramref name="getData"/> and stores the result.
/// Expiry is tracked per field (stored write time) since redis hashes have no per-field TTL.
/// </summary>
/// <typeparam name="T">Cached value type</typeparam>
/// <param name="key">Key without the configured prefix</param>
/// <param name="field">Hash field</param>
/// <param name="timeoutSeconds">Cache lifetime in seconds; 0 bypasses the cache entirely</param>
/// <param name="getData">Factory producing the value on a cache miss</param>
/// <returns>The cached or freshly produced value</returns>
public T CacheShell<T>(string key, string field, int timeoutSeconds, Func<T> getData)
{
    if (timeoutSeconds == 0) return getData();

    var cached = HGet(key, field);
    if (cached != null)
    {
        try
        {
            // Entries are stored as (value, unix-seconds write time).
            var entry = this.DeserializeObject<(T, long)>(cached);
            if (DateTime.Now.Subtract(_dt1970.AddSeconds(entry.Item2)).TotalSeconds <= timeoutSeconds) return entry.Item1;
        }
        catch
        {
            // Corrupt entry: purge it before rethrowing so the next call can repopulate.
            HDel(key, field);
            throw;
        }
    }

    var fresh = getData();
    HSet(key, field, this.SerializeObject((fresh, (long)DateTime.Now.Subtract(_dt1970).TotalSeconds)));
    return fresh;
}
/// <summary>
/// Cache shell over a hash: each element of <paramref name="fields"/> is stored as its own
/// cache slice so partially cached field sets can be reused; only missing or expired fields
/// are fetched from <paramref name="getData"/>.
/// </summary>
/// <typeparam name="T">Cached value type</typeparam>
/// <param name="key">Key without the configured prefix</param>
/// <param name="fields">Hash fields</param>
/// <param name="timeoutSeconds">Cache lifetime in seconds; 0 bypasses the cache entirely</param>
/// <param name="getData">Fetches source data for the uncached fields; must return (field, value)[] whose fields come from its input</param>
/// <returns>(field, value) pairs in the order of the distinct input fields</returns>
public (string key, T value)[] CacheShell<T>(string key, string[] fields, int timeoutSeconds, Func<string[], (string, T)[]> getData)
{
    fields = fields?.Distinct().ToArray();
    if (fields == null || fields.Length == 0) return new (string, T)[0];
    if (timeoutSeconds == 0) return getData(fields);

    var ret = new (string, T)[fields.Length];
    var cacheValue = HMGet(key, fields);
    // Maps each still-missing field to its slot index in ret.
    var fieldsMGet = new Dictionary<string, int>();

    for (var a = 0; a < ret.Length; a++)
    {
        if (cacheValue[a] != null)
        {
            try
            {
                // Each slice stores (value, unix-seconds write time); expiry is enforced here
                // because redis hashes have no per-field TTL.
                var value = this.DeserializeObject<(T, long)>(cacheValue[a]);
                if (DateTime.Now.Subtract(_dt1970.AddSeconds(value.Item2)).TotalSeconds <= timeoutSeconds)
                {
                    ret[a] = (fields[a], value.Item1);
                    continue;
                }
            }
            catch
            {
                // Corrupt slice: purge it before rethrowing so the next call can repopulate.
                HDel(key, fields[a]);
                throw;
            }
        }
        fieldsMGet.Add(fields[a], a);
    }

    if (fieldsMGet.Any())
    {
        var getDataIntput = fieldsMGet.Keys.ToArray();
        var data = getData(getDataIntput);
        // HMSET argument list: alternating field, serialized (value, write-time) pairs.
        var mset = new object[fieldsMGet.Count * 2];
        var msetIndex = 0;
        foreach (var d in data)
        {
            if (fieldsMGet.ContainsKey(d.Item1) == false) throw new Exception($"使用 CacheShell 请确认 getData 返回值 (string, T)[] 中的 Item1 值: {d.Item1} 存在于 输入参数: {string.Join(",", getDataIntput)}");
            ret[fieldsMGet[d.Item1]] = d;
            mset[msetIndex++] = d.Item1;
            mset[msetIndex++] = this.SerializeObject((d.Item2, (long)DateTime.Now.Subtract(_dt1970).TotalSeconds));
            fieldsMGet.Remove(d.Item1);
        }
        // Fields the source did not return are cached as default(T) so they are not re-queried.
        foreach (var fieldNull in fieldsMGet.Keys)
        {
            ret[fieldsMGet[fieldNull]] = (fieldNull, default(T));
            mset[msetIndex++] = fieldNull;
            mset[msetIndex++] = this.SerializeObject((default(T), (long)DateTime.Now.Subtract(_dt1970).TotalSeconds));
        }
        if (mset.Any()) HMSet(key, mset);
    }
    return ret;
}
#endregion
#region 分区方式 Execute
/// <summary>
/// Routes a single-key command to its node (via NodeRuleRaw in partitioned mode),
/// applies the pool's key prefix, and executes <paramref name="hander"/>.
/// </summary>
internal T ExecuteScalar<T>(string key, Func<Object<RedisClient>, string, T> hander)
{
    // A null key short-circuits without touching any node.
    if (key == null) return default(T);
    RedisClientPool pool;
    if (NodeRuleRaw == null || Nodes.Count == 1)
        pool = Nodes.First().Value;
    else
        pool = Nodes.TryGetValue(NodeRuleRaw(key), out var hit) ? hit : Nodes.First().Value;
    var prefixedKey = string.Concat(pool.Prefix, key);
    return GetAndExecute(pool, conn => hander(conn, prefixedKey));
}
/// <summary>
/// Routes a multi-key command across nodes: keys are bucketed per node, executed per bucket,
/// and the results are reassembled in the caller's original key order.
/// </summary>
internal T[] ExecuteArray<T>(string[] key, Func<Object<RedisClient>, string[], T[]> hander)
{
    if (key == null || key.Any() == false) return new T[0];
    if (NodeRuleRaw == null || Nodes.Count == 1)
    {
        // Single node: one round-trip with all (prefixed) keys.
        var pool = Nodes.First().Value;
        var keys = key.Select(a => string.Concat(pool.Prefix, a)).ToArray();
        return GetAndExecute(pool, conn => hander(conn, keys));
    }
    // Partitioned mode: bucket keys per node, remembering each key's original index.
    var rules = new Dictionary<string, List<(string, int)>>();
    for (var a = 0; a < key.Length; a++)
    {
        var rule = NodeRuleRaw(key[a]);
        if (rules.ContainsKey(rule)) rules[rule].Add((key[a], a));
        else rules.Add(rule, new List<(string, int)> { (key[a], a) });
    }
    T[] ret = new T[key.Length];
    foreach (var r in rules)
    {
        var pool = Nodes.TryGetValue(r.Key, out var b) ? b : Nodes.First().Value;
        var keys = r.Value.Select(a => string.Concat(pool.Prefix, a.Item1)).ToArray();
        GetAndExecute(pool, conn =>
        {
            var vals = hander(conn, keys);
            for (var z = 0; z < r.Value.Count; z++)
            {
                // Scatter results back to their original positions; missing values become default(T).
                ret[r.Value[z].Item2] = vals == null || z >= vals.Length ? default(T) : vals[z];
            }
            return 0;
        });
    }
    return ret;
}
internal long ExecuteNonQuery(string[] key, Func<Object<RedisClient>, string[], long> hander)
{
if (key == null || key.Any() == false) return 0;
if (NodeRuleRaw == null || Nodes.Count == 1)
{
var pool = Nodes.First().Value;
var keys = key.Select(a => string.Concat(pool.Prefix, a)).ToArray();
return GetAndExecute(pool, conn => hander(conn, keys));
}
var rules = new Dictionary<string, List<string>>();
for (var a = 0; a < key.Length; a++)
{
var rule = NodeRuleRaw(key[a]);
if (rules.ContainsKey(rule)) rules[rule].Add(key[a]);
else rules.Add(rule, new List<string> { key[a] });
}
long affrows = 0;
foreach (var r in rules)
{
var pool = Nodes.TryGetValue(r.Key, out var b) ? b : Nodes.First().Value;
var keys = r.Value.Select(a => string.Concat(pool.Prefix, a)).ToArray();
affrows += GetAndExecute(pool, conn => hander(conn, keys));
}
return affrows;
}
#region crc16
private static readonly ushort[] crc16tab = {
0x0000,0x1021,0x2042,0x3063,0x4084,0x50a5,0x60c6,0x70e7,
0x8108,0x9129,0xa14a,0xb16b,0xc18c,0xd1ad,0xe1ce,0xf1ef,
0x1231,0x0210,0x3273,0x2252,0x52b5,0x4294,0x72f7,0x62d6,
0x9339,0x8318,0xb37b,0xa35a,0xd3bd,0xc39c,0xf3ff,0xe3de,
0x2462,0x3443,0x0420,0x1401,0x64e6,0x74c7,0x44a4,0x5485,
0xa56a,0xb54b,0x8528,0x9509,0xe5ee,0xf5cf,0xc5ac,0xd58d,
0x3653,0x2672,0x1611,0x0630,0x76d7,0x66f6,0x5695,0x46b4,
0xb75b,0xa77a,0x9719,0x8738,0xf7df,0xe7fe,0xd79d,0xc7bc,
0x48c4,0x58e5,0x6886,0x78a7,0x0840,0x1861,0x2802,0x3823,
0xc9cc,0xd9ed,0xe98e,0xf9af,0x8948,0x9969,0xa90a,0xb92b,
0x5af5,0x4ad4,0x7ab7,0x6a96,0x1a71,0x0a50,0x3a33,0x2a12,
0xdbfd,0xcbdc,0xfbbf,0xeb9e,0x9b79,0x8b58,0xbb3b,0xab1a,
0x6ca6,0x7c87,0x4ce4,0x5cc5,0x2c22,0x3c03,0x0c60,0x1c41,
0xedae,0xfd8f,0xcdec,0xddcd,0xad2a,0xbd0b,0x8d68,0x9d49,
0x7e97,0x6eb6,0x5ed5,0x4ef4,0x3e13,0x2e32,0x1e51,0x0e70,
0xff9f,0xefbe,0xdfdd,0xcffc,0xbf1b,0xaf3a,0x9f59,0x8f78,
0x9188,0x81a9,0xb1ca,0xa1eb,0xd10c,0xc12d,0xf14e,0xe16f,
0x1080,0x00a1,0x30c2,0x20e3,0x5004,0x4025,0x7046,0x6067,
0x83b9,0x9398,0xa3fb,0xb3da,0xc33d,0xd31c,0xe37f,0xf35e,
0x02b1,0x1290,0x22f3,0x32d2,0x4235,0x5214,0x6277,0x7256,
0xb5ea,0xa5cb,0x95a8,0x8589,0xf56e,0xe54f,0xd52c,0xc50d,
0x34e2,0x24c3,0x14a0,0x0481,0x7466,0x6447,0x5424,0x4405,
0xa7db,0xb7fa,0x8799,0x97b8,0xe75f,0xf77e,0xc71d,0xd73c,
0x26d3,0x36f2,0x0691,0x16b0,0x6657,0x7676,0x4615,0x5634,
0xd94c,0xc96d,0xf90e,0xe92f,0x99c8,0x89e9,0xb98a,0xa9ab,
0x5844,0x4865,0x7806,0x6827,0x18c0,0x08e1,0x3882,0x28a3,
0xcb7d,0xdb5c,0xeb3f,0xfb1e,0x8bf9,0x9bd8,0xabbb,0xbb9a,
0x4a75,0x5a54,0x6a37,0x7a16,0x0af1,0x1ad0,0x2ab3,0x3a92,
0xfd2e,0xed0f,0xdd6c,0xcd4d,0xbdaa,0xad8b,0x9de8,0x8dc9,
0x7c26,0x6c07,0x5c64,0x4c45,0x3ca2,0x2c83,0x1ce0,0x0cc1,
0xef1f,0xff3e,0xcf5d,0xdf7c,0xaf9b,0xbfba,0x8fd9,0x9ff8,
0x6e17,0x7e36,0x4e55,0x5e74,0x2e93,0x3eb2,0x0ed1,0x1ef0
};
public static ushort GetClusterSlot(string key)
{
//HASH_SLOT = CRC16(key) mod 16384
var blob = Encoding.ASCII.GetBytes(key);
int offset = 0, count = blob.Length, start = -1, end = -1;
byte lt = (byte)'{', rt = (byte)'}';
for (int a = 0; a < count - 1; a++)
if (blob[a] == lt)
{
start = a;
break;
}
if (start >= 0)
{
for (int a = start + 1; a < count; a++)
if (blob[a] == rt)
{
end = a;
break;
}
}
if (start >= 0
&& end >= 0
&& --end != start)
{
offset = start + 1;
count = end - start;
}
uint crc = 0;
for (int i = 0; i < count; i++)
crc = ((crc << 8) ^ crc16tab[((crc >> 8) ^ blob[offset++]) & 0x00FF]) & 0x0000FFFF;
return (ushort)(crc % 16384);
}
#endregion
#endregion
/// <summary>
/// 创建管道传输,注意:官方集群时请务必预热slotCache,否则会产生moved错误
/// </summary>
/// <param name="handler"></param>
/// <returns></returns>
public object[] StartPipe(Action<CSRedisClientPipe<string>> handler)
{
if (handler == null) return new object[0];
var pipe = new CSRedisClientPipe<string>(this);
handler(pipe);
return pipe.EndPipe();
}
/// <summary>
/// 创建管道传输,注意:官方集群时请务必预热slotCache,否则会产生moved错误,打包提交如:RedisHelper.StartPipe().Set("a", "1").HSet("b", "f", "2").EndPipe();
/// </summary>
/// <returns></returns>
public CSRedisClientPipe<string> StartPipe()
{
return new CSRedisClientPipe<string>(this);
}
#region 服务器命令
        /// <summary>
        /// Runs server-level commands against every partition node.
        /// </summary>
        public NodesServerManagerProvider NodesServerManager { get; set; }
        /// <summary>
        /// Executes server administration commands on every partition node and returns
        /// one (node, value) tuple per node.
        /// </summary>
        public partial class NodesServerManagerProvider
        {
            private CSRedisClient _csredis;
            public NodesServerManagerProvider(CSRedisClient csredis)
            {
                _csredis = csredis;
            }
            /// <summary>
            /// Asynchronously rewrites the AOF (Append Only File) on every node
            /// </summary>
            /// <returns></returns>
            public (string node, string value)[] BgRewriteAof() => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.BgRewriteAof()))).ToArray();
            /// <summary>
            /// Asynchronously saves the current database to disk in the background, on every node
            /// </summary>
            /// <returns></returns>
            public (string node, string value)[] BgSave() => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.BgSave()))).ToArray();
            /// <summary>
            /// Closes a client connection on every node
            /// </summary>
            /// <param name="ip">ip</param>
            /// <param name="port">port</param>
            /// <returns></returns>
            public (string node, string value)[] ClientKill(string ip, int port) => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.ClientKill(ip, port)))).ToArray();
            /// <summary>
            /// Closes client connections matched by filter, on every node
            /// </summary>
            /// <param name="addr">ip:port</param>
            /// <param name="id">unique client id</param>
            /// <param name="type">type: normal | slave | pubsub</param>
            /// <param name="skipMe">skip the calling connection</param>
            /// <returns></returns>
            public (string node, long value)[] ClientKill(string addr = null, string id = null, ClientKillType? type = null, bool? skipMe = null) => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.ClientKill(addr, id, type?.ToString(), skipMe)))).ToArray();
            /// <summary>
            /// Lists the client connections of every node
            /// </summary>
            /// <returns></returns>
            public (string node, string value)[] ClientList() => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.ClientList()))).ToArray();
            /// <summary>
            /// Gets the current connection name on every node
            /// </summary>
            /// <returns></returns>
            public (string node, string value)[] ClientGetName() => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.ClientGetName()))).ToArray();
            /// <summary>
            /// Suspends processing of client commands for the given time, on every node
            /// </summary>
            /// <param name="timeout">pause duration</param>
            /// <returns></returns>
            public (string node, string value)[] ClientPause(TimeSpan timeout) => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.ClientPause(timeout)))).ToArray();
            /// <summary>
            /// Sets the current connection name, on every node
            /// </summary>
            /// <param name="connectionName">connection name</param>
            /// <returns></returns>
            public (string node, string value)[] ClientSetName(string connectionName) => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.ClientSetName(connectionName)))).ToArray();
            /// <summary>
            /// Returns the current server time of every node
            /// </summary>
            /// <returns></returns>
            public (string node, DateTime value)[] Time() => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.Time()))).ToArray();
            /// <summary>
            /// Gets the value of a configuration parameter from every node
            /// </summary>
            /// <param name="parameter">parameter name</param>
            /// <returns></returns>
            public (string node, Dictionary<string, string> value)[] ConfigGet(string parameter) => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.ConfigGet(parameter).ToDictionary(z => z.Item1, y => y.Item2)))).ToArray();
            /// <summary>
            /// Rewrites the redis.conf file each server was started with
            /// </summary>
            /// <returns></returns>
            public (string node, string value)[] ConfigRewrite() => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.ConfigRewrite()))).ToArray();
            /// <summary>
            /// Changes a redis configuration parameter without restart, on every node
            /// </summary>
            /// <param name="parameter">parameter name</param>
            /// <param name="value">new value</param>
            /// <returns></returns>
            public (string node, string value)[] ConfigSet(string parameter, string value) => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.ConfigSet(parameter, value)))).ToArray();
            /// <summary>
            /// Resets some of the statistics reported by INFO, on every node
            /// </summary>
            /// <returns></returns>
            public (string node, string value)[] ConfigResetStat() => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.ConfigResetStat()))).ToArray();
            /// <summary>
            /// Returns the number of keys in the current database of every node
            /// </summary>
            /// <returns></returns>
            public (string node, long value)[] DbSize() => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.DbSize()))).ToArray();
            /// <summary>
            /// Crashes every Redis server (debug command)
            /// </summary>
            /// <returns></returns>
            public (string node, string value)[] DebugSegFault() => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.DebugSegFault()))).ToArray();
            /// <summary>
            /// Deletes all keys of all databases, on every node
            /// </summary>
            /// <returns></returns>
            public (string node, string value)[] FlushAll() => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.FlushAll()))).ToArray();
            /// <summary>
            /// Deletes all keys of the current database, on every node
            /// </summary>
            /// <returns></returns>
            public (string node, string value)[] FlushDb() => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.FlushDb()))).ToArray();
            /// <summary>
            /// Gets information and statistics about every server
            /// </summary>
            /// <param name="section">section (all|default|server|clients|memory|persistence|stats|replication|cpu|commandstats|cluster|keyspace)</param>
            /// <returns></returns>
            public (string node, string value)[] Info(InfoSection? section = null) => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.Info(section?.ToString())))).ToArray();
            /// <summary>
            /// Returns the time each node last successfully saved to disk
            /// </summary>
            /// <returns></returns>
            public (string node, DateTime value)[] LastSave() => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.LastSave()))).ToArray();
            ///// <summary>
            ///// 实时打印出 Redis 服务器接收到的命令,调试用
            ///// </summary>
            ///// <param name="onReceived">接收命令</param>
            ///// <returns></returns>
            //public (string node, string value)[] Monitor(Action<object, object> onReceived) => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => {
            //    c.Value.MonitorReceived += (s, o) => onReceived?.Invoke(s, o.Message);
            //    return (a.Key, c.Value.Monitor());
            //})).ToArray();
            /// <summary>
            /// Returns the replication role of every node
            /// </summary>
            /// <returns></returns>
            public (string node, RedisRole value)[] Role() => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.Role()))).ToArray();
            /// <summary>
            /// Synchronously saves the dataset to disk, on every node
            /// </summary>
            /// <returns></returns>
            public (string node, string value)[] Save() => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.Save()))).ToArray();
            /// <summary>
            /// Shuts every server down, optionally saving first
            /// </summary>
            /// <param name="isSave">save before shutdown</param>
            /// <returns></returns>
            public (string node, string value)[] Shutdown(bool isSave = true) => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.Shutdown(isSave)))).ToArray();
            /// <summary>
            /// Makes every node a replica of the given master; a node already replicating
            /// another master stops that sync, discards the old dataset, and syncs anew
            /// </summary>
            /// <param name="host">master host</param>
            /// <param name="port">master port</param>
            /// <returns></returns>
            public (string node, string value)[] SlaveOf(string host, int port) => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.SlaveOf(host, port)))).ToArray();
            /// <summary>
            /// SLAVEOF NO ONE — turns every replica back into a master; the replicated
            /// dataset is kept
            /// </summary>
            /// <returns></returns>
            public (string node, string value)[] SlaveOfNoOne() => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.SlaveOfNoOne()))).ToArray();
            /// <summary>
            /// Reads up to <paramref name="count"/> slow-log entries from every node
            /// </summary>
            /// <param name="count">entry count</param>
            /// <returns></returns>
            public (string node, RedisSlowLogEntry[] value)[] SlowLogGet(long? count = null) => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.SlowLogGet(count)))).ToArray();
            /// <summary>
            /// Returns the slow-log length of every node
            /// </summary>
            /// <returns></returns>
            public (string node, long value)[] SlowLogLen() => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.SlowLogLen()))).ToArray();
            /// <summary>
            /// Clears the slow log of every node
            /// </summary>
            /// <returns></returns>
            public (string node, string value)[] SlowLogReset() => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.SlowLogReset()))).ToArray();
            /// <summary>
            /// Internal command used for replication
            /// </summary>
            /// <returns></returns>
            public (string node, byte[] value)[] Sync() => _csredis.Nodes.Values.Select(a => _csredis.GetAndExecute(a, c => (a.Key, c.Value.Sync()))).ToArray();
        }
/// <summary>
/// 在指定分区节点上,执行服务器命令
/// </summary>
/// <param name="node">节点</param>
/// <returns></returns>
public NodeServerManagerProvider NodeServerManager(string node) => new NodeServerManagerProvider(this, GetNodeOrThrowNotFound(node));
        /// <summary>
        /// Executes server administration commands against one specific partition node (pool).
        /// </summary>
        public partial class NodeServerManagerProvider
        {
            private CSRedisClient _csredis;
            private RedisClientPool _pool;
            public NodeServerManagerProvider(CSRedisClient csredis, RedisClientPool pool)
            {
                _csredis = csredis;
                _pool = pool;
            }
            /// <summary>
            /// Asynchronously rewrites the AOF (Append Only File)
            /// </summary>
            /// <returns></returns>
            public string BgRewriteAof() => _csredis.GetAndExecute(_pool, c => c.Value.BgRewriteAof());
            /// <summary>
            /// Asynchronously saves the current database to disk in the background
            /// </summary>
            /// <returns></returns>
            public string BgSave() => _csredis.GetAndExecute(_pool, c => c.Value.BgSave());
            /// <summary>
            /// Closes a client connection
            /// </summary>
            /// <param name="ip">ip</param>
            /// <param name="port">port</param>
            /// <returns></returns>
            public string ClientKill(string ip, int port) => _csredis.GetAndExecute(_pool, c => c.Value.ClientKill(ip, port));
            /// <summary>
            /// Closes client connections matched by filter
            /// </summary>
            /// <param name="addr">ip:port</param>
            /// <param name="id">unique client id</param>
            /// <param name="type">type: normal | slave | pubsub</param>
            /// <param name="skipMe">skip the calling connection</param>
            /// <returns></returns>
            public long ClientKill(string addr = null, string id = null, ClientKillType? type = null, bool? skipMe = null) => _csredis.GetAndExecute(_pool, c => c.Value.ClientKill(addr, id, type?.ToString(), skipMe));
            // NOTE(review): this nested enum shadows the outer ClientKillType referenced by
            // NodesServerManagerProvider — verify both declarations list the same members.
            public enum ClientKillType { normal, slave, pubsub }
            /// <summary>
            /// Lists the client connections of this node
            /// </summary>
            /// <returns></returns>
            public string ClientList() => _csredis.GetAndExecute(_pool, c => c.Value.ClientList());
            /// <summary>
            /// Gets the current connection name
            /// </summary>
            /// <returns></returns>
            public string ClientGetName() => _csredis.GetAndExecute(_pool, c => c.Value.ClientGetName());
            /// <summary>
            /// Suspends processing of client commands for the given time
            /// </summary>
            /// <param name="timeout">pause duration</param>
            /// <returns></returns>
            public string ClientPause(TimeSpan timeout) => _csredis.GetAndExecute(_pool, c => c.Value.ClientPause(timeout));
            /// <summary>
            /// Sets the current connection name
            /// </summary>
            /// <param name="connectionName">connection name</param>
            /// <returns></returns>
            public string ClientSetName(string connectionName) => _csredis.GetAndExecute(_pool, c => c.Value.ClientSetName(connectionName));
            /// <summary>
            /// Returns the current server time
            /// </summary>
            /// <returns></returns>
            public DateTime Time() => _csredis.GetAndExecute(_pool, c => c.Value.Time());
            /// <summary>
            /// Gets the value of a configuration parameter
            /// </summary>
            /// <param name="parameter">parameter name</param>
            /// <returns></returns>
            public Dictionary<string, string> ConfigGet(string parameter) => _csredis.GetAndExecute(_pool, c => c.Value.ConfigGet(parameter)).ToDictionary(z => z.Item1, y => y.Item2);
            /// <summary>
            /// Rewrites the redis.conf file the server was started with
            /// </summary>
            /// <returns></returns>
            public string ConfigRewrite() => _csredis.GetAndExecute(_pool, c => c.Value.ConfigRewrite());
            /// <summary>
            /// Changes a redis configuration parameter without restart
            /// </summary>
            /// <param name="parameter">parameter name</param>
            /// <param name="value">new value</param>
            /// <returns></returns>
            public string ConfigSet(string parameter, string value) => _csredis.GetAndExecute(_pool, c => c.Value.ConfigSet(parameter, value));
            /// <summary>
            /// Resets some of the statistics reported by INFO
            /// </summary>
            /// <returns></returns>
            public string ConfigResetStat() => _csredis.GetAndExecute(_pool, c => c.Value.ConfigResetStat());
            /// <summary>
            /// Returns the number of keys in the current database
            /// </summary>
            /// <returns></returns>
            public long DbSize() => _csredis.GetAndExecute(_pool, c => c.Value.DbSize());
            /// <summary>
            /// Crashes the Redis server (debug command)
            /// </summary>
            /// <returns></returns>
            public string DebugSegFault() => _csredis.GetAndExecute(_pool, c => c.Value.DebugSegFault());
            /// <summary>
            /// Deletes all keys of all databases
            /// </summary>
            /// <returns></returns>
            public string FlushAll() => _csredis.GetAndExecute(_pool, c => c.Value.FlushAll());
            /// <summary>
            /// Deletes all keys of the current database
            /// </summary>
            /// <returns></returns>
            public string FlushDb() => _csredis.GetAndExecute(_pool, c => c.Value.FlushDb());
            /// <summary>
            /// Gets information and statistics about the server
            /// </summary>
            /// <param name="section">section (Server | Clients | Memory | Persistence | Stats | Replication | CPU | Keyspace)</param>
            /// <returns></returns>
            public string Info(InfoSection? section = null) => _csredis.GetAndExecute(_pool, c => c.Value.Info(section?.ToString()));
            /// <summary>
            /// Returns the time Redis last successfully saved to disk
            /// </summary>
            /// <returns></returns>
            public DateTime LastSave() => _csredis.GetAndExecute(_pool, c => c.Value.LastSave());
            ///// <summary>
            ///// 实时打印出 Redis 服务器接收到的命令,调试用
            ///// </summary>
            ///// <param name="onReceived">接收命令</param>
            ///// <returns></returns>
            //public string Monitor(Action<object, object> onReceived) => _csredis.GetAndExecute(_pool, c => {
            //    c.Value.MonitorReceived += (s, o) => onReceived?.Invoke(s, o.Message);
            //    return c.Value.Monitor();
            //});
            /// <summary>
            /// Returns the replication role of the node
            /// </summary>
            /// <returns></returns>
            public RedisRole Role() => _csredis.GetAndExecute(_pool, c => c.Value.Role());
            /// <summary>
            /// Synchronously saves the dataset to disk
            /// </summary>
            /// <returns></returns>
            public string Save() => _csredis.GetAndExecute(_pool, c => c.Value.Save());
            /// <summary>
            /// Shuts the server down, optionally saving first
            /// </summary>
            /// <param name="isSave">save before shutdown</param>
            /// <returns></returns>
            public string Shutdown(bool isSave = true) => _csredis.GetAndExecute(_pool, c => c.Value.Shutdown(isSave));
            /// <summary>
            /// Makes the node a replica of the given master; a node already replicating
            /// another master stops that sync, discards the old dataset, and syncs anew
            /// </summary>
            /// <param name="host">master host</param>
            /// <param name="port">master port</param>
            /// <returns></returns>
            public string SlaveOf(string host, int port) => _csredis.GetAndExecute(_pool, c => c.Value.SlaveOf(host, port));
            /// <summary>
            /// SLAVEOF NO ONE — turns a replica back into a master; the replicated dataset is kept
            /// </summary>
            /// <returns></returns>
            public string SlaveOfNoOne() => _csredis.GetAndExecute(_pool, c => c.Value.SlaveOfNoOne());
            /// <summary>
            /// Reads up to <paramref name="count"/> slow-log entries
            /// </summary>
            /// <param name="count">entry count</param>
            /// <returns></returns>
            public RedisSlowLogEntry[] SlowLogGet(long? count = null) => _csredis.GetAndExecute(_pool, c => c.Value.SlowLogGet(count));
            /// <summary>
            /// Returns the slow-log length
            /// </summary>
            /// <returns></returns>
            public long SlowLogLen() => _csredis.GetAndExecute(_pool, c => c.Value.SlowLogLen());
            /// <summary>
            /// Clears the slow log
            /// </summary>
            /// <returns></returns>
            public string SlowLogReset() => _csredis.GetAndExecute(_pool, c => c.Value.SlowLogReset());
            /// <summary>
            /// Internal command used for replication
            /// </summary>
            /// <returns></returns>
            public byte[] Sync() => _csredis.GetAndExecute(_pool, c => c.Value.Sync());
        }
#endregion
#region 连接命令
/// <summary>
/// 验证密码是否正确
/// </summary>
/// <param name="nodeKey">分区key</param>
/// <param name="password">密码</param>
/// <returns></returns>
[Obsolete("不建议手工执行,连接池自己管理最佳")]
private bool Auth(string nodeKey, string password) => GetAndExecute(GetNodeOrThrowNotFound(nodeKey), c => c.Value.Auth(password)) == "OK";
/// <summary>
/// 打印字符串
/// </summary>
/// <param name="nodeKey">分区key</param>
/// <param name="message">消息</param>
/// <returns></returns>
public string Echo(string nodeKey, string message) => GetAndExecute(GetNodeOrThrowNotFound(nodeKey), c => c.Value.Echo(message));
/// <summary>
/// 打印字符串
/// </summary>
/// <param name="message">消息</param>
/// <returns></returns>
public string Echo(string message) => GetAndExecute(Nodes.First().Value, c => c.Value.Echo(message));
/// <summary>
/// 查看服务是否运行
/// </summary>
/// <param name="nodeKey">分区key</param>
/// <returns></returns>
public bool Ping(string nodeKey) => GetAndExecute(GetNodeOrThrowNotFound(nodeKey), c => c.Value.Ping()) == "PONG";
/// <summary>
/// 查看服务是否运行
/// </summary>
/// <returns></returns>
public bool Ping() => GetAndExecute(Nodes.First().Value, c => c.Value.Ping()) == "PONG";
/// <summary>
/// 关闭当前连接
/// </summary>
/// <param name="nodeKey">分区key</param>
/// <returns></returns>
[Obsolete("不建议手工执行,连接池自己管理最佳")]
private bool Quit(string nodeKey) => GetAndExecute(GetNodeOrThrowNotFound(nodeKey), c => c.Value.Quit()) == "OK";
/// <summary>
/// 切换到指定的数据库
/// </summary>
/// <param name="nodeKey">分区key</param>
/// <param name="index">数据库</param>
/// <returns></returns>
[Obsolete("不建议手工执行,连接池所有连接应该指向同一数据库,若手工修改将导致数据的不一致")]
private bool Select(string nodeKey, int index) => GetAndExecute(GetNodeOrThrowNotFound(nodeKey), c => c.Value.Select(index)) == "OK";
#endregion
#region Script
/// <summary>
/// 执行脚本
/// </summary>
/// <param name="script">Lua 脚本</param>
/// <param name="key">用于定位分区节点,不含prefix前辍</param>
/// <param name="args">参数</param>
/// <returns></returns>
public object Eval(string script, string key, params object[] args)
{
var args2 = args?.Select(z => this.SerializeRedisValueInternal(z)).ToArray();
return ExecuteScalar(key, (c, k) => c.Value.Eval(script, new[] { k }, args2));
}
/// <summary>
/// 执行脚本
/// </summary>
/// <param name="sha1">脚本缓存的sha1</param>
/// <param name="key">用于定位分区节点,不含prefix前辍</param>
/// <param name="args">参数</param>
/// <returns></returns>
public object EvalSHA(string sha1, string key, params object[] args)
{
var args2 = args?.Select(z => this.SerializeRedisValueInternal(z)).ToArray();
return ExecuteScalar(key, (c, k) => c.Value.EvalSHA(sha1, new[] { k }, args2));
}
/// <summary>
/// 校验所有分区节点中,脚本是否已经缓存。任何分区节点未缓存sha1,都返回false。
/// </summary>
/// <param name="sha1">脚本缓存的sha1</param>
/// <returns></returns>
public bool[] ScriptExists(params string[] sha1) => Nodes.Select(a => GetAndExecute(a.Value, c => c.Value.ScriptExists(sha1))?.Where(z => z == false).Any() == false).ToArray();
/// <summary>
/// 清除所有分区节点中,所有 Lua 脚本缓存
/// </summary>
public void ScriptFlush() => Nodes.Select(a => GetAndExecute(a.Value, c => c.Value.ScriptFlush()));
/// <summary>
/// 杀死所有分区节点中,当前正在运行的 Lua 脚本
/// </summary>
public void ScriptKill() => Nodes.Select(a => GetAndExecute(a.Value, c => c.Value.ScriptKill()));
/// <summary>
/// 在所有分区节点中,缓存脚本后返回 sha1(同样的脚本在任何服务器,缓存后的 sha1 都是相同的)
/// </summary>
/// <param name="script">Lua 脚本</param>
/// <returns></returns>
public string ScriptLoad(string script) => Nodes.Select(a => GetAndExecute(a.Value, c => (c.Pool.Policy.Name.ToString(), c.Value.ScriptLoad(script)))).First().Item2;
#endregion
#region Pub/Sub
/// <summary>
/// 用于将信息发送到指定分区节点的频道,最终消息发布格式:1|message
/// </summary>
/// <param name="channel">频道名</param>
/// <param name="message">消息文本</param>
/// <returns></returns>
public long Publish(string channel, string message)
{
var msgid = HIncrBy("csredisclient:Publish:msgid", channel, 1);
return ExecuteScalar(channel, (c, k) => c.Value.Publish(channel, $"{msgid}|{message}"));
}
/// <summary>
/// 用于将信息发送到指定分区节点的频道,与 Publish 方法不同,不返回消息id头,即 1|
/// </summary>
/// <param name="channel">频道名</param>
/// <param name="message">消息文本</param>
/// <returns></returns>
public long PublishNoneMessageId(string channel, string message) => ExecuteScalar(channel, (c, k) => c.Value.Publish(channel, message));
/// <summary>
/// 查看所有订阅频道
/// </summary>
/// <param name="pattern"></param>
/// <returns></returns>
public string[] PubSubChannels(string pattern)
{
var ret = new List<string>();
Nodes.Values.ToList().ForEach(a => ret.AddRange(GetAndExecute(a, c => c.Value.PubSubChannels(pattern))));
return ret.ToArray();
}
/// <summary>
/// 查看所有模糊订阅端的数量<para></para>
/// 注意:分区模式下,其他客户端的订阅可能不会返回
/// </summary>
/// <returns></returns>
public long PubSubNumPat() => GetAndExecute(Nodes.First().Value, c => c.Value.PubSubNumPat());
/// <summary>
/// 查看所有订阅端的数量<para></para>
/// 注意:分区模式下,其他客户端的订阅可能不会返回
/// </summary>
/// <param name="channels">频道</param>
/// <returns></returns>
public Dictionary<string, long> PubSubNumSub(params string[] channels) => ExecuteArray(channels, (c, k) =>
{
var prefix = (c.Pool as RedisClientPool).Prefix;
return c.Value.PubSubNumSub(k.Select(z => string.IsNullOrEmpty(prefix) == false && z.StartsWith(prefix) ? z.Substring(prefix.Length) : z).ToArray());
}).ToDictionary(z => z.Item1, y => y.Item2);
/// <summary>
/// 订阅,根据分区规则返回SubscribeObject,Subscribe(("chan1", msg => Console.WriteLine(msg.Body)), ("chan2", msg => Console.WriteLine(msg.Body)))
/// </summary>
/// <param name="channels">频道和接收器</param>
/// <returns>返回可停止订阅的对象</returns>
public SubscribeObject Subscribe(params (string, Action<SubscribeMessageEventArgs>)[] channels)
{
var chans = channels.Select(a => a.Item1).Distinct().ToArray();
var onmessages = channels.ToDictionary(a => a.Item1, b => b.Item2);
var rules = new Dictionary<string, List<string>>();
for (var a = 0; a < chans.Length; a++)
{
var rule = NodeRuleRaw(chans[a]);
if (rules.ContainsKey(rule)) rules[rule].Add(chans[a]);
else rules.Add(rule, new List<string> { chans[a] });
}
List<(string[] keys, Object<RedisClient> conn)> subscrs = new List<(string[] keys, Object<RedisClient> conn)>();
foreach (var r in rules)
{
var pool = Nodes.TryGetValue(r.Key, out var p) ? p : Nodes.First().Value;
subscrs.Add((r.Value.ToArray(), pool.Get()));
}
var so = new SubscribeObject(this, chans, subscrs.ToArray(), onmessages);
return so;
}
        /// <summary>
        /// Holds one background subscription thread per partition-node connection.
        /// Dispatches incoming messages to the per-channel handlers, keeps each
        /// subscription alive with a 60-second self-publish probe, and resubscribes
        /// (with a 3-second backoff) when a connection drops.
        /// </summary>
        public class SubscribeObject : IDisposable
        {
            internal CSRedisClient Redis;
            // all subscribed channel names (across every node)
            public string[] Channels { get; }
            // one (channels, pooled connection) pair per partition node
            public (string[] chans, Object<RedisClient> conn)[] Subscrs { get; }
            // channel name -> receiver callback
            internal Dictionary<string, Action<SubscribeMessageEventArgs>> OnMessageDic;
            public bool IsUnsubscribed { get; private set; } = true;
            internal SubscribeObject(CSRedisClient redis, string[] channels, (string[] chans, Object<RedisClient> conn)[] subscrs, Dictionary<string, Action<SubscribeMessageEventArgs>> onMessageDic)
            {
                this.Redis = redis;
                this.Channels = channels;
                this.Subscrs = subscrs;
                this.OnMessageDic = onMessageDic;
                this.IsUnsubscribed = false;
                // make sure the subscription is torn down on process exit / Ctrl+C
                AppDomain.CurrentDomain.ProcessExit += (s1, e1) =>
                {
                    this.Dispose();
                };
                try
                {
                    Console.CancelKeyPress += (s1, e1) =>
                    {
                        if (e1.Cancel) return;
                        this.Dispose();
                    };
                }
                catch { }
                // one dedicated blocking-subscribe thread per node connection
                foreach (var subscr in this.Subscrs)
                {
                    new Thread(Subscribe).Start(subscr);
                }
            }
            // Thread body: subscribes on one connection and loops until Dispose() is called.
            private void Subscribe(object state)
            {
                var subscr = ((string[] chans, Object<RedisClient> conn))state;
                var pool = subscr.conn.Pool as RedisClientPool;
                // sentinel payload the keepalive timer publishes to ourselves; a received
                // copy proves the subscription is still live
                var testCSRedis_Subscribe_Keepalive = "0\r\n";// $"CSRedis_Subscribe_Keepalive{Guid.NewGuid().ToString()}";
                var testKeepalived = true;
                EventHandler<RedisSubscriptionReceivedEventArgs> SubscriptionReceived = (a, b) =>
                {
                    try
                    {
                        if (b.Message.Type == "message" && this.OnMessageDic != null && this.OnMessageDic.TryGetValue(b.Message.Channel, out var action) == true)
                        {
                            // payloads published via Publish() carry an "id|" header
                            var msgidIdx = b.Message.Body.IndexOf('|');
                            if (msgidIdx != -1 && long.TryParse(b.Message.Body.Substring(0, msgidIdx), out var trylong))
                                action(new SubscribeMessageEventArgs
                                {
                                    MessageId = trylong,
                                    Body = b.Message.Body.Substring(msgidIdx + 1),
                                    Channel = b.Message.Channel
                                });
                            else if (b.Message.Body != testCSRedis_Subscribe_Keepalive)
                                // headerless payload (PublishNoneMessageId or external publisher)
                                action(new SubscribeMessageEventArgs
                                {
                                    MessageId = 0,
                                    Body = b.Message.Body,
                                    Channel = b.Message.Channel
                                });
                            else
                            {
                                // our own keepalive probe came back: the subscription is alive
                                testKeepalived = true;
                            }
                        }
                    }
                    catch (Exception ex)
                    {
                        // a handler exception must not kill the subscribe thread; report it loudly
                        var bgcolor = Console.BackgroundColor;
                        var forecolor = Console.ForegroundColor;
                        Console.BackgroundColor = ConsoleColor.DarkRed;
                        Console.ForegroundColor = ConsoleColor.White;
                        Console.Write($"订阅方法执行出错【{pool.Key}】(channels:{string.Join(",", Channels)})/(chans:{string.Join(",", subscr.chans)}):{ex.Message}\r\n{ex.StackTrace}");
                        Console.BackgroundColor = bgcolor;
                        Console.ForegroundColor = forecolor;
                        Console.WriteLine();
                    }
                };
                subscr.conn.Value.SubscriptionReceived += SubscriptionReceived;
                bool isSubscribeing = false;
                bool isKeepliveReSubscribe = false;
                // every 60s: publish the sentinel to each channel and wait up to ~5s for it
                // to come back; if it does not, force the socket closed so the outer loop
                // resubscribes
                Timer keeplive = new Timer(state2 =>
                {
                    if (isSubscribeing == false) return;
                    try
                    {
                        foreach (var chan in subscr.chans)
                        {
                            testKeepalived = false;
                            Redis.PublishNoneMessageId(chan, testCSRedis_Subscribe_Keepalive);
                            for (var a = 0; a < 50; a++)
                            {
                                if (isSubscribeing == false) return;
                                Thread.CurrentThread.Join(100);
                                if (testKeepalived) break;
                            }
                            if (testKeepalived == false)
                            {
                                isKeepliveReSubscribe = true;
                                // subscription dropped; tear the connection down to trigger resubscribe
                                try { subscr.conn.Value.Unsubscribe(); } catch { }
                                try { subscr.conn.Value.Quit(); } catch { }
                                try { subscr.conn.Value.Socket?.Shutdown(System.Net.Sockets.SocketShutdown.Both); } catch { }
                                break;
                            }
                        }
                    }
                    catch
                    {
                    }
                }, null, 60000, 60000);
                while (IsUnsubscribed == false)
                {
                    try
                    {
                        subscr.conn.Value.Ping();
                        var bgcolor = Console.BackgroundColor;
                        var forecolor = Console.ForegroundColor;
                        Console.BackgroundColor = ConsoleColor.DarkGreen;
                        Console.ForegroundColor = ConsoleColor.White;
                        Console.Write($"正在订阅【{pool.Key}】(channels:{string.Join(",", Channels)})/(chans:{string.Join(",", subscr.chans)})");
                        Console.BackgroundColor = bgcolor;
                        Console.ForegroundColor = forecolor;
                        Console.WriteLine();
                        isSubscribeing = true;
                        isKeepliveReSubscribe = false;
                        // SetSocketOption KeepAlive proved ineffective in testing; channels were still lost
                        //subscr.conn.Value.Socket?.SetSocketOption(System.Net.Sockets.SocketOptionLevel.Socket, System.Net.Sockets.SocketOptionName.KeepAlive, 60000);
                        // no receive timeout: Subscribe() blocks on this connection indefinitely
                        subscr.conn.Value.ReceiveTimeout = 0;
                        subscr.conn.Value.Subscribe(subscr.chans);
                        if (IsUnsubscribed == false)
                        {
                            if (isKeepliveReSubscribe == true)
                                throw new Exception("每60秒检查发现订阅频道丢失");
                            // server dropped the connection (IsConnected == false) https://github.com/2881099/csredis/issues/37
                            if (subscr.conn.Value.IsConnected == false)
                                throw new Exception("redis-server 连接已断开");
                        }
                    }
                    catch (Exception ex)
                    {
                        if (IsUnsubscribed) break;
                        var bgcolor = Console.BackgroundColor;
                        var forecolor = Console.ForegroundColor;
                        Console.BackgroundColor = ConsoleColor.DarkYellow;
                        Console.ForegroundColor = ConsoleColor.White;
                        Console.Write($"订阅出错【{pool.Key}】(channels:{string.Join(",", Channels)})/(chans:{string.Join(",", subscr.chans)}):{ex.Message},3秒后重连。。。");
                        Console.BackgroundColor = bgcolor;
                        Console.ForegroundColor = forecolor;
                        Console.WriteLine();
                        // back off, rebuild the pooled connection, and re-hook the handler
                        Thread.CurrentThread.Join(1000 * 3);
                        subscr.conn.ResetValue();
                        subscr.conn.Value.SubscriptionReceived += SubscriptionReceived;
                    }
                }
                // loop exited: detach and stop the keepalive timer
                subscr.conn.Value.SubscriptionReceived -= SubscriptionReceived;
                isSubscribeing = false;
                isKeepliveReSubscribe = false;
                try { keeplive.Dispose(); } catch { }
            }
            public void Unsubscribe()
            {
                this.Dispose();
            }
            public void Dispose()
            {
                this.IsUnsubscribed = true;
                if (this.Subscrs != null)
                {
                    foreach (var subscr in this.Subscrs)
                    {
                        //try { subscr.conn.Value.Unsubscribe(); } catch { }
                        // restore the pool's normal sync timeout before returning the connection
                        try { subscr.conn.Value.ReceiveTimeout = (subscr.conn.Pool as RedisClientPool)._policy._syncTimeout; } catch { }
                        subscr.conn.Pool.Return(subscr.conn, true);
                    }
                }
            }
        }
        /// <summary>
        /// Payload delivered to a channel subscription handler.
        /// </summary>
        public class SubscribeMessageEventArgs
        {
            /// <summary>
            /// Per-channel message id (0 when the payload carried no "id|" header)
            /// </summary>
            public long MessageId { get; set; }
            /// <summary>
            /// Channel name
            /// </summary>
            public string Channel { get; set; }
            /// <summary>
            /// Received message body
            /// </summary>
            public string Body { get; set; }
        }
/// <summary>
/// Pattern subscribe (PSUBSCRIBE) across every partition node; each message is
/// handled only once. Returns a PSubscribeObject, e.g.
/// PSubscribe(new [] { "chan1*", "chan2*" }, msg => Console.WriteLine(msg.Body))
/// </summary>
/// <param name="channelPatterns">Channel patterns</param>
/// <param name="pmessage">Message handler</param>
/// <returns>An object that can stop the pattern subscription</returns>
public PSubscribeObject PSubscribe(string[] channelPatterns, Action<PSubscribePMessageEventArgs> pmessage)
{
    var patterns = channelPatterns.Distinct().ToArray();
    // grab one pooled connection per partition node, in node order
    var connections = Nodes.Select(node => node.Value.Get()).ToArray();
    return new PSubscribeObject(this, patterns, connections, pmessage);
}
/// <summary>
/// Handle for a pattern subscription (PSUBSCRIBE) spanning every partition node.
/// One dedicated thread per node keeps the subscription alive and reconnects on
/// failure; disposing stops the loops and returns the connections to their pools.
/// </summary>
public class PSubscribeObject : IDisposable
{
    internal CSRedisClient Redis;
    public string[] Channels { get; }
    internal Action<PSubscribePMessageEventArgs> OnPMessage;
    public Object<RedisClient>[] RedisConnections { get; }
    public bool IsPUnsubscribed { get; private set; } = true;
    internal PSubscribeObject(CSRedisClient redis, string[] channels, Object<RedisClient>[] redisConnections, Action<PSubscribePMessageEventArgs> onPMessage)
    {
        this.Redis = redis;
        this.Channels = channels;
        this.RedisConnections = redisConnections;
        this.OnPMessage = onPMessage;
        this.IsPUnsubscribed = false;
        // Dispose on process exit / Ctrl+C so pooled connections are returned.
        AppDomain.CurrentDomain.ProcessExit += (s1, e1) =>
        {
            this.Dispose();
        };
        try
        {
            Console.CancelKeyPress += (s1, e1) =>
            {
                if (e1.Cancel) return;
                this.Dispose();
            };
        }
        catch { }
        // One blocking subscribe loop per node connection.
        foreach (var conn in this.RedisConnections)
        {
            new Thread(PSubscribe).Start(conn);
        }
    }
    // Thread entry: subscribes one node connection to the channel patterns and
    // keeps retrying (3s backoff) until IsPUnsubscribed becomes true.
    private void PSubscribe(object state)
    {
        var conn = (Object<RedisClient>)state;
        var pool = conn.Pool as RedisClientPool;
        // Shared hash key suffix used by the Lua dedup script below.
        var psubscribeKey = string.Join("pSpLiT", Channels);
        EventHandler<RedisSubscriptionReceivedEventArgs> SubscriptionReceived = (a, b) =>
        {
            try
            {
                if (b.Message.Type == "pmessage" && this.OnPMessage != null)
                {
                    // Body layout is "<messageId>|<payload>" when the publisher supplied an id.
                    var msgidIdx = b.Message.Body.IndexOf('|');
                    if (msgidIdx != -1 && long.TryParse(b.Message.Body.Substring(0, msgidIdx), out var trylong))
                    {
                        // Dedup across nodes: atomically record the last-seen message id per
                        // channel in a hash; only the first node to see this id returns 1.
                        var readed = Redis.Eval($@"
ARGV[1] = redis.call('HGET', KEYS[1], '{b.Message.Channel}')
if ARGV[1] ~= ARGV[2] then
redis.call('HSET', KEYS[1], '{b.Message.Channel}', ARGV[2])
return 1
end
return 0", $"CSRedisPSubscribe{psubscribeKey}", "", trylong.ToString());
                        if (readed?.ToString() == "1")
                            this.OnPMessage(new PSubscribePMessageEventArgs
                            {
                                Body = b.Message.Body.Substring(msgidIdx + 1),
                                Channel = b.Message.Channel,
                                MessageId = trylong,
                                Pattern = b.Message.Pattern
                            });
                        //else
                        //    message was already handled by another node:
                        //    Console.WriteLine($"id:{trylong} channel:{b.Message.Channel} pattern:{b.Message.Pattern} body:{b.Message.Body.Substring(msgidIdx + 1)}");
                    }
                    else
                        // No parsable message id — deliver as-is with MessageId = 0.
                        this.OnPMessage(new PSubscribePMessageEventArgs
                        {
                            Body = b.Message.Body,
                            Channel = b.Message.Channel,
                            MessageId = 0,
                            Pattern = b.Message.Pattern
                        });
                }
            }
            catch (Exception ex)
            {
                // Handler errors are reported to the console but never kill the loop.
                var bgcolor = Console.BackgroundColor;
                var forecolor = Console.ForegroundColor;
                Console.BackgroundColor = ConsoleColor.DarkRed;
                Console.ForegroundColor = ConsoleColor.White;
                Console.Write($"模糊订阅出错【{pool.Key}】(channels:{string.Join(",", Channels)}):{ex.Message}\r\n{ex.StackTrace}");
                Console.BackgroundColor = bgcolor;
                Console.ForegroundColor = forecolor;
                Console.WriteLine();
            }
        };
        conn.Value.SubscriptionReceived += SubscriptionReceived;
        while (true)
        {
            try
            {
                conn.Value.Ping();
                var bgcolor = Console.BackgroundColor;
                var forecolor = Console.ForegroundColor;
                Console.BackgroundColor = ConsoleColor.DarkGreen;
                Console.ForegroundColor = ConsoleColor.White;
                Console.Write($"正在模糊订阅【{pool.Key}】(channels:{string.Join(",", Channels)})");
                Console.BackgroundColor = bgcolor;
                Console.ForegroundColor = forecolor;
                Console.WriteLine();
                //conn.Value.Socket?.SetSocketOption(System.Net.Sockets.SocketOptionLevel.Socket, System.Net.Sockets.SocketOptionName.KeepAlive, 60000);
                // Disable the receive timeout: PSubscribe blocks on this thread
                // until the subscription ends or the connection drops.
                conn.Value.ReceiveTimeout = 0;
                conn.Value.PSubscribe(this.Channels);
                if (IsPUnsubscribed == false)
                {
                    // Server dropped the connection: IsConnected == false
                    // https://github.com/2881099/csredis/issues/37
                    if (conn.Value.IsConnected == false)
                        throw new Exception("redis-server 连接已断开");
                }
            }
            catch (Exception ex)
            {
                if (IsPUnsubscribed) break;
                var bgcolor = Console.BackgroundColor;
                var forecolor = Console.ForegroundColor;
                Console.BackgroundColor = ConsoleColor.DarkYellow;
                Console.ForegroundColor = ConsoleColor.White;
                Console.Write($"模糊订阅出错【{pool.Key}】(channels:{string.Join(",", Channels)}):{ex.Message},3秒后重连。。。");
                Console.BackgroundColor = bgcolor;
                Console.ForegroundColor = forecolor;
                Console.WriteLine();
                // Back off 3 seconds, rebuild the connection and re-attach the
                // handler (ResetValue yields a fresh RedisClient instance).
                Thread.CurrentThread.Join(1000 * 3);
                conn.ResetValue();
                conn.Value.SubscriptionReceived += SubscriptionReceived;
            }
        }
    }
    /// <summary>
    /// Stops the pattern subscription. Identical to calling <see cref="Dispose"/>.
    /// </summary>
    public void PUnsubscribe()
    {
        this.Dispose();
    }
    /// <summary>
    /// Marks the subscription as stopped and returns every connection to its pool.
    /// </summary>
    public void Dispose()
    {
        this.IsPUnsubscribed = true;
        if (this.RedisConnections != null)
        {
            foreach (var conn in this.RedisConnections)
            {
                //try { conn.Value.PUnsubscribe(); } catch { }
                // Restore the pool's normal sync receive timeout before returning.
                try { conn.Value.ReceiveTimeout = (conn.Pool as RedisClientPool)._policy._syncTimeout; } catch { }
                conn.Pool.Return(conn, true);
            }
        }
    }
}
/// <summary>
/// Event payload delivered to PSUBSCRIBE message handlers.
/// </summary>
public class PSubscribePMessageEventArgs : SubscribeMessageEventArgs
{
    /// <summary>
    /// The subscription pattern that matched the channel
    /// </summary>
    public string Pattern { get; set; }
}
#endregion
#region 使用列表实现订阅发布 lpush + blpop
/// <summary>
/// Subscriber built on lpush + blpop in broadcast (non-competing) mode: every
/// registered client receives each message. Implementation: each client registers
/// itself in a hash "{listKey}_SubscribeListBroadcast"; whichever client pops a
/// message from the main list relays it to every other client's private list.
/// </summary>
/// <param name="listKey">list key (without the configured prefix)</param>
/// <param name="clientId">client identifier; duplicate ids compete for messages, a unique id is guaranteed delivery</param>
/// <param name="onMessage">message handler</param>
/// <returns></returns>
public SubscribeListBroadcastObject SubscribeListBroadcast(string listKey, string clientId, Action<string> onMessage)
{
    // Register this client in the broadcast roster (idempotent).
    this.HSetNx($"{listKey}_SubscribeListBroadcast", clientId, 1);
    var subobj = new SubscribeListBroadcastObject
    {
        OnDispose = () =>
        {
            // Deregister so other clients stop relaying to us.
            this.HDel($"{listKey}_SubscribeListBroadcast", clientId);
        }
    };
    // Subscribe to messages relayed by other clients (our private list).
    subobj.SubscribeLists.Add(this.SubscribeList($"{listKey}_{clientId}", onMessage));
    // Subscribe to the main list; on receipt, fan the message out to all other clients.
    subobj.SubscribeLists.Add(this.SubscribeList(new[] { listKey }, (key, msg) =>
    {
        try
        {
            // Re-register each time in case the roster hash was deleted.
            this.HSetNx($"{listKey}_SubscribeListBroadcast", clientId, 1);
            if (msg == null) return;
            var clients = this.HKeys($"{listKey}_SubscribeListBroadcast");
            var pipe = this.StartPipe();
            foreach (var c in clients)
                if (string.Compare(clientId, c, true) != 0) // skip relaying to self
                    pipe.LPush($"{listKey}_{c}", msg);
            pipe.EndPipe();
            onMessage?.Invoke(msg);
        }
        catch (ObjectDisposedException)
        {
        }
        catch (Exception ex)
        {
            var bgcolor = Console.BackgroundColor;
            var forecolor = Console.ForegroundColor;
            Console.BackgroundColor = ConsoleColor.DarkRed;
            Console.ForegroundColor = ConsoleColor.White;
            Console.Write($"列表订阅出错(listKey:{listKey}):{ex.Message}");
            Console.BackgroundColor = bgcolor;
            Console.ForegroundColor = forecolor;
            Console.WriteLine();
        }
    }, true));
    // Clean up on process exit / Ctrl+C.
    AppDomain.CurrentDomain.ProcessExit += (s1, e1) =>
    {
        subobj.Dispose();
    };
    try
    {
        Console.CancelKeyPress += (s1, e1) =>
        {
            if (e1.Cancel) return;
            subobj.Dispose();
        };
    }
    catch { }
    return subobj;
}
/// <summary>
/// Handle returned by SubscribeListBroadcast; disposing it runs the cleanup
/// callback and then stops every underlying list subscription.
/// </summary>
public class SubscribeListBroadcastObject : IDisposable
{
    internal Action OnDispose;
    internal List<SubscribeListObject> SubscribeLists = new List<SubscribeListObject>();
    public void Dispose()
    {
        try
        {
            OnDispose?.Invoke();
        }
        catch (ObjectDisposedException)
        {
        }
        for (var idx = 0; idx < SubscribeLists.Count; idx++)
            SubscribeLists[idx].Dispose();
    }
}
/// <summary>
/// Subscriber built on lpush + blpop in competing mode: only one client receives
/// each message.
/// </summary>
/// <param name="listKey">list key (without the configured prefix)</param>
/// <param name="onMessage">message handler</param>
/// <returns></returns>
public SubscribeListObject SubscribeList(string listKey, Action<string> onMessage)
{
    return SubscribeList(new[] { listKey }, (key, body) => onMessage(body), false);
}
/// <summary>
/// Subscriber built on lpush + blpop in competing mode: only one client receives
/// each message.
/// </summary>
/// <param name="listKeys">one or more list keys (without the configured prefix)</param>
/// <param name="onMessage">message handler; arg 1: key, arg 2: message body</param>
/// <returns></returns>
public SubscribeListObject SubscribeList(string[] listKeys, Action<string, string> onMessage)
{
    return SubscribeList(listKeys, onMessage, false);
}
// Core list-subscribe loop: polls the given lists with a 5-second blocking pop on
// a background thread until the returned handle is disposed.
// ignoreEmpty: when true, null/empty bodies (e.g. blpop timeouts) are not delivered.
private SubscribeListObject SubscribeList(string[] listKeys, Action<string, string> onMessage, bool ignoreEmpty)
{
    if (listKeys == null || listKeys.Any() == false) throw new ArgumentException("参数 listKey 不可为空");
    var listKeysStr = string.Join(", ", listKeys);
    var isMultiKey = listKeys.Length > 1;
    var subobj = new SubscribeListObject();
    // NOTE(review): bgcolor/forecolor are captured by the polling thread's closure
    // below and reassigned inside its catch block — shared across threads by design?
    var bgcolor = Console.BackgroundColor;
    var forecolor = Console.ForegroundColor;
    Console.BackgroundColor = ConsoleColor.DarkGreen;
    Console.ForegroundColor = ConsoleColor.White;
    Console.Write($"正在订阅列表(listKey:{listKeysStr})");
    Console.BackgroundColor = bgcolor;
    Console.ForegroundColor = forecolor;
    Console.WriteLine();
    new Thread(() =>
    {
        while (subobj.IsUnsubscribed == false)
        {
            try
            {
                if (isMultiKey)
                {
                    // Blocking pop across all keys; null means the 5s timeout elapsed.
                    var msg = this.BLPopWithKey(5, listKeys);
                    if (msg != null)
                        if (!ignoreEmpty || (ignoreEmpty && !string.IsNullOrEmpty(msg.Value.value)))
                            onMessage?.Invoke(msg.Value.key, msg.Value.value);
                }
                else
                {
                    var msg = this.BLPop(5, listKeys);
                    // NOTE(review): unlike the multi-key branch there is no null check
                    // here, so with ignoreEmpty == false a timeout delivers msg == null
                    // to the handler — confirm whether callers rely on that.
                    if (!ignoreEmpty || (ignoreEmpty && !string.IsNullOrEmpty(msg)))
                        onMessage?.Invoke(listKeys[0], msg);
                }
            }
            catch (ObjectDisposedException)
            {
                // Client was disposed while blocked — loop will exit via IsUnsubscribed.
            }
            catch (Exception ex)
            {
                bgcolor = Console.BackgroundColor;
                forecolor = Console.ForegroundColor;
                Console.BackgroundColor = ConsoleColor.DarkRed;
                Console.ForegroundColor = ConsoleColor.White;
                Console.Write($"列表订阅出错(listKey:{listKeysStr}):{ex.Message}");
                Console.BackgroundColor = bgcolor;
                Console.ForegroundColor = forecolor;
                Console.WriteLine();
                // Back off 3 seconds before polling again.
                Thread.CurrentThread.Join(3000);
            }
        }
    }).Start();
    // Stop polling on process exit / Ctrl+C.
    AppDomain.CurrentDomain.ProcessExit += (s1, e1) =>
    {
        subobj.Dispose();
    };
    try
    {
        Console.CancelKeyPress += (s1, e1) =>
        {
            if (e1.Cancel) return;
            subobj.Dispose();
        };
    }
    catch { }
    return subobj;
}
/// <summary>
/// Handle for a list subscription; disposing flags the polling loop to stop and
/// cascades disposal to any linked subscriptions.
/// </summary>
public class SubscribeListObject : IDisposable
{
    internal List<SubscribeListObject> OtherSubs = new List<SubscribeListObject>();
    public bool IsUnsubscribed { get; set; }
    public void Dispose()
    {
        this.IsUnsubscribed = true;
        for (var idx = 0; idx < OtherSubs.Count; idx++)
            OtherSubs[idx].Dispose();
    }
}
#endregion
#region HyperLogLog
/// <summary>
/// Adds the given elements to the HyperLogLog stored at key.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="elements">elements to add</param>
/// <returns>false when no elements were supplied; otherwise the server result</returns>
public bool PfAdd<T>(string key, params T[] elements)
{
    if (elements == null || elements.Any() == false) return false;
    var args = elements.Select(z => this.SerializeRedisValueInternal(z)).ToArray();
    return ExecuteScalar(key, (c, k) => c.Value.PfAdd(k, args));
}
/// <summary>
/// Returns the approximated cardinality of the given HyperLogLog(s).<para></para>
/// Note: under partitioning this throws when the keys map to different partition nodes.
/// </summary>
/// <param name="keys">keys (without the configured prefix)</param>
/// <returns>approximated cardinality</returns>
public long PfCount(params string[] keys) => NodesNotSupport(keys, 0, (c, k) => c.Value.PfCount(k));
/// <summary>
/// Merges multiple HyperLogLogs into a single destination HyperLogLog.<para></para>
/// Note: under partitioning this throws when the keys map to different partition nodes.
/// </summary>
/// <param name="destKey">destination HyperLogLog key (without the configured prefix)</param>
/// <param name="sourceKeys">source HyperLogLog keys (without the configured prefix)</param>
/// <returns>true when the server replies OK</returns>
public bool PfMerge(string destKey, params string[] sourceKeys) => NodesNotSupport(new[] { destKey }.Concat(sourceKeys).ToArray(), false, (c, k) => c.Value.PfMerge(k.First(), k.Skip(1).ToArray()) == "OK");
#endregion
#region Sorted Set
/// <summary>
/// [redis-server 5.0.0] Removes and returns up to count members with the highest
/// scores. Highest-scored member comes first.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="count">number of members to pop</param>
/// <returns>popped members with their scores, highest score first</returns>
public (string member, decimal score)[] ZPopMax(string key, long count) => ExecuteScalar(key, (c, k) => c.Value.ZPopMax(k, count)).Select(a => (a.Item1, a.Item2)).ToArray();
/// <summary>
/// [redis-server 5.0.0] Removes and returns up to count members with the highest
/// scores, deserializing each member to T. Highest-scored member comes first.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="count">number of members to pop</param>
/// <returns>popped members with their scores, highest score first</returns>
public (T member, decimal score)[] ZPopMax<T>(string key, long count) => this.DeserializeRedisValueTuple1Internal<T, decimal>(ExecuteScalar(key, (c, k) => c.Value.ZPopMaxBytes(k, count)));
/// <summary>
/// [redis-server 5.0.0] Removes and returns up to count members with the lowest
/// scores. Lowest-scored member comes first.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="count">number of members to pop</param>
/// <returns>popped members with their scores, lowest score first</returns>
public (string member, decimal score)[] ZPopMin(string key, long count) => ExecuteScalar(key, (c, k) => c.Value.ZPopMin(k, count)).Select(a => (a.Item1, a.Item2)).ToArray();
/// <summary>
/// [redis-server 5.0.0] Removes and returns up to count members with the lowest
/// scores, deserializing each member to T. Lowest-scored member comes first.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="count">number of members to pop</param>
/// <returns>popped members with their scores, lowest score first</returns>
public (T member, decimal score)[] ZPopMin<T>(string key, long count) => this.DeserializeRedisValueTuple1Internal<T, decimal>(ExecuteScalar(key, (c, k) => c.Value.ZPopMinBytes(k, count)));
/// <summary>
/// Adds one or more members to a sorted set, or updates the score of existing members.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="scoreMembers">one or more (score, member) pairs</param>
/// <returns>number of members added (0 when no pairs were supplied)</returns>
public long ZAdd(string key, params (decimal, object)[] scoreMembers)
{
    if (scoreMembers == null || scoreMembers.Any() == false) return 0;
    var args = scoreMembers.Select(a => new Tuple<decimal, object>(a.Item1, this.SerializeRedisValueInternal(a.Item2))).ToArray();
    return ExecuteScalar(key, (c, k) => c.Value.ZAdd(k, args));
}
/// <summary>
/// Returns the number of members in the sorted set.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <returns>cardinality of the sorted set</returns>
public long ZCard(string key) => ExecuteScalar(key, (c, k) => c.Value.ZCard(k));
/// <summary>
/// Counts the members with a score in the given range.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="min">minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="max">maximum score; decimal.MaxValue is sent as +inf</param>
/// <returns>number of members in range</returns>
public long ZCount(string key, decimal min, decimal max) => ExecuteScalar(key, (c, k) => c.Value.ZCount(k, min == decimal.MinValue ? "-inf" : min.ToString(), max == decimal.MaxValue ? "+inf" : max.ToString()));
/// <summary>
/// Counts the members with a score in the given range (string syntax).
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="min">minimum score: -inf, (1 (exclusive), 1 (inclusive)</param>
/// <param name="max">maximum score: +inf, (10 (exclusive), 10 (inclusive)</param>
/// <returns>number of members in range</returns>
public long ZCount(string key, string min, string max) => ExecuteScalar(key, (c, k) => c.Value.ZCount(k, min, max));
/// <summary>
/// Increments the score of a member in the sorted set by the given amount.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="member">member</param>
/// <param name="increment">increment (default 1)</param>
/// <returns>the member's new score</returns>
public decimal ZIncrBy(string key, object member, decimal increment = 1)
{
    var args = this.SerializeRedisValueInternal(member);
    return ExecuteScalar(key, (c, k) => c.Value.ZIncrBy(k, increment, args));
}
/// <summary>
/// Intersects one or more sorted sets and stores the result in destination.
/// </summary>
/// <param name="destination">destination sorted set (without the configured prefix)</param>
/// <param name="weights">optional per-set multiplication factors; when omitted each factor defaults to 1</param>
/// <param name="aggregate">Sum | Min | Max</param>
/// <param name="keys">one or more source sorted sets (without the configured prefix)</param>
/// <returns>number of members in the resulting set</returns>
public long ZInterStore(string destination, decimal[] weights, RedisAggregate aggregate, params string[] keys)
{
    if (keys == null || keys.Length == 0) throw new Exception("keys 参数不可为空");
    if (weights != null && weights.Length != keys.Length) throw new Exception("weights 和 keys 参数长度必须相同");
    return NodesNotSupport(new[] { destination }.Concat(keys).ToArray(), 0, (c, k) => c.Value.ZInterStore(k.First(), weights, aggregate, k.Skip(1).ToArray()));
}
/// <summary>
/// Returns the members in the given index range, lowest score first.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="start">start index; 0 is the first element, -1 the last</param>
/// <param name="stop">stop index; 0 is the first element, -1 the last</param>
/// <returns>members in range</returns>
public string[] ZRange(string key, long start, long stop) => ExecuteScalar(key, (c, k) => c.Value.ZRange(k, start, stop, false))
/// <summary>
/// Returns the members in the given index range, deserialized to T, lowest score first.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="start">start index; 0 is the first element, -1 the last</param>
/// <param name="stop">stop index; 0 is the first element, -1 the last</param>
/// <returns>members in range</returns>
public T[] ZRange<T>(string key, long start, long stop) => this.DeserializeRedisValueArrayInternal<T>(ExecuteScalar(key, (c, k) => c.Value.ZRangeBytes(k, start, stop, false)));
/// <summary>
/// Returns the members and scores in the given index range, lowest score first.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="start">start index; 0 is the first element, -1 the last</param>
/// <param name="stop">stop index; 0 is the first element, -1 the last</param>
/// <returns>members with their scores</returns>
public (string member, decimal score)[] ZRangeWithScores(string key, long start, long stop) => ExecuteScalar(key, (c, k) => c.Value.ZRangeWithScores(k, start, stop)).Select(a => (a.Item1, a.Item2)).ToArray();
/// <summary>
/// Returns the members and scores in the given index range, members deserialized to T.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="start">start index; 0 is the first element, -1 the last</param>
/// <param name="stop">stop index; 0 is the first element, -1 the last</param>
/// <returns>members with their scores</returns>
public (T member, decimal score)[] ZRangeWithScores<T>(string key, long start, long stop) => this.DeserializeRedisValueTuple1Internal<T, decimal>(ExecuteScalar(key, (c, k) => c.Value.ZRangeBytesWithScores(k, start, stop)));
/// <summary>
/// Returns the members with a score in the given range, lowest score first.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="min">minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="max">maximum score; decimal.MaxValue is sent as +inf</param>
/// <param name="count">maximum number of members to return</param>
/// <param name="offset">offset into the matching members</param>
/// <returns>members in range</returns>
public string[] ZRangeByScore(string key, decimal min, decimal max, long? count = null, long offset = 0) =>
    ExecuteScalar(key, (c, k) => c.Value.ZRangeByScore(k, min == decimal.MinValue ? "-inf" : min.ToString(), max == decimal.MaxValue ? "+inf" : max.ToString(), false, offset, count));
/// <summary>
/// Returns the members with a score in the given range, deserialized to T.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="min">minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="max">maximum score; decimal.MaxValue is sent as +inf</param>
/// <param name="count">maximum number of members to return</param>
/// <param name="offset">offset into the matching members</param>
/// <returns>members in range</returns>
public T[] ZRangeByScore<T>(string key, decimal min, decimal max, long? count = null, long offset = 0) =>
    this.DeserializeRedisValueArrayInternal<T>(ExecuteScalar(key, (c, k) => c.Value.ZRangeBytesByScore(k, min == decimal.MinValue ? "-inf" : min.ToString(), max == decimal.MaxValue ? "+inf" : max.ToString(), false, offset, count)));
/// <summary>
/// Returns the members with a score in the given range (string syntax).
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="min">minimum score: -inf, (1 (exclusive), 1 (inclusive)</param>
/// <param name="max">maximum score: +inf, (10 (exclusive), 10 (inclusive)</param>
/// <param name="count">maximum number of members to return</param>
/// <param name="offset">offset into the matching members</param>
/// <returns>members in range</returns>
public string[] ZRangeByScore(string key, string min, string max, long? count = null, long offset = 0) =>
    ExecuteScalar(key, (c, k) => c.Value.ZRangeByScore(k, min, max, false, offset, count));
/// <summary>
/// Returns the members with a score in the given range (string syntax), deserialized to T.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="min">minimum score: -inf, (1 (exclusive), 1 (inclusive)</param>
/// <param name="max">maximum score: +inf, (10 (exclusive), 10 (inclusive)</param>
/// <param name="count">maximum number of members to return</param>
/// <param name="offset">offset into the matching members</param>
/// <returns>members in range</returns>
public T[] ZRangeByScore<T>(string key, string min, string max, long? count = null, long offset = 0) =>
    this.DeserializeRedisValueArrayInternal<T>(ExecuteScalar(key, (c, k) => c.Value.ZRangeBytesByScore(k, min, max, false, offset, count)));
/// <summary>
/// Returns the members and scores with a score in the given range.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="min">minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="max">maximum score; decimal.MaxValue is sent as +inf</param>
/// <param name="count">maximum number of members to return</param>
/// <param name="offset">offset into the matching members</param>
/// <returns>members with their scores</returns>
public (string member, decimal score)[] ZRangeByScoreWithScores(string key, decimal min, decimal max, long? count = null, long offset = 0) =>
    ExecuteScalar(key, (c, k) => c.Value.ZRangeByScoreWithScores(k, min == decimal.MinValue ? "-inf" : min.ToString(), max == decimal.MaxValue ? "+inf" : max.ToString(), offset, count)).Select(z => (z.Item1, z.Item2)).ToArray();
/// <summary>
/// Returns the members and scores with a score in the given range, members deserialized to T.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="min">minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="max">maximum score; decimal.MaxValue is sent as +inf</param>
/// <param name="count">maximum number of members to return</param>
/// <param name="offset">offset into the matching members</param>
/// <returns>members with their scores</returns>
public (T member, decimal score)[] ZRangeByScoreWithScores<T>(string key, decimal min, decimal max, long? count = null, long offset = 0) =>
    this.DeserializeRedisValueTuple1Internal<T, decimal>(ExecuteScalar(key, (c, k) => c.Value.ZRangeBytesByScoreWithScores(k, min == decimal.MinValue ? "-inf" : min.ToString(), max == decimal.MaxValue ? "+inf" : max.ToString(), offset, count)));
/// <summary>
/// Returns the members and scores with a score in the given range (string syntax).
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="min">minimum score: -inf, (1 (exclusive), 1 (inclusive)</param>
/// <param name="max">maximum score: +inf, (10 (exclusive), 10 (inclusive)</param>
/// <param name="count">maximum number of members to return</param>
/// <param name="offset">offset into the matching members</param>
/// <returns>members with their scores</returns>
public (string member, decimal score)[] ZRangeByScoreWithScores(string key, string min, string max, long? count = null, long offset = 0) =>
    ExecuteScalar(key, (c, k) => c.Value.ZRangeByScoreWithScores(k, min, max, offset, count)).Select(z => (z.Item1, z.Item2)).ToArray();
/// <summary>
/// Returns the members and scores with a score in the given range (string syntax), members deserialized to T.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="min">minimum score: -inf, (1 (exclusive), 1 (inclusive)</param>
/// <param name="max">maximum score: +inf, (10 (exclusive), 10 (inclusive)</param>
/// <param name="count">maximum number of members to return</param>
/// <param name="offset">offset into the matching members</param>
/// <returns>members with their scores</returns>
public (T member, decimal score)[] ZRangeByScoreWithScores<T>(string key, string min, string max, long? count = null, long offset = 0) =>
    this.DeserializeRedisValueTuple1Internal<T, decimal>(ExecuteScalar(key, (c, k) => c.Value.ZRangeBytesByScoreWithScores(k, min, max, offset, count)));
/// <summary>
/// Returns the index (rank) of the member in the sorted set, lowest score = rank 0.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="member">member</param>
/// <returns>rank, or null when the member does not exist</returns>
public long? ZRank(string key, object member)
{
    var args = this.SerializeRedisValueInternal(member);
    return ExecuteScalar(key, (c, k) => c.Value.ZRank(k, args));
}
/// <summary>
/// Removes one or more members from the sorted set.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="member">one or more members</param>
/// <returns>number of members removed (0 when none were supplied)</returns>
public long ZRem<T>(string key, params T[] member)
{
    if (member == null || member.Any() == false) return 0;
    var args = member.Select(z => this.SerializeRedisValueInternal(z)).ToArray();
    return ExecuteScalar(key, (c, k) => c.Value.ZRem(k, args));
}
/// <summary>
/// Removes all members in the given rank range.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="start">start index; 0 is the first element, -1 the last</param>
/// <param name="stop">stop index; 0 is the first element, -1 the last</param>
/// <returns>number of members removed</returns>
public long ZRemRangeByRank(string key, long start, long stop) => ExecuteScalar(key, (c, k) => c.Value.ZRemRangeByRank(k, start, stop));
/// <summary>
/// Removes all members with a score in the given range.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="min">minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="max">maximum score; decimal.MaxValue is sent as +inf</param>
/// <returns>number of members removed</returns>
public long ZRemRangeByScore(string key, decimal min, decimal max) => ExecuteScalar(key, (c, k) => c.Value.ZRemRangeByScore(k, min == decimal.MinValue ? "-inf" : min.ToString(), max == decimal.MaxValue ? "+inf" : max.ToString()));
/// <summary>
/// Removes all members with a score in the given range (string syntax).
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="min">minimum score: -inf, (1 (exclusive), 1 (inclusive)</param>
/// <param name="max">maximum score: +inf, (10 (exclusive), 10 (inclusive)</param>
/// <returns>number of members removed</returns>
public long ZRemRangeByScore(string key, string min, string max) => ExecuteScalar(key, (c, k) => c.Value.ZRemRangeByScore(k, min, max));
/// <summary>
/// Returns the members in the given index range, highest score first.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="start">start index; 0 is the first element, -1 the last</param>
/// <param name="stop">stop index; 0 is the first element, -1 the last</param>
/// <returns>members in range, highest score first</returns>
public string[] ZRevRange(string key, long start, long stop) => ExecuteScalar(key, (c, k) => c.Value.ZRevRange(k, start, stop, false));
/// <summary>
/// Returns the members in the given index range, deserialized to T, highest score first.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="start">start index; 0 is the first element, -1 the last</param>
/// <param name="stop">stop index; 0 is the first element, -1 the last</param>
/// <returns>members in range, highest score first</returns>
public T[] ZRevRange<T>(string key, long start, long stop) => this.DeserializeRedisValueArrayInternal<T>(ExecuteScalar(key, (c, k) => c.Value.ZRevRangeBytes(k, start, stop, false)));
/// <summary>
/// Returns the members and scores in the given index range, highest score first.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="start">start index; 0 is the first element, -1 the last</param>
/// <param name="stop">stop index; 0 is the first element, -1 the last</param>
/// <returns>members with their scores, highest score first</returns>
public (string member, decimal score)[] ZRevRangeWithScores(string key, long start, long stop) => ExecuteScalar(key, (c, k) => c.Value.ZRevRangeWithScores(k, start, stop)).Select(a => (a.Item1, a.Item2)).ToArray();
/// <summary>
/// Returns the members and scores in the given index range, members deserialized to T, highest score first.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="start">start index; 0 is the first element, -1 the last</param>
/// <param name="stop">stop index; 0 is the first element, -1 the last</param>
/// <returns>members with their scores, highest score first</returns>
public (T member, decimal score)[] ZRevRangeWithScores<T>(string key, long start, long stop) => this.DeserializeRedisValueTuple1Internal<T, decimal>(ExecuteScalar(key, (c, k) => c.Value.ZRevRangeBytesWithScores(k, start, stop)));
/// <summary>
/// Returns the members with a score in the given range, highest score first.
/// NOTE(review): this overload declares offset as long? while the sibling
/// overloads use long — left as-is because changing it would break callers.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="max">maximum score; decimal.MaxValue is sent as +inf</param>
/// <param name="min">minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="count">maximum number of members to return</param>
/// <param name="offset">offset into the matching members</param>
/// <returns>members in range, highest score first</returns>
public string[] ZRevRangeByScore(string key, decimal max, decimal min, long? count = null, long? offset = 0) => ExecuteScalar(key, (c, k) => c.Value.ZRevRangeByScore(k, max == decimal.MaxValue ? "+inf" : max.ToString(), min == decimal.MinValue ? "-inf" : min.ToString(), false, offset, count));
/// <summary>
/// Returns the members with a score in the given range, deserialized to T, highest score first.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="max">maximum score; decimal.MaxValue is sent as +inf</param>
/// <param name="min">minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="count">maximum number of members to return</param>
/// <param name="offset">offset into the matching members</param>
/// <returns>members in range, highest score first</returns>
public T[] ZRevRangeByScore<T>(string key, decimal max, decimal min, long? count = null, long offset = 0) =>
    this.DeserializeRedisValueArrayInternal<T>(ExecuteScalar(key, (c, k) => c.Value.ZRevRangeBytesByScore(k, max == decimal.MaxValue ? "+inf" : max.ToString(), min == decimal.MinValue ? "-inf" : min.ToString(), false, offset, count)));
/// <summary>
/// Returns the members with a score in the given range (string syntax), highest score first.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="max">maximum score: +inf, (10 (exclusive), 10 (inclusive)</param>
/// <param name="min">minimum score: -inf, (1 (exclusive), 1 (inclusive)</param>
/// <param name="count">maximum number of members to return</param>
/// <param name="offset">offset into the matching members</param>
/// <returns>members in range, highest score first</returns>
public string[] ZRevRangeByScore(string key, string max, string min, long? count = null, long? offset = 0) => ExecuteScalar(key, (c, k) => c.Value.ZRevRangeByScore(k, max, min, false, offset, count));
/// <summary>
/// Returns the members with a score in the given range (string syntax), deserialized to T, highest score first.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="max">maximum score: +inf, (10 (exclusive), 10 (inclusive)</param>
/// <param name="min">minimum score: -inf, (1 (exclusive), 1 (inclusive)</param>
/// <param name="count">maximum number of members to return</param>
/// <param name="offset">offset into the matching members</param>
/// <returns>members in range, highest score first</returns>
public T[] ZRevRangeByScore<T>(string key, string max, string min, long? count = null, long offset = 0) =>
    this.DeserializeRedisValueArrayInternal<T>(ExecuteScalar(key, (c, k) => c.Value.ZRevRangeBytesByScore(k, max, min, false, offset, count)));
/// <summary>
/// Returns the members and scores with a score in the given range, highest score first.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="max">maximum score; decimal.MaxValue is sent as +inf</param>
/// <param name="min">minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="count">maximum number of members to return</param>
/// <param name="offset">offset into the matching members</param>
/// <returns>members with their scores, highest score first</returns>
public (string member, decimal score)[] ZRevRangeByScoreWithScores(string key, decimal max, decimal min, long? count = null, long offset = 0) =>
    ExecuteScalar(key, (c, k) => c.Value.ZRevRangeByScoreWithScores(k, max == decimal.MaxValue ? "+inf" : max.ToString(), min == decimal.MinValue ? "-inf" : min.ToString(), offset, count)).Select(z => (z.Item1, z.Item2)).ToArray();
/// <summary>
/// Returns the members and scores with a score in the given range, members deserialized to T, highest score first.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="max">maximum score; decimal.MaxValue is sent as +inf</param>
/// <param name="min">minimum score; decimal.MinValue is sent as -inf</param>
/// <param name="count">maximum number of members to return</param>
/// <param name="offset">offset into the matching members</param>
/// <returns>members with their scores, highest score first</returns>
public (T member, decimal score)[] ZRevRangeByScoreWithScores<T>(string key, decimal max, decimal min, long? count = null, long offset = 0) =>
    this.DeserializeRedisValueTuple1Internal<T, decimal>(ExecuteScalar(key, (c, k) => c.Value.ZRevRangeBytesByScoreWithScores(k, max == decimal.MaxValue ? "+inf" : max.ToString(), min == decimal.MinValue ? "-inf" : min.ToString(), offset, count)));
/// <summary>
/// Returns the members and scores with a score in the given range (string syntax), highest score first.
/// </summary>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="max">maximum score: +inf, (10 (exclusive), 10 (inclusive)</param>
/// <param name="min">minimum score: -inf, (1 (exclusive), 1 (inclusive)</param>
/// <param name="count">maximum number of members to return</param>
/// <param name="offset">offset into the matching members</param>
/// <returns>members with their scores, highest score first</returns>
public (string member, decimal score)[] ZRevRangeByScoreWithScores(string key, string max, string min, long? count = null, long offset = 0) =>
    ExecuteScalar(key, (c, k) => c.Value.ZRevRangeByScoreWithScores(k, max, min, offset, count)).Select(z => (z.Item1, z.Item2)).ToArray();
/// <summary>
/// Returns the members and scores with a score in the given range (string syntax), members deserialized to T, highest score first.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">key (without the configured prefix)</param>
/// <param name="max">maximum score: +inf, (10 (exclusive), 10 (inclusive)</param>
/// <param name="min">minimum score: -inf, (1 (exclusive), 1 (inclusive)</param>
/// <param name="count">maximum number of members to return</param>
/// <param name="offset">offset into the matching members</param>
/// <returns>members with their scores, highest score first</returns>
public (T member, decimal score)[] ZRevRangeByScoreWithScores<T>(string key, string max, string min, long? count = null, long offset = 0) =>
    this.DeserializeRedisValueTuple1Internal<T, decimal>(ExecuteScalar(key, (c, k) => c.Value.ZRevRangeBytesByScoreWithScores(k, max, min, offset, count)));
/// <summary>
/// 返回有序集合中指定成员的排名,有序集成员按分数值递减(从大到小)排序
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="member">成员</param>
/// <returns></returns>
public long? ZRevRank(string key, object member)
{
var args = this.SerializeRedisValueInternal(member);
return ExecuteScalar(key, (c, k) => c.Value.ZRevRank(k, args));
}
/// <summary>
/// 返回有序集中,成员的分数值
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="member">成员</param>
/// <returns></returns>
public decimal? ZScore(string key, object member)
{
var args = this.SerializeRedisValueInternal(member);
return ExecuteScalar(key, (c, k) => c.Value.ZScore(k, args));
}
/// <summary>
/// Computes the union of one or more sorted sets and stores the result in the new sorted set <paramref name="destination"/>.
/// </summary>
/// <param name="destination">Destination sorted set, without prefix</param>
/// <param name="weights">Optional WEIGHTS: one multiplication factor per input set. When null, every factor defaults to 1.</param>
/// <param name="aggregate">Sum | Min | Max</param>
/// <param name="keys">One or more sorted sets, without prefix</param>
/// <returns>Number of members in the resulting sorted set</returns>
public long ZUnionStore(string destination, decimal[] weights, RedisAggregate aggregate, params string[] keys)
{
if (keys == null || keys.Length == 0) throw new Exception("keys 参数不可为空");
if (weights != null && weights.Length != keys.Length) throw new Exception("weights 和 keys 参数长度必须相同");
// Multi-key command: route destination + sources together (cluster-unsafe nodes rejected by NodesNotSupport).
return NodesNotSupport(new[] { destination }.Concat(keys).ToArray(), 0, (c, k) => c.Value.ZUnionStore(k.First(), weights, aggregate, k.Skip(1).ToArray()));
}
/// <summary>
/// Incrementally iterates members and scores of a sorted set.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="cursor">Cursor position</param>
/// <param name="pattern">Match pattern</param>
/// <param name="count">Count hint</param>
/// <returns></returns>
public RedisScan<(string member, decimal score)> ZScan(string key, long cursor, string pattern = null, long? count = null)
{
    var page = ExecuteScalar(key, (conn, realKey) => conn.Value.ZScan(realKey, cursor, pattern, count));
    var items = page.Items.Select(item => (item.Item1, item.Item2)).ToArray();
    return new RedisScan<(string, decimal)>(page.Cursor, items);
}
/// <summary>
/// Incrementally iterates members and scores of a sorted set.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <param name="cursor">Cursor position</param>
/// <param name="pattern">Match pattern</param>
/// <param name="count">Count hint</param>
/// <returns></returns>
public RedisScan<(T member, decimal score)> ZScan<T>(string key, long cursor, string pattern = null, long? count = null)
{
    var page = ExecuteScalar(key, (conn, realKey) => conn.Value.ZScanBytes(realKey, cursor, pattern, count));
    var items = this.DeserializeRedisValueTuple1Internal<T, decimal>(page.Items);
    return new RedisScan<(T, decimal)>(page.Cursor, items);
}
/// <summary>
/// When all members of a sorted set share the same score, members are ordered lexicographically;
/// returns the members of <paramref name="key"/> whose values lie between min and max.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="min">'(' exclusive, '[' inclusive, '+' positive infinity, '-' negative infinity. ZRANGEBYLEX zset - + returns every member</param>
/// <param name="max">'(' exclusive, '[' inclusive, '+' positive infinity, '-' negative infinity. ZRANGEBYLEX zset - + returns every member</param>
/// <param name="count">How many members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public string[] ZRangeByLex(string key, string min, string max, long? count = null, long offset = 0)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.ZRangeByLex(realKey, min, max, offset, count));
}
/// <summary>
/// When all members of a sorted set share the same score, members are ordered lexicographically;
/// returns the members of <paramref name="key"/> whose values lie between min and max.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <param name="min">'(' exclusive, '[' inclusive, '+' positive infinity, '-' negative infinity. ZRANGEBYLEX zset - + returns every member</param>
/// <param name="max">'(' exclusive, '[' inclusive, '+' positive infinity, '-' negative infinity. ZRANGEBYLEX zset - + returns every member</param>
/// <param name="count">How many members to return</param>
/// <param name="offset">Offset into the result set</param>
/// <returns></returns>
public T[] ZRangeByLex<T>(string key, string min, string max, long? count = null, long offset = 0)
{
    var raw = ExecuteScalar(key, (conn, realKey) => conn.Value.ZRangeBytesByLex(realKey, min, max, offset, count));
    return this.DeserializeRedisValueArrayInternal<T>(raw);
}
/// <summary>
/// When all members of a sorted set share the same score, removes the members of
/// <paramref name="key"/> whose values lie lexicographically between min and max.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="min">'(' exclusive, '[' inclusive, '+' positive infinity, '-' negative infinity</param>
/// <param name="max">'(' exclusive, '[' inclusive, '+' positive infinity, '-' negative infinity</param>
/// <returns>Number of members removed</returns>
public long ZRemRangeByLex(string key, string min, string max)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.ZRemRangeByLex(realKey, min, max));
}
/// <summary>
/// When all members of a sorted set share the same score, counts the members of
/// <paramref name="key"/> whose values lie lexicographically between min and max.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="min">'(' exclusive, '[' inclusive, '+' positive infinity, '-' negative infinity</param>
/// <param name="max">'(' exclusive, '[' inclusive, '+' positive infinity, '-' negative infinity</param>
/// <returns>Number of members in the range</returns>
public long ZLexCount(string key, string min, string max)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.ZLexCount(realKey, min, max));
}
#endregion
#region Set
/// <summary>
/// Adds one or more members to a set.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="members">One or more members</param>
/// <returns>Number of members actually added</returns>
public long SAdd<T>(string key, params T[] members)
{
    // No-op when nothing was supplied.
    if (members == null || members.Any() == false) return 0;
    var serialized = members.Select(m => this.SerializeRedisValueInternal(m)).ToArray();
    return ExecuteScalar(key, (conn, realKey) => conn.Value.SAdd(realKey, serialized));
}
/// <summary>
/// Gets the number of members in a set.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public long SCard(string key)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.SCard(realKey));
}
/// <summary>
/// Returns the difference of all the given sets.
/// </summary>
/// <param name="keys">Keys, without prefix</param>
/// <returns></returns>
public string[] SDiff(params string[] keys)
{
    return NodesNotSupport(keys, new string[0], (conn, realKeys) => conn.Value.SDiff(realKeys));
}
/// <summary>
/// Returns the difference of all the given sets.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="keys">Keys, without prefix</param>
/// <returns></returns>
public T[] SDiff<T>(params string[] keys)
{
    var raw = NodesNotSupport(keys, new byte[0][], (conn, realKeys) => conn.Value.SDiffBytes(realKeys));
    return this.DeserializeRedisValueArrayInternal<T>(raw);
}
/// <summary>
/// Computes the difference of all the given sets and stores it in <paramref name="destination"/>.
/// </summary>
/// <param name="destination">Destination set, without prefix</param>
/// <param name="keys">One or more sets, without prefix</param>
/// <returns>Number of members in the resulting set</returns>
public long SDiffStore(string destination, params string[] keys)
{
    var allKeys = new[] { destination }.Concat(keys).ToArray();
    return NodesNotSupport(allKeys, 0, (conn, realKeys) => conn.Value.SDiffStore(realKeys.First(), realKeys.Skip(1).ToArray()));
}
/// <summary>
/// Returns the intersection of all the given sets.
/// </summary>
/// <param name="keys">Keys, without prefix</param>
/// <returns></returns>
public string[] SInter(params string[] keys)
{
    return NodesNotSupport(keys, new string[0], (conn, realKeys) => conn.Value.SInter(realKeys));
}
/// <summary>
/// Returns the intersection of all the given sets.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="keys">Keys, without prefix</param>
/// <returns></returns>
public T[] SInter<T>(params string[] keys)
{
    var raw = NodesNotSupport(keys, new byte[0][], (conn, realKeys) => conn.Value.SInterBytes(realKeys));
    return this.DeserializeRedisValueArrayInternal<T>(raw);
}
/// <summary>
/// Computes the intersection of all the given sets and stores it in <paramref name="destination"/>.
/// </summary>
/// <param name="destination">Destination set, without prefix</param>
/// <param name="keys">One or more sets, without prefix</param>
/// <returns>Number of members in the resulting set</returns>
public long SInterStore(string destination, params string[] keys)
{
    var allKeys = new[] { destination }.Concat(keys).ToArray();
    return NodesNotSupport(allKeys, 0, (conn, realKeys) => conn.Value.SInterStore(realKeys.First(), realKeys.Skip(1).ToArray()));
}
/// <summary>
/// Determines whether <paramref name="member"/> is a member of the set at <paramref name="key"/>.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="member">Member</param>
/// <returns></returns>
public bool SIsMember(string key, object member)
{
    var serialized = this.SerializeRedisValueInternal(member);
    return ExecuteScalar(key, (conn, realKey) => conn.Value.SIsMember(realKey, serialized));
}
/// <summary>
/// Returns all the members of the set.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public string[] SMembers(string key)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.SMembers(realKey));
}
/// <summary>
/// Returns all the members of the set.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public T[] SMembers<T>(string key)
{
    var raw = ExecuteScalar(key, (conn, realKey) => conn.Value.SMembersBytes(realKey));
    return this.DeserializeRedisValueArrayInternal<T>(raw);
}
/// <summary>
/// Moves <paramref name="member"/> from the set at <paramref name="source"/> to the set at <paramref name="destination"/>.
/// </summary>
/// <param name="source">Source set key, without prefix</param>
/// <param name="destination">Destination set key, without prefix</param>
/// <param name="member">Member to move</param>
/// <returns>true when the member was moved</returns>
public bool SMove(string source, string destination, object member)
{
string rule = string.Empty;
if (Nodes.Count > 1)
{
// Multi-node deployment: resolve which node each key hashes to.
var rule1 = NodeRuleRaw(source);
var rule2 = NodeRuleRaw(destination);
if (rule1 != rule2)
{
// Keys live on different nodes, so SMOVE cannot run server-side;
// emulate it with SREM + SADD. NOTE(review): this emulation is not atomic.
if (SRem(source, member) <= 0) return false;
return SAdd(destination, member) > 0;
}
rule = rule1;
}
// Same node (or single-node): issue a real SMOVE with the pool's key prefix applied.
var pool = Nodes.TryGetValue(rule, out var b) ? b : Nodes.First().Value;
var key1 = string.Concat(pool.Prefix, source);
var key2 = string.Concat(pool.Prefix, destination);
var args = this.SerializeRedisValueInternal(member);
return GetAndExecute(pool, conn => conn.Value.SMove(key1, key2, args));
}
/// <summary>
/// Removes and returns a random member from the set.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public string SPop(string key)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.SPop(realKey));
}
/// <summary>
/// Removes and returns a random member from the set.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public T SPop<T>(string key)
{
    var raw = ExecuteScalar(key, (conn, realKey) => conn.Value.SPopBytes(realKey));
    return this.DeserializeRedisValueInternal<T>(raw);
}
/// <summary>
/// [redis-server 3.2] Removes and returns one or more random members from the set.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="count">How many members to remove and return</param>
/// <returns></returns>
public string[] SPop(string key, long count)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.SPop(realKey, count));
}
/// <summary>
/// [redis-server 3.2] Removes and returns one or more random members from the set.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <param name="count">How many members to remove and return</param>
/// <returns></returns>
public T[] SPop<T>(string key, long count)
{
    var raw = ExecuteScalar(key, (conn, realKey) => conn.Value.SPopBytes(realKey, count));
    return this.DeserializeRedisValueArrayInternal<T>(raw);
}
/// <summary>
/// Returns a random member from the set without removing it.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public string SRandMember(string key)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.SRandMember(realKey));
}
/// <summary>
/// Returns a random member from the set without removing it.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public T SRandMember<T>(string key)
{
    var raw = ExecuteScalar(key, (conn, realKey) => conn.Value.SRandMemberBytes(realKey));
    return this.DeserializeRedisValueInternal<T>(raw);
}
/// <summary>
/// Returns one or more random members from the set without removing them.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="count">How many members to return</param>
/// <returns></returns>
public string[] SRandMembers(string key, int count = 1)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.SRandMembers(realKey, count));
}
/// <summary>
/// Returns one or more random members from the set without removing them.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <param name="count">How many members to return</param>
/// <returns></returns>
public T[] SRandMembers<T>(string key, int count = 1)
{
    var raw = ExecuteScalar(key, (conn, realKey) => conn.Value.SRandMembersBytes(realKey, count));
    return this.DeserializeRedisValueArrayInternal<T>(raw);
}
/// <summary>
/// Removes one or more members from a set.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="members">One or more members</param>
/// <returns>Number of members actually removed</returns>
public long SRem<T>(string key, params T[] members)
{
    // No-op when nothing was supplied.
    if (members == null || members.Any() == false) return 0;
    var serialized = members.Select(m => this.SerializeRedisValueInternal(m)).ToArray();
    return ExecuteScalar(key, (conn, realKey) => conn.Value.SRem(realKey, serialized));
}
/// <summary>
/// Returns the union of all the given sets.
/// </summary>
/// <param name="keys">Keys, without prefix</param>
/// <returns></returns>
public string[] SUnion(params string[] keys)
{
    return NodesNotSupport(keys, new string[0], (conn, realKeys) => conn.Value.SUnion(realKeys));
}
/// <summary>
/// Returns the union of all the given sets.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="keys">Keys, without prefix</param>
/// <returns></returns>
public T[] SUnion<T>(params string[] keys)
{
    var raw = NodesNotSupport(keys, new byte[0][], (conn, realKeys) => conn.Value.SUnionBytes(realKeys));
    return this.DeserializeRedisValueArrayInternal<T>(raw);
}
/// <summary>
/// Computes the union of all the given sets and stores it in <paramref name="destination"/>.
/// </summary>
/// <param name="destination">Destination set, without prefix</param>
/// <param name="keys">One or more sets, without prefix</param>
/// <returns>Number of members in the resulting set</returns>
public long SUnionStore(string destination, params string[] keys)
{
    var allKeys = new[] { destination }.Concat(keys).ToArray();
    return NodesNotSupport(allKeys, 0, (conn, realKeys) => conn.Value.SUnionStore(realKeys.First(), realKeys.Skip(1).ToArray()));
}
/// <summary>
/// Incrementally iterates the members of a set.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="cursor">Cursor position</param>
/// <param name="pattern">Match pattern</param>
/// <param name="count">Count hint</param>
/// <returns></returns>
public RedisScan<string> SScan(string key, long cursor, string pattern = null, long? count = null)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.SScan(realKey, cursor, pattern, count));
}
/// <summary>
/// Incrementally iterates the members of a set.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <param name="cursor">Cursor position</param>
/// <param name="pattern">Match pattern</param>
/// <param name="count">Count hint</param>
/// <returns></returns>
public RedisScan<T> SScan<T>(string key, long cursor, string pattern = null, long? count = null)
{
    var page = ExecuteScalar(key, (conn, realKey) => conn.Value.SScanBytes(realKey, cursor, pattern, count));
    return new RedisScan<T>(page.Cursor, this.DeserializeRedisValueArrayInternal<T>(page.Items));
}
#endregion
#region List
/// <summary>
/// Blocking version of LPOP: when none of the given lists has an element to pop,
/// the connection blocks until the timeout expires or an element becomes available.
/// Returns null on timeout.
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">One or more lists, without prefix</param>
/// <returns>The (key, value) popped, or null on timeout</returns>
public (string key, string value)? BLPopWithKey(int timeout, params string[] keys)
{
    string[] rkeys = null;
    var tuple = NodesNotSupport(keys, null, (c, k) => c.Value.BLPopWithKey(timeout, rkeys = k));
    if (tuple == null) return null;
    // FirstOrDefault (not First): when no captured key matches the returned key,
    // fall back to the server-returned key instead of throwing InvalidOperationException.
    return (rkeys?.FirstOrDefault(b => b == tuple.Item1) ?? tuple.Item1, tuple.Item2);
}
/// <summary>
/// Blocking version of LPOP: when none of the given lists has an element to pop,
/// the connection blocks until the timeout expires or an element becomes available.
/// Returns null on timeout.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">One or more lists, without prefix</param>
/// <returns>The (key, value) popped, or null on timeout</returns>
public (string key, T value)? BLPopWithKey<T>(int timeout, params string[] keys)
{
    string[] rkeys = null;
    var tuple = NodesNotSupport(keys, null, (c, k) => c.Value.BLPopBytesWithKey(timeout, rkeys = k));
    if (tuple == null) return null;
    // FirstOrDefault (not First): when no captured key matches the returned key,
    // fall back to the server-returned key instead of throwing InvalidOperationException.
    return (rkeys?.FirstOrDefault(b => b == tuple.Item1) ?? tuple.Item1, this.DeserializeRedisValueInternal<T>(tuple.Item2));
}
/// <summary>
/// Blocking version of LPOP: when none of the given lists has an element to pop,
/// the connection blocks until the timeout expires or an element becomes available.
/// Returns null on timeout.
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">One or more lists, without prefix</param>
/// <returns></returns>
public string BLPop(int timeout, params string[] keys)
{
    return NodesNotSupport(keys, null, (conn, realKeys) => conn.Value.BLPop(timeout, realKeys));
}
/// <summary>
/// Blocking version of LPOP: when none of the given lists has an element to pop,
/// the connection blocks until the timeout expires or an element becomes available.
/// Returns null on timeout.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">One or more lists, without prefix</param>
/// <returns></returns>
public T BLPop<T>(int timeout, params string[] keys)
{
    var raw = NodesNotSupport(keys, null, (conn, realKeys) => conn.Value.BLPopBytes(timeout, realKeys));
    return this.DeserializeRedisValueInternal<T>(raw);
}
/// <summary>
/// Blocking version of RPOP: when none of the given lists has an element to pop,
/// the connection blocks until the timeout expires or an element becomes available.
/// Returns null on timeout.
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">One or more lists, without prefix</param>
/// <returns>The (key, value) popped, or null on timeout</returns>
public (string key, string value)? BRPopWithKey(int timeout, params string[] keys)
{
    string[] rkeys = null;
    var tuple = NodesNotSupport(keys, null, (c, k) => c.Value.BRPopWithKey(timeout, rkeys = k));
    if (tuple == null) return null;
    // FirstOrDefault (not First): when no captured key matches the returned key,
    // fall back to the server-returned key instead of throwing InvalidOperationException.
    return (rkeys?.FirstOrDefault(b => b == tuple.Item1) ?? tuple.Item1, tuple.Item2);
}
/// <summary>
/// Blocking version of RPOP: when none of the given lists has an element to pop,
/// the connection blocks until the timeout expires or an element becomes available.
/// Returns null on timeout.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">One or more lists, without prefix</param>
/// <returns>The (key, value) popped, or null on timeout</returns>
public (string key, T value)? BRPopWithKey<T>(int timeout, params string[] keys)
{
    string[] rkeys = null;
    var tuple = NodesNotSupport(keys, null, (c, k) => c.Value.BRPopBytesWithKey(timeout, rkeys = k));
    if (tuple == null) return null;
    // FirstOrDefault (not First): when no captured key matches the returned key,
    // fall back to the server-returned key instead of throwing InvalidOperationException.
    return (rkeys?.FirstOrDefault(b => b == tuple.Item1) ?? tuple.Item1, this.DeserializeRedisValueInternal<T>(tuple.Item2));
}
/// <summary>
/// Blocking version of RPOP: when none of the given lists has an element to pop,
/// the connection blocks until the timeout expires or an element becomes available.
/// Returns null on timeout.
/// </summary>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">One or more lists, without prefix</param>
/// <returns></returns>
public string BRPop(int timeout, params string[] keys)
{
    return NodesNotSupport(keys, null, (conn, realKeys) => conn.Value.BRPop(timeout, realKeys));
}
/// <summary>
/// Blocking version of RPOP: when none of the given lists has an element to pop,
/// the connection blocks until the timeout expires or an element becomes available.
/// Returns null on timeout.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="timeout">Timeout in seconds</param>
/// <param name="keys">One or more lists, without prefix</param>
/// <returns></returns>
public T BRPop<T>(int timeout, params string[] keys)
{
    var raw = NodesNotSupport(keys, null, (conn, realKeys) => conn.Value.BRPopBytes(timeout, realKeys));
    return this.DeserializeRedisValueInternal<T>(raw);
}
/// <summary>
/// Blocking version of RPOPLPUSH: when the list at <paramref name="source"/> is non-empty it behaves like RPOPLPUSH;
/// when it is empty, the connection blocks until the timeout expires or another client pushes to <paramref name="source"/>.
/// </summary>
/// <param name="source">Source key, without prefix</param>
/// <param name="destination">Destination key, without prefix</param>
/// <param name="timeout">Timeout in seconds</param>
/// <returns></returns>
public string BRPopLPush(string source, string destination, int timeout)
{
    return NodesNotSupport(new[] { source, destination }, null, (conn, realKeys) => conn.Value.BRPopLPush(realKeys.First(), realKeys.Last(), timeout));
}
/// <summary>
/// Blocking version of RPOPLPUSH: when the list at <paramref name="source"/> is non-empty it behaves like RPOPLPUSH;
/// when it is empty, the connection blocks until the timeout expires or another client pushes to <paramref name="source"/>.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="source">Source key, without prefix</param>
/// <param name="destination">Destination key, without prefix</param>
/// <param name="timeout">Timeout in seconds</param>
/// <returns></returns>
public T BRPopLPush<T>(string source, string destination, int timeout)
{
    var raw = NodesNotSupport(new[] { source, destination }, null, (conn, realKeys) => conn.Value.BRPopBytesLPush(realKeys.First(), realKeys.Last(), timeout));
    return this.DeserializeRedisValueInternal<T>(raw);
}
/// <summary>
/// Gets the element at the given index in the list.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="index">Index</param>
/// <returns></returns>
public string LIndex(string key, long index)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.LIndex(realKey, index));
}
/// <summary>
/// Gets the element at the given index in the list.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <param name="index">Index</param>
/// <returns></returns>
public T LIndex<T>(string key, long index)
{
    var raw = ExecuteScalar(key, (conn, realKey) => conn.Value.LIndexBytes(realKey, index));
    return this.DeserializeRedisValueInternal<T>(raw);
}
/// <summary>
/// Inserts <paramref name="value"/> before <paramref name="pivot"/> in the list.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="pivot">Existing list element</param>
/// <param name="value">New element</param>
/// <returns>List length after the insert</returns>
public long LInsertBefore(string key, object pivot, object value)
{
    // NOTE(review): only value is serialized here; pivot is passed through as-is — confirm downstream handling.
    var serialized = this.SerializeRedisValueInternal(value);
    return ExecuteScalar(key, (conn, realKey) => conn.Value.LInsert(realKey, RedisInsert.Before, pivot, serialized));
}
/// <summary>
/// Inserts <paramref name="value"/> after <paramref name="pivot"/> in the list.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="pivot">Existing list element</param>
/// <param name="value">New element</param>
/// <returns>List length after the insert</returns>
public long LInsertAfter(string key, object pivot, object value)
{
    // NOTE(review): only value is serialized here; pivot is passed through as-is — confirm downstream handling.
    var serialized = this.SerializeRedisValueInternal(value);
    return ExecuteScalar(key, (conn, realKey) => conn.Value.LInsert(realKey, RedisInsert.After, pivot, serialized));
}
/// <summary>
/// Gets the length of the list.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public long LLen(string key)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.LLen(realKey));
}
/// <summary>
/// Removes and returns the first element of the list.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public string LPop(string key)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.LPop(realKey));
}
/// <summary>
/// Removes and returns the first element of the list.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public T LPop<T>(string key)
{
    var raw = ExecuteScalar(key, (conn, realKey) => conn.Value.LPopBytes(realKey));
    return this.DeserializeRedisValueInternal<T>(raw);
}
/// <summary>
/// Prepends one or more values to the head of the list.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="value">One or more values</param>
/// <returns>List length after the LPUSH</returns>
public long LPush<T>(string key, params T[] value)
{
    // No-op when nothing was supplied.
    if (value == null || value.Any() == false) return 0;
    var serialized = value.Select(v => this.SerializeRedisValueInternal(v)).ToArray();
    return ExecuteScalar(key, (conn, realKey) => conn.Value.LPush(realKey, serialized));
}
/// <summary>
/// Prepends a value to the head of a list, only when the list already exists.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="value">Value</param>
/// <returns>List length after the LPUSHX</returns>
public long LPushX(string key, object value)
{
    var serialized = this.SerializeRedisValueInternal(value);
    return ExecuteScalar(key, (conn, realKey) => conn.Value.LPushX(realKey, serialized));
}
/// <summary>
/// Gets the elements of the list in the given range.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="start">Start position: 0 is the first element, -1 the last</param>
/// <param name="stop">Stop position: 0 is the first element, -1 the last</param>
/// <returns></returns>
public string[] LRange(string key, long start, long stop)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.LRange(realKey, start, stop));
}
/// <summary>
/// Gets the elements of the list in the given range.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <param name="start">Start position: 0 is the first element, -1 the last</param>
/// <param name="stop">Stop position: 0 is the first element, -1 the last</param>
/// <returns></returns>
public T[] LRange<T>(string key, long start, long stop)
{
    var raw = ExecuteScalar(key, (conn, realKey) => conn.Value.LRangeBytes(realKey, start, stop));
    return this.DeserializeRedisValueArrayInternal<T>(raw);
}
/// <summary>
/// Removes occurrences of <paramref name="value"/> from the list according to <paramref name="count"/>.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="count">count &gt; 0: remove count from the head; count &lt; 0: remove -count from the tail; count = 0: remove all</param>
/// <param name="value">Element to remove</param>
/// <returns>Number of elements removed</returns>
public long LRem(string key, long count, object value)
{
    var serialized = this.SerializeRedisValueInternal(value);
    return ExecuteScalar(key, (conn, realKey) => conn.Value.LRem(realKey, count, serialized));
}
/// <summary>
/// Sets the list element at the given index to <paramref name="value"/>.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="index">Index</param>
/// <param name="value">Value</param>
/// <returns>true when the server replied OK</returns>
public bool LSet(string key, long index, object value)
{
    var serialized = this.SerializeRedisValueInternal(value);
    var reply = ExecuteScalar(key, (conn, realKey) => conn.Value.LSet(realKey, index, serialized));
    return reply == "OK";
}
/// <summary>
/// Trims the list so that it keeps only the elements inside the given range; everything outside is removed.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="start">Start position: 0 is the first element, -1 the last</param>
/// <param name="stop">Stop position: 0 is the first element, -1 the last</param>
/// <returns>true when the server replied OK</returns>
public bool LTrim(string key, long start, long stop)
{
    var reply = ExecuteScalar(key, (conn, realKey) => conn.Value.LTrim(realKey, start, stop));
    return reply == "OK";
}
/// <summary>
/// Removes and returns the last element of the list.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public string RPop(string key)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.RPop(realKey));
}
/// <summary>
/// Removes and returns the last element of the list.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public T RPop<T>(string key)
{
    var raw = ExecuteScalar(key, (conn, realKey) => conn.Value.RPopBytes(realKey));
    return this.DeserializeRedisValueInternal<T>(raw);
}
/// <summary>
/// Pops the last (tail) element of the list at <paramref name="source"/>, returns it to the client,
/// and pushes it as the head element of the list at <paramref name="destination"/>.
/// </summary>
/// <param name="source">Source key, without prefix</param>
/// <param name="destination">Destination key, without prefix</param>
/// <returns></returns>
public string RPopLPush(string source, string destination)
{
    return NodesNotSupport(new[] { source, destination }, null, (conn, realKeys) => conn.Value.RPopLPush(realKeys.First(), realKeys.Last()));
}
/// <summary>
/// Pops the last (tail) element of the list at <paramref name="source"/>, returns it to the client,
/// and pushes it as the head element of the list at <paramref name="destination"/>.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="source">Source key, without prefix</param>
/// <param name="destination">Destination key, without prefix</param>
/// <returns></returns>
public T RPopLPush<T>(string source, string destination)
{
    var raw = NodesNotSupport(new[] { source, destination }, null, (conn, realKeys) => conn.Value.RPopBytesLPush(realKeys.First(), realKeys.Last()));
    return this.DeserializeRedisValueInternal<T>(raw);
}
/// <summary>
/// Appends one or more values to the tail of the list.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="value">One or more values</param>
/// <returns>List length after the RPUSH</returns>
public long RPush<T>(string key, params T[] value)
{
    // No-op when nothing was supplied.
    if (value == null || value.Any() == false) return 0;
    var serialized = value.Select(v => this.SerializeRedisValueInternal(v)).ToArray();
    return ExecuteScalar(key, (conn, realKey) => conn.Value.RPush(realKey, serialized));
}
/// <summary>
/// Appends a value to the tail of a list, only when the list already exists.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="value">Value</param>
/// <returns>List length after the RPUSHX</returns>
public long RPushX(string key, object value)
{
    var serialized = this.SerializeRedisValueInternal(value);
    return ExecuteScalar(key, (conn, realKey) => conn.Value.RPushX(realKey, serialized));
}
#endregion
#region Hash
/// <summary>
/// [redis-server 3.2.0] Returns the string length of the value of the given hash field;
/// 0 when the hash or the field does not exist.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="field">Field</param>
/// <returns></returns>
public long HStrLen(string key, string field)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.HStrLen(realKey, field));
}
/// <summary>
/// Deletes one or more hash fields.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="fields">Fields</param>
/// <returns>Number of fields actually removed</returns>
public long HDel(string key, params string[] fields)
{
    // No-op when nothing was supplied.
    if (fields == null || fields.Any() == false) return 0;
    return ExecuteScalar(key, (conn, realKey) => conn.Value.HDel(realKey, fields));
}
/// <summary>
/// Determines whether the given field exists in the hash stored at <paramref name="key"/>.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="field">Field</param>
/// <returns></returns>
public bool HExists(string key, string field)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.HExists(realKey, field));
}
/// <summary>
/// Gets the value of the given field in the hash.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="field">Field</param>
/// <returns></returns>
public string HGet(string key, string field)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.HGet(realKey, field));
}
/// <summary>
/// Gets the value of the given field in the hash.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <param name="field">Field</param>
/// <returns></returns>
public T HGet<T>(string key, string field)
{
    var raw = ExecuteScalar(key, (conn, realKey) => conn.Value.HGetBytes(realKey, field));
    return this.DeserializeRedisValueInternal<T>(raw);
}
/// <summary>
/// Gets all fields and values of the hash stored at <paramref name="key"/>.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public Dictionary<string, string> HGetAll(string key)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.HGetAll(realKey));
}
/// <summary>
/// Gets all fields and values of the hash stored at <paramref name="key"/>.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public Dictionary<string, T> HGetAll<T>(string key)
{
    var raw = ExecuteScalar(key, (conn, realKey) => conn.Value.HGetAllBytes(realKey));
    return this.DeserializeRedisValueDictionaryInternal<string, T>(raw);
}
/// <summary>
/// Increments the integer value of the given hash field by <paramref name="value"/>.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="field">Field</param>
/// <param name="value">Increment (default 1)</param>
/// <returns>Field value after the increment</returns>
public long HIncrBy(string key, string field, long value = 1)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.HIncrBy(realKey, field, value));
}
/// <summary>
/// Increments the numeric value of the given hash field by the floating-point <paramref name="value"/>.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="field">Field</param>
/// <param name="value">Increment</param>
/// <returns>Field value after the increment</returns>
public decimal HIncrByFloat(string key, string field, decimal value)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.HIncrByFloat(realKey, field, value));
}
/// <summary>
/// Gets all field names of the hash.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public string[] HKeys(string key)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.HKeys(realKey));
}
/// <summary>
/// Gets the number of fields in the hash.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public long HLen(string key)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.HLen(realKey));
}
/// <summary>
/// Gets the values of multiple hash fields.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="fields">Fields</param>
/// <returns></returns>
public string[] HMGet(string key, params string[] fields)
{
    // Empty request yields an empty result without a round trip.
    if (fields == null || fields.Any() == false) return new string[0];
    return ExecuteScalar(key, (conn, realKey) => conn.Value.HMGet(realKey, fields));
}
/// <summary>
/// Gets the values of multiple hash fields.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <param name="fields">One or more fields</param>
/// <returns></returns>
public T[] HMGet<T>(string key, params string[] fields)
{
    // Empty request yields an empty result without a round trip.
    if (fields == null || fields.Any() == false) return new T[0];
    var raw = ExecuteScalar(key, (conn, realKey) => conn.Value.HMGetBytes(realKey, fields));
    return this.DeserializeRedisValueArrayInternal<T>(raw);
}
/// <summary>
/// Sets multiple field-value pairs on the hash stored at <paramref name="key"/>.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="keyValues">field1 value1 [field2 value2 ...]</param>
/// <returns>true when the server replied OK; false when nothing was supplied</returns>
public bool HMSet(string key, params object[] keyValues)
{
if (keyValues == null || keyValues.Any() == false) return false;
// Arguments must come in pairs: an odd count means a field or value is missing.
if (keyValues.Length % 2 != 0) throw new Exception("keyValues 参数是键值对,不应该出现奇数(数量),请检查使用姿势。");
var parms = new List<object>();
for (var a = 0; a < keyValues.Length; a += 2)
{
// Field name is coerced to a string; the value is serialized before sending.
var k = string.Concat(keyValues[a]);
var v = keyValues[a + 1];
if (string.IsNullOrEmpty(k)) throw new Exception("keyValues 参数是键值对,并且 key 不可为空");
parms.Add(k);
parms.Add(this.SerializeRedisValueInternal(v));
}
return ExecuteScalar(key, (c, k) => c.Value.HMSet(k, parms.ToArray())) == "OK";
}
/// <summary>
/// Sets the given hash field to <paramref name="value"/>.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="field">Field</param>
/// <param name="value">Value</param>
/// <returns>true when a new field was created; false when an existing field was overwritten</returns>
public bool HSet(string key, string field, object value)
{
    var serialized = this.SerializeRedisValueInternal(value);
    return ExecuteScalar(key, (conn, realKey) => conn.Value.HSet(realKey, field, serialized));
}
/// <summary>
/// Sets the given hash field to <paramref name="value"/>, only when the field does not yet exist.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="field">Field</param>
/// <param name="value">Value (string or byte[])</param>
/// <returns></returns>
public bool HSetNx(string key, string field, object value)
{
    var serialized = this.SerializeRedisValueInternal(value);
    return ExecuteScalar(key, (conn, realKey) => conn.Value.HSetNx(realKey, field, serialized));
}
/// <summary>
/// Gets all values of the hash.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public string[] HVals(string key)
{
    return ExecuteScalar(key, (conn, realKey) => conn.Value.HVals(realKey));
}
/// <summary>
/// Gets all values of the hash.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <returns></returns>
public T[] HVals<T>(string key)
{
    var raw = ExecuteScalar(key, (conn, realKey) => conn.Value.HValsBytes(realKey));
    return this.DeserializeRedisValueArrayInternal<T>(raw);
}
/// <summary>
/// Incrementally iterates the field-value pairs of a hash.
/// </summary>
/// <param name="key">Key, without prefix</param>
/// <param name="cursor">Cursor position</param>
/// <param name="pattern">Match pattern</param>
/// <param name="count">Count hint</param>
/// <returns></returns>
public RedisScan<(string field, string value)> HScan(string key, long cursor, string pattern = null, long? count = null)
{
    var page = ExecuteScalar(key, (conn, realKey) => conn.Value.HScan(realKey, cursor, pattern, count));
    var items = page.Items.Select(item => (item.Item1, item.Item2)).ToArray();
    return new RedisScan<(string, string)>(page.Cursor, items);
}
/// <summary>
/// Incrementally iterates the field-value pairs of a hash.
/// </summary>
/// <typeparam name="T">byte[] or any other type</typeparam>
/// <param name="key">Key, without prefix</param>
/// <param name="cursor">Cursor position</param>
/// <param name="pattern">Match pattern</param>
/// <param name="count">Count hint</param>
/// <returns></returns>
public RedisScan<(string field, T value)> HScan<T>(string key, long cursor, string pattern = null, long? count = null)
{
    var page = ExecuteScalar(key, (conn, realKey) => conn.Value.HScanBytes(realKey, cursor, pattern, count));
    var items = page.Items.Select(item => (item.Item1, this.DeserializeRedisValueInternal<T>(item.Item2))).ToArray();
    return new RedisScan<(string, T)>(page.Cursor, items);
}
#endregion
#region String
/// <summary>
/// If key already exists and holds a string, APPEND appends the given value to the
/// end of the current value.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="value">Value to append (string or byte[])</param>
/// <returns>Length of the string stored at key after the append</returns>
public long Append(string key, object value)
{
    var payload = this.SerializeRedisValueInternal(value);
    return ExecuteScalar(key, (conn, redisKey) => conn.Value.Append(redisKey, payload));
}
/// <summary>
/// Counts the number of bits set to 1 within the given byte range.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start byte offset</param>
/// <param name="end">End byte offset</param>
/// <returns></returns>
public long BitCount(string key, long start, long end) => ExecuteScalar(key, (c, k) => c.Value.BitCount(k, start, end));
/// <summary>
/// Performs a bitwise operation between one or more string keys and stores the result in destKey.
/// </summary>
/// <param name="op">And | Or | XOr | Not</param>
/// <param name="destKey">Destination key, without the configured prefix</param>
/// <param name="keys">Source keys, without the configured prefix</param>
/// <returns>Length of the string stored at destKey — equal to the longest input string</returns>
public long BitOp(RedisBitOp op, string destKey, params string[] keys)
{
if (string.IsNullOrEmpty(destKey)) throw new Exception("destKey 不能为空")
if (keys == null || keys.Length == 0) throw new Exception("keys 不能为空");
// destKey is prepended as element 0 so all keys share one node/prefix pass;
// multi-key ops are rejected on partitioned deployments by NodesNotSupport.
return NodesNotSupport(new[] { destKey }.Concat(keys).ToArray(), 0, (c, k) => c.Value.BitOp(op, k.First(), k.Skip(1).ToArray()));
}
/// <summary>
/// Finds the first bit set to the given value (1 or 0) within the range of the string stored at key.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="bit">Bit value to search for</param>
/// <param name="start">Start byte offset; -1 is the last byte, -2 the second to last</param>
/// <param name="end">End byte offset; -1 is the last byte, -2 the second to last</param>
/// <returns>Position of the first matching bit within the range</returns>
public long BitPos(string key, bool bit, long? start = null, long? end = null) => ExecuteScalar(key, (c, k) => c.Value.BitPos(k, bit, start, end));
/// <summary>
/// Gets the value of the specified key.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public string Get(string key) => ExecuteScalar(key, (c, k) => c.Value.Get(k));
/// <summary>
/// Gets the value of the specified key, deserialized to T.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public T Get<T>(string key) => this.DeserializeRedisValueInternal<T>(ExecuteScalar(key, (c, k) => c.Value.GetBytes(k)));
/// <summary>
/// Gets the value of the specified key, streaming it into a destination stream
/// (suited to large values that should not be materialized in memory at once).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="destination">Stream the value is written to</param>
/// <param name="bufferSize">Read buffer size in bytes</param>
public void Get(string key, Stream destination, int bufferSize = 1024)
{
ExecuteScalar(key, (c, k) =>
{
// Send GET without consuming the reply, then read the bulk reply directly
// into the destination stream in bufferSize chunks.
c.Value.WriteNoneRead(new Internal.Commands.RedisString("GET", k));
c.Value._reader.ReadBulkBytes(destination, bufferSize, true);
return true;
});
}
/// <summary>
/// Returns the bit value at the given offset of the string stored at key.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="offset">Bit offset</param>
/// <returns></returns>
public bool GetBit(string key, uint offset) => ExecuteScalar(key, (c, k) => c.Value.GetBit(k, offset));
/// <summary>
/// Returns a substring of the string value stored at key.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start position; 0 is the first character, -1 the last</param>
/// <param name="end">End position; 0 is the first character, -1 the last</param>
/// <returns></returns>
public string GetRange(string key, long start, long end) => ExecuteScalar(key, (c, k) => c.Value.GetRange(k, start, end));
/// <summary>
/// Returns a substring of the string value stored at key, deserialized to T.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="start">Start position; 0 is the first character, -1 the last</param>
/// <param name="end">End position; 0 is the first character, -1 the last</param>
/// <returns></returns>
public T GetRange<T>(string key, long start, long end) => this.DeserializeRedisValueInternal<T>(ExecuteScalar(key, (c, k) => c.Value.GetRangeBytes(k, start, end)));
/// <summary>
/// Sets the value of the given key and returns its old value.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="value">New value</param>
/// <returns></returns>
public string GetSet(string key, object value)
{
var args = this.SerializeRedisValueInternal(value);
return ExecuteScalar(key, (c, k) => c.Value.GetSet(k, args));
}
/// <summary>
/// Sets the value of the given key and returns its old value, deserialized to T.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="value">New value</param>
/// <returns></returns>
public T GetSet<T>(string key, object value)
{
var args = this.SerializeRedisValueInternal(value);
return this.DeserializeRedisValueInternal<T>(ExecuteScalar(key, (c, k) => c.Value.GetSetBytes(k, args)));
}
/// <summary>
/// Increments the integer stored at key by the given amount.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="value">Increment (default = 1)</param>
/// <returns></returns>
public long IncrBy(string key, long value = 1) => ExecuteScalar(key, (c, k) => c.Value.IncrBy(k, value));
/// <summary>
/// Increments the number stored at key by the given floating-point amount.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="value">Increment</param>
/// <returns></returns>
public decimal IncrByFloat(string key, decimal value) => ExecuteScalar(key, (c, k) => c.Value.IncrByFloat(k, value));
/// <summary>
/// Gets the values of multiple keys (array).
/// </summary>
/// <param name="keys">Keys, without the configured prefix</param>
/// <returns></returns>
public string[] MGet(params string[] keys) => ExecuteArray(keys, (c, k) => c.Value.MGet(k));
/// <summary>
/// Gets the values of multiple keys (array), each deserialized to T.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="keys">Keys, without the configured prefix</param>
/// <returns></returns>
public T[] MGet<T>(params string[] keys) => this.DeserializeRedisValueArrayInternal<T>(ExecuteArray(keys, (c, k) => c.Value.MGetBytes(k)));
/// <summary>
/// Sets one or more key-value pairs at once (MSET).
/// Note: RedisExistence.Xx is used here purely as a "not-Nx" sentinel —
/// MSetInternal only branches on Nx, so this always issues a plain MSET.
/// </summary>
/// <param name="keyValues">key1 value1 [key2 value2]</param>
/// <returns></returns>
public bool MSet(params object[] keyValues) => MSetInternal(RedisExistence.Xx, keyValues)
/// <summary>
/// Sets one or more key-value pairs at once, only if none of the given keys exist (MSETNX).
/// </summary>
/// <param name="keyValues">key1 value1 [key2 value2]</param>
/// <returns></returns>
public bool MSetNx(params object[] keyValues) => MSetInternal(RedisExistence.Nx, keyValues);
/// <summary>
/// Shared implementation for MSet/MSetNx. keyValues is a flat key/value list;
/// duplicate keys keep the last value.
/// </summary>
/// <param name="exists">Nx routes to MSETNX; any other value routes to MSET</param>
/// <param name="keyValues">key1 value1 [key2 value2]</param>
/// <returns></returns>
internal bool MSetInternal(RedisExistence exists, params object[] keyValues)
{
if (keyValues == null || keyValues.Any() == false) return false;
if (keyValues.Length % 2 != 0) throw new Exception("keyValues 参数是键值对,不应该出现奇数(数量),请检查使用姿势。");
// Collapse the flat list into a map of unprefixed key -> serialized value.
var dic = new Dictionary<string, object>();
for (var a = 0; a < keyValues.Length; a += 2)
{
var k = string.Concat(keyValues[a]);
var v = this.SerializeRedisValueInternal(keyValues[a + 1]);
if (string.IsNullOrEmpty(k)) throw new Exception("keyValues 参数是键值对,并且 key 不可为空");
if (dic.ContainsKey(k)) dic[k] = v;
else dic.Add(k, v);
}
Func<Object<RedisClient>, string[], long> handle = (c, k) =>
{
// k arrives with the pool prefix applied; strip it again to look up the value.
var prefix = (c.Pool as RedisClientPool)?.Prefix;
var parms = new object[k.Length * 2];
for (var a = 0; a < k.Length; a++)
{
parms[a * 2] = k[a];
parms[a * 2 + 1] = dic[string.IsNullOrEmpty(prefix) ? k[a] : k[a].Substring(prefix.Length)];
}
if (exists == RedisExistence.Nx) return c.Value.MSetNx(parms) ? 1 : 0;
return c.Value.MSet(parms) == "OK" ? 1 : 0;
};
// MSETNX must see all keys on one node to be atomic, hence NodesNotSupport.
if (exists == RedisExistence.Nx) return NodesNotSupport(dic.Keys.ToArray(), 0, handle) > 0;
return ExecuteNonQuery(dic.Keys.ToArray(), handle) > 0;
}
/// <summary>
/// Sets the value of the specified key. All object write parameters accept
/// string | byte[] | numeric | object.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="value">Value</param>
/// <param name="expireSeconds">Expiration in seconds; non-positive means no expiry</param>
/// <param name="exists">Nx, Xx</param>
/// <returns></returns>
public bool Set(string key, object value, int expireSeconds = -1, RedisExistence? exists = null)
{
    object payload = this.SerializeRedisValueInternal(value);
    string reply;
    if (expireSeconds <= 0)
        reply = exists == null
            ? ExecuteScalar(key, (c, k) => c.Value.Set(k, payload))
            : ExecuteScalar(key, (c, k) => c.Value.Set(k, payload, null, exists));
    else
        reply = exists == null
            ? ExecuteScalar(key, (c, k) => c.Value.Set(k, payload, expireSeconds, null))
            : ExecuteScalar(key, (c, k) => c.Value.Set(k, payload, expireSeconds, exists));
    return reply == "OK";
}
/// <summary>
/// Sets the value of the specified key. All object write parameters accept
/// string | byte[] | numeric | object.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="value">Value</param>
/// <param name="expire">Expiration interval; zero or negative means no expiry</param>
/// <param name="exists">Nx, Xx</param>
/// <returns></returns>
public bool Set(string key, object value, TimeSpan expire, RedisExistence? exists = null)
{
    object payload = this.SerializeRedisValueInternal(value);
    string reply;
    if (expire <= TimeSpan.Zero)
        reply = exists == null
            ? ExecuteScalar(key, (c, k) => c.Value.Set(k, payload))
            : ExecuteScalar(key, (c, k) => c.Value.Set(k, payload, null, exists));
    else
        reply = exists == null
            ? ExecuteScalar(key, (c, k) => c.Value.Set(k, payload, expire, null))
            : ExecuteScalar(key, (c, k) => c.Value.Set(k, payload, expire, exists));
    return reply == "OK";
}
/// <summary>
/// Sets or clears the bit at the given offset of the string stored at key.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="offset">Bit offset</param>
/// <param name="value">Bit value</param>
/// <returns></returns>
public bool SetBit(string key, uint offset, bool value) => ExecuteScalar(key, (c, k) => c.Value.SetBit(k, offset, value));
/// <summary>
/// Sets the value of key only when the key does not already exist.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="value">Value</param>
/// <returns></returns>
public bool SetNx(string key, object value)
{
var args = this.SerializeRedisValueInternal(value);
return ExecuteScalar(key, (c, k) => c.Value.SetNx(k, args));
}
/// <summary>
/// Overwrites part of the string stored at key with value, starting at the given offset.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="offset">Byte offset</param>
/// <param name="value">Value</param>
/// <returns>Length of the string after it was modified</returns>
public long SetRange(string key, uint offset, object value)
{
var args = this.SerializeRedisValueInternal(value);
return ExecuteScalar(key, (c, k) => c.Value.SetRange(k, offset, args));
}
/// <summary>
/// Returns the length of the string value stored at key.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public long StrLen(string key) => ExecuteScalar(key, (c, k) => c.Value.StrLen(k));
#endregion
#region Key
/// <summary>
/// [redis-server 3.2.1] Updates the last-access time of the given key(s); keys that
/// do not exist are skipped.
/// </summary>
/// <param name="key">Keys, without the configured prefix</param>
/// <returns></returns>
public long Touch(params string[] key) => ExecuteNonQuery(key, (c, k) => c.Value.Touch(k));
/// <summary>
/// [redis-server 4.0.0] Deletes the given key(s) like DEL, but non-blocking:
/// the keys are unlinked from the keyspace immediately and the actual memory
/// reclamation happens asynchronously in another thread.
/// </summary>
/// <param name="key">Keys, without the configured prefix</param>
/// <returns></returns>
public long UnLink(params string[] key) => ExecuteNonQuery(key, (c, k) => c.Value.UnLink(k));
/// <summary>
/// Deletes the given key(s) when they exist.
/// </summary>
/// <param name="key">Keys, without the configured prefix</param>
/// <returns></returns>
public long Del(params string[] key) => ExecuteNonQuery(key, (c, k) => c.Value.Del(k));
/// <summary>
/// Serializes the value stored at key and returns the serialized bytes (DUMP).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public byte[] Dump(string key) => ExecuteScalar(key, (c, k) => c.Value.Dump(k));
/// <summary>
/// Checks whether the given key exists.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public bool Exists(string key) => ExecuteScalar(key, (c, k) => c.Value.Exists(k));
/// <summary>
/// [redis-server 3.0] Checks how many of the given keys exist.
/// </summary>
/// <param name="keys">Keys, without the configured prefix</param>
/// <returns></returns>
public long Exists(string[] keys) => NodesNotSupport(keys, 0, (c, k) => c.Value.Exists(k));
/// <summary>
/// Sets a time-to-live on the given key.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="seconds">Expiration in seconds</param>
/// <returns></returns>
public bool Expire(string key, int seconds) => ExecuteScalar(key, (c, k) => c.Value.Expire(k, seconds));
/// <summary>
/// Sets a time-to-live on the given key.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="expire">Expiration interval</param>
/// <returns></returns>
public bool Expire(string key, TimeSpan expire) => ExecuteScalar(key, (c, k) => c.Value.Expire(k, expire));
/// <summary>
/// Sets an absolute expiration timestamp on the given key.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="expire">Expiration timestamp</param>
/// <returns></returns>
public bool ExpireAt(string key, DateTime expire) => ExecuteScalar(key, (c, k) => c.Value.ExpireAt(k, expire));
/// <summary>
/// Searches every partition node for keys matching the given pattern.
/// </summary>
/// <param name="pattern">e.g. runoob*</param>
/// <returns></returns>
public string[] Keys(string pattern)
{
    // Flatten the per-node KEYS results in node order.
    return Nodes
        .SelectMany(node => GetAndExecute(node.Value, conn => conn.Value.Keys(pattern)))
        .ToArray();
}
/// <summary>
/// Moves the key from the current database to the given database index.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="database">Target database index</param>
/// <returns></returns>
public bool Move(string key, int database) => ExecuteScalar(key, (c, k) => c.Value.Move(k, database));
/// <summary>
/// Returns the internal representation (encoding) used to store the value at key.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public string ObjectEncoding(string key) => ExecuteScalar(key, (c, k) => c.Value.ObjectEncoding(k));
/// <summary>
/// Returns the reference count of the value stored at key. Mainly for debugging.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public long? ObjectRefCount(string key) => ExecuteScalar(key, (c, k) => c.Value.Object(RedisObjectSubCommand.RefCount, k));
/// <summary>
/// Returns the idle time of the key (seconds since it was last read or written).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public long? ObjectIdleTime(string key) => ExecuteScalar(key, (c, k) => c.Value.Object(RedisObjectSubCommand.IdleTime, k));
/// <summary>
/// Removes the expiration from the key, making it persistent.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public bool Persist(string key) => ExecuteScalar(key, (c, k) => c.Value.Persist(k));
/// <summary>
/// Sets a time-to-live on the given key, in milliseconds.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="milliseconds">Expiration in milliseconds</param>
/// <returns></returns>
public bool PExpire(string key, int milliseconds) => ExecuteScalar(key, (c, k) => c.Value.PExpire(k, milliseconds));
/// <summary>
/// Sets a time-to-live on the given key, with millisecond precision.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="expire">Expiration interval</param>
/// <returns></returns>
public bool PExpire(string key, TimeSpan expire) => ExecuteScalar(key, (c, k) => c.Value.PExpire(k, expire));
/// <summary>
/// Sets an absolute expiration timestamp on the given key, with millisecond precision.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="expire">Expiration timestamp</param>
/// <returns></returns>
public bool PExpireAt(string key, DateTime expire) => ExecuteScalar(key, (c, k) => c.Value.PExpireAt(k, expire));
/// <summary>
/// Returns the remaining time-to-live of the key, in milliseconds.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public long PTtl(string key) => ExecuteScalar(key, (c, k) => c.Value.PTtl(k));
/// <summary>
/// Returns a random key from a randomly chosen node.
/// </summary>
/// <returns>If the returned key carries the configured prefix, the prefix is stripped before returning</returns>
public string RandomKey() => GetAndExecute(Nodes[NodesIndex[_rnd.Next(0, NodesIndex.Count)]], c =>
{
var rk = c.Value.RandomKey();
var prefix = (c.Pool as RedisClientPool).Prefix;
if (string.IsNullOrEmpty(prefix) == false && rk.StartsWith(prefix)) return rk.Substring(prefix.Length);
return rk;
});
/// <summary>
/// Renames a key. When the old and new key hash to different partition nodes,
/// the rename is emulated with DUMP + DEL on the source and RESTORE on the target.
/// </summary>
/// <param name="key">Old name, without the configured prefix</param>
/// <param name="newKey">New name, without the configured prefix</param>
/// <returns></returns>
public bool Rename(string key, string newKey)
{
string rule = string.Empty;
if (Nodes.Count > 1)
{
var rule1 = NodeRuleRaw(key);
var rule2 = NodeRuleRaw(newKey);
if (rule1 != rule2)
{
// Cross-node rename: pipeline DUMP+DEL on the source node, then RESTORE
// the dumped payload under the new key on its own node.
var ret = StartPipe(a => a.Dump(key).Del(key));
int.TryParse(ret[1]?.ToString(), NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryint);
// ret[0] is the DUMP payload, ret[1] the DEL count; both must succeed.
if (ret[0] == null || tryint <= 0) return false;
return Restore(newKey, (byte[])ret[0]);
}
rule = rule1;
}
// Same node: issue a plain RENAME with the pool prefix applied to both names.
var pool = Nodes.TryGetValue(rule, out var b) ? b : Nodes.First().Value;
var key1 = string.Concat(pool.Prefix, key);
var key2 = string.Concat(pool.Prefix, newKey);
return GetAndExecute(pool, conn => conn.Value.Rename(key1, key2)) == "OK";
}
/// <summary>
/// Renames a key, only if the new key does not already exist.
/// </summary>
/// <param name="key">Old name, without the configured prefix</param>
/// <param name="newKey">New name, without the configured prefix</param>
/// <returns></returns>
public bool RenameNx(string key, string newKey) => NodesNotSupport(new[] { key, newKey }, false, (c, k) => c.Value.RenameNx(k.First(), k.Last()));
/// <summary>
/// Deserializes the given DUMP payload and associates it with the key (RESTORE, no TTL).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="serializedValue">Serialized payload produced by DUMP</param>
/// <returns></returns>
public bool Restore(string key, byte[] serializedValue) => ExecuteScalar(key, (c, k) => c.Value.Restore(k, 0, serializedValue)) == "OK";
/// <summary>
/// Deserializes the given DUMP payload and associates it with the key (RESTORE).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="ttlMilliseconds">Time-to-live for the restored key, in milliseconds</param>
/// <param name="serializedValue">Serialized payload produced by DUMP</param>
/// <returns></returns>
public bool Restore(string key, long ttlMilliseconds, byte[] serializedValue) => ExecuteScalar(key, (c, k) => c.Value.Restore(k, ttlMilliseconds, serializedValue)) == "OK";
/// <summary>
/// Returns the elements of the list, set or sorted set at key, sorted.
/// Reference: http://doc.redisfans.com/key/sort.html
/// </summary>
/// <param name="key">List, set or sorted set key, without the configured prefix</param>
/// <param name="count">Number of elements</param>
/// <param name="offset">Offset</param>
/// <param name="by">External sort key pattern</param>
/// <param name="dir">Sort direction</param>
/// <param name="isAlpha">Sort lexicographically instead of numerically</param>
/// <param name="get">Patterns for retrieving values by the sorted result</param>
/// <returns></returns>
public string[] Sort(string key, long? count = null, long offset = 0, string by = null, RedisSortDir? dir = null, bool? isAlpha = null, params string[] get) =>
NodesNotSupport(key, (c, k) => c.Value.Sort(k, offset, count, by, dir, isAlpha, get));
/// <summary>
/// Sorts the list, set or sorted set at key and stores the result in destination.
/// Reference: http://doc.redisfans.com/key/sort.html
/// </summary>
/// <param name="key">List, set or sorted set key, without the configured prefix</param>
/// <param name="destination">Destination key, without the configured prefix</param>
/// <param name="count">Number of elements</param>
/// <param name="offset">Offset</param>
/// <param name="by">External sort key pattern</param>
/// <param name="dir">Sort direction</param>
/// <param name="isAlpha">Sort lexicographically instead of numerically</param>
/// <param name="get">Patterns for retrieving values by the sorted result</param>
/// <returns></returns>
public long SortAndStore(string key, string destination, long? count = null, long offset = 0, string by = null, RedisSortDir? dir = null, bool? isAlpha = null, params string[] get) =>
NodesNotSupport(key, (c, k) => c.Value.SortAndStore(k, (c.Pool as RedisClientPool)?.Prefix + destination, offset, count, by, dir, isAlpha, get));
/// <summary>
/// Returns the remaining time-to-live of the key, in seconds.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public long Ttl(string key) => ExecuteScalar(key, (c, k) => c.Value.Ttl(k));
/// <summary>
/// Returns the type of the value stored at key.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns></returns>
public KeyType Type(string key) => Enum.TryParse(ExecuteScalar(key, (c, k) => c.Value.Type(k)), true, out KeyType tryenum) ? tryenum : KeyType.None;
/// <summary>
/// Incrementally iterates the keys of the current database (SCAN).
/// The literal "Scan" is only a routing label for NodesNotSupport, not a Redis key.
/// </summary>
/// <param name="cursor">Scan cursor; start at 0, pass the returned cursor to continue</param>
/// <param name="pattern">Glob-style match pattern</param>
/// <param name="count">Hint for the number of elements returned per call</param>
/// <returns></returns>
public RedisScan<string> Scan(long cursor, string pattern = null, long? count = null) => NodesNotSupport("Scan", (c, k) => c.Value.Scan(cursor, pattern, count));
/// <summary>
/// Incrementally iterates the keys of the current database (SCAN),
/// deserializing each key payload to T.
/// </summary>
/// <typeparam name="T">byte[] or any serializable type</typeparam>
/// <param name="cursor">Scan cursor; start at 0, pass the returned cursor to continue</param>
/// <param name="pattern">Glob-style match pattern</param>
/// <param name="count">Hint for the number of elements returned per call</param>
/// <returns></returns>
public RedisScan<T> Scan<T>(long cursor, string pattern = null, long? count = null)
{
var scan = NodesNotSupport("Scan<T>", (c, k) => c.Value.ScanBytes(cursor, pattern, count));
return new RedisScan<T>(scan.Cursor, this.DeserializeRedisValueArrayInternal<T>(scan.Items));
}
#endregion
#region Geo redis-server 3.2
/// <summary>
/// Adds the given geospatial position (longitude, latitude, member) to the key.
/// The data is stored in a sorted set so it can be queried later with
/// GEORADIUS / GEORADIUSBYMEMBER.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="longitude">Longitude</param>
/// <param name="latitude">Latitude</param>
/// <param name="member">Member</param>
/// <returns>True when the member was newly added</returns>
public bool GeoAdd(string key, decimal longitude, decimal latitude, object member)
{
    // Delegate to the batched overload; 1 means one new element was added.
    return GeoAdd(key, (longitude, latitude, member)) == 1;
}
/// <summary>
/// Adds the given geospatial positions (longitude, latitude, member) to the key.
/// The data is stored in a sorted set so it can be queried later with
/// GEORADIUS / GEORADIUSBYMEMBER.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="values">Entries to add</param>
/// <returns>Number of elements added to the sorted set, excluding score updates of existing members</returns>
public long GeoAdd(string key, params (decimal longitude, decimal latitude, object member)[] values)
{
    if (values == null || values.Any() == false) return 0;
    var entries = values
        .Select(v => (v.longitude, v.latitude, this.SerializeRedisValueInternal(v.member)))
        .ToArray();
    return ExecuteScalar(key, (conn, redisKey) => conn.Value.GeoAdd(redisKey, entries));
}
/// <summary>
/// Returns the distance between two members of the geo set. If either member is
/// missing, null is returned. GEODIST assumes a perfectly spherical Earth, which
/// can introduce up to 0.5% error in extreme cases.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="member1">First member</param>
/// <param name="member2">Second member</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <returns>Distance as a decimal, or null when either member does not exist</returns>
public decimal? GeoDist(string key, object member1, object member2, GeoUnit unit = GeoUnit.m)
{
    var first = this.SerializeRedisValueInternal(member1);
    var second = this.SerializeRedisValueInternal(member2);
    return ExecuteScalar(key, (conn, redisKey) => conn.Value.GeoDist(redisKey, first, second, unit));
}
/// <summary>
/// Returns the Geohash representation of one or more members of the geo set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="members">Members to query</param>
/// <returns>One geohash string per requested member, in the same order</returns>
public string[] GeoHash(string key, object[] members)
{
    if (members == null || members.Any() == false) return new string[0];
    var serialized = members
        .Select(m => this.SerializeRedisValueInternal(m))
        .ToArray();
    return ExecuteScalar(key, (conn, redisKey) => conn.Value.GeoHash(redisKey, serialized));
}
/// <summary>
/// Returns the positions (longitude, latitude) of the given members of the geo set.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="members">Members to query</param>
/// <returns>One entry per requested member: (longitude, latitude), or null when the member does not exist</returns>
public (decimal longitude, decimal latitude)?[] GeoPos(string key, object[] members)
{
if (members == null || members.Any() == false) return new (decimal, decimal)?[0];
var args = members.Select(z => this.SerializeRedisValueInternal(z)).ToArray();
return ExecuteScalar(key, (c, k) => c.Value.GeoPos(k, args));
}
/// <summary>
/// Returns the members of the geo set whose positions are within the given radius
/// of the given center point.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="longitude">Center longitude</param>
/// <param name="latitude">Center latitude</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">Limits the number of results. The server may still inspect every matched element internally, so COUNT mainly reduces bandwidth, not work.</param>
/// <param name="sorting">Result ordering</param>
/// <returns></returns>
public string[] GeoRadius(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
ExecuteScalar(key, (c, k) => c.Value.GeoRadius(k, longitude, latitude, radius, unit, count, sorting, false, false, false)).Select(a => a.member).ToArray();
/// <summary>
/// Returns the members of the geo set within the given radius of the center point,
/// each deserialized to T.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="longitude">Center longitude</param>
/// <param name="latitude">Center latitude</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">Limits the number of results. The server may still inspect every matched element internally, so COUNT mainly reduces bandwidth, not work.</param>
/// <param name="sorting">Result ordering</param>
/// <returns></returns>
public T[] GeoRadius<T>(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
ExecuteScalar(key, (c, k) => c.Value.GeoRadiusBytes(k, longitude, latitude, radius, unit, count, sorting, false, false, false)).Select(a => this.DeserializeRedisValueInternal<T>(a.member)).ToArray();
/// <summary>
/// Returns the members of the geo set within the given radius of the center point,
/// including each member's distance from the center (WITHDIST).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="longitude">Center longitude</param>
/// <param name="latitude">Center latitude</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">Limits the number of results. The server may still inspect every matched element internally, so COUNT mainly reduces bandwidth, not work.</param>
/// <param name="sorting">Result ordering</param>
/// <returns></returns>
public (string member, decimal dist)[] GeoRadiusWithDist(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
ExecuteScalar(key, (c, k) => c.Value.GeoRadius(k, longitude, latitude, radius, unit, count, sorting, false, true, false)).Select(a => (a.member, a.dist)).ToArray();
/// <summary>
/// Returns the members of the geo set within the given radius of the center point,
/// including each member's distance (WITHDIST), members deserialized to T.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="longitude">Center longitude</param>
/// <param name="latitude">Center latitude</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">Limits the number of results. The server may still inspect every matched element internally, so COUNT mainly reduces bandwidth, not work.</param>
/// <param name="sorting">Result ordering</param>
/// <returns></returns>
public (T member, decimal dist)[] GeoRadiusWithDist<T>(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
ExecuteScalar(key, (c, k) => c.Value.GeoRadiusBytes(k, longitude, latitude, radius, unit, count, sorting, false, true, false)).Select(a => (this.DeserializeRedisValueInternal<T>(a.member), a.dist)).ToArray();
/// <summary>
/// Returns the members of the geo set within the given radius of the center point,
/// including each member's coordinates (WITHCOORD).
/// NOTE(review): deliberately private, unlike its public siblings — confirm before exposing.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="longitude">Center longitude</param>
/// <param name="latitude">Center latitude</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">Limits the number of results. The server may still inspect every matched element internally, so COUNT mainly reduces bandwidth, not work.</param>
/// <param name="sorting">Result ordering</param>
/// <returns></returns>
private (string member, decimal longitude, decimal latitude)[] GeoRadiusWithCoord(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
ExecuteScalar(key, (c, k) => c.Value.GeoRadius(k, longitude, latitude, radius, unit, count, sorting, true, false, false)).Select(a => (a.member, a.longitude, a.latitude)).ToArray();
/// <summary>
/// Returns the members of the geo set within the given radius of the center point,
/// including coordinates (WITHCOORD), members deserialized to T.
/// NOTE(review): deliberately private, unlike its public siblings — confirm before exposing.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="longitude">Center longitude</param>
/// <param name="latitude">Center latitude</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">Limits the number of results. The server may still inspect every matched element internally, so COUNT mainly reduces bandwidth, not work.</param>
/// <param name="sorting">Result ordering</param>
/// <returns></returns>
private (T member, decimal longitude, decimal latitude)[] GeoRadiusWithCoord<T>(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
ExecuteScalar(key, (c, k) => c.Value.GeoRadiusBytes(k, longitude, latitude, radius, unit, count, sorting, true, false, false)).Select(a => (this.DeserializeRedisValueInternal<T>(a.member), a.longitude, a.latitude)).ToArray();
/// <summary>
/// Returns the members of the geo set within the given radius of the center point,
/// including both distance and coordinates (WITHDIST + WITHCOORD).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="longitude">Center longitude</param>
/// <param name="latitude">Center latitude</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">Limits the number of results. The server may still inspect every matched element internally, so COUNT mainly reduces bandwidth, not work.</param>
/// <param name="sorting">Result ordering</param>
/// <returns></returns>
public (string member, decimal dist, decimal longitude, decimal latitude)[] GeoRadiusWithDistAndCoord(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
ExecuteScalar(key, (c, k) => c.Value.GeoRadius(k, longitude, latitude, radius, unit, count, sorting, true, true, false)).Select(a => (a.member, a.dist, a.longitude, a.latitude)).ToArray();
/// <summary>
/// Returns the members of the geo set within the given radius of the center point,
/// including distance and coordinates (WITHDIST + WITHCOORD), members deserialized to T.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="longitude">Center longitude</param>
/// <param name="latitude">Center latitude</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">Limits the number of results. The server may still inspect every matched element internally, so COUNT mainly reduces bandwidth, not work.</param>
/// <param name="sorting">Result ordering</param>
/// <returns></returns>
public (T member, decimal dist, decimal longitude, decimal latitude)[] GeoRadiusWithDistAndCoord<T>(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
ExecuteScalar(key, (c, k) => c.Value.GeoRadiusBytes(k, longitude, latitude, radius, unit, count, sorting, true, true, false)).Select(a => (this.DeserializeRedisValueInternal<T>(a.member), a.dist, a.longitude, a.latitude)).ToArray();
/// <summary>
/// Returns the members of the geo set within the given radius of an existing member.
/// NOTE(review): unlike GeoDist/GeoHash, member is not passed through
/// SerializeRedisValueInternal here — confirm the underlying client serializes it consistently.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="member">Center member</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">Limits the number of results. The server may still inspect every matched element internally, so COUNT mainly reduces bandwidth, not work.</param>
/// <param name="sorting">Result ordering</param>
/// <returns></returns>
public string[] GeoRadiusByMember(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
ExecuteScalar(key, (c, k) => c.Value.GeoRadiusByMember(k, member, radius, unit, count, sorting, false, false, false)).Select(a => a.member).ToArray();
/// <summary>
/// Returns the members of the geo set within the given radius of an existing member,
/// each deserialized to T.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="member">Center member</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">Limits the number of results. The server may still inspect every matched element internally, so COUNT mainly reduces bandwidth, not work.</param>
/// <param name="sorting">Result ordering</param>
/// <returns></returns>
public T[] GeoRadiusByMember<T>(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
this.DeserializeRedisValueArrayInternal<T>(ExecuteScalar(key, (c, k) => c.Value.GeoRadiusBytesByMember(k, member, radius, unit, count, sorting, false, false, false)).Select(a => a.member).ToArray());
/// <summary>
/// Returns the members of the geo set within the given radius of an existing member,
/// including each member's distance from the center (WITHDIST).
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="member">Center member</param>
/// <param name="radius">Radius</param>
/// <param name="unit">m = meters; km = kilometers; mi = miles; ft = feet</param>
/// <param name="count">Limits the number of results. The server may still inspect every matched element internally, so COUNT mainly reduces bandwidth, not work.</param>
/// <param name="sorting">Result ordering</param>
/// <returns></returns>
public (string member, decimal dist)[] GeoRadiusByMemberWithDist(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
ExecuteScalar(key, (c, k) => c.Value.GeoRadiusByMember(k, member, radius, unit, count, sorting, false, true, false)).Select(a => (a.member, a.dist)).ToArray();
/// <summary>
/// 以给定的成员为中心, 返回键包含的位置元素当中, 与中心的距离不超过给定最大距离的所有位置元素(包含距离)。
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="member">成员</param>
/// <param name="radius">距离</param>
/// <param name="unit">m 表示单位为米;km 表示单位为千米;mi 表示单位为英里;ft 表示单位为英尺;</param>
/// <param name="count">虽然用户可以使用 COUNT 选项去获取前 N 个匹配元素, 但是因为命令在内部可能会需要对所有被匹配的元素进行处理, 所以在对一个非常大的区域进行搜索时, 即使只使用 COUNT 选项去获取少量元素, 命令的执行速度也可能会非常慢。 但是从另一方面来说, 使用 COUNT 选项去减少需要返回的元素数量, 对于减少带宽来说仍然是非常有用的。</param>
/// <param name="sorting">排序</param>
/// <returns></returns>
public (T member, decimal dist)[] GeoRadiusByMemberWithDist<T>(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
ExecuteScalar(key, (c, k) => c.Value.GeoRadiusBytesByMember(k, member, radius, unit, count, sorting, false, true, false)).Select(a => (this.DeserializeRedisValueInternal<T>(a.member), a.dist)).ToArray();
/// <summary>
/// GEORADIUSBYMEMBER: returns the members of the geo set stored at key whose
/// distance from the position of the given member does not exceed the radius,
/// including each match's longitude and latitude.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="member">Center member whose stored position is the query origin</param>
/// <param name="radius">Radius of the search area</param>
/// <param name="unit">Distance unit: m = meters, km = kilometers, mi = miles, ft = feet</param>
/// <param name="count">Optional COUNT limit; reduces reply bandwidth, though the server may still examine every matched element.</param>
/// <param name="sorting">Optional ordering of the results relative to the center</param>
/// <returns>(member, longitude, latitude) tuples</returns>
// CONSISTENCY FIX: was `private`, unlike the sibling GeoRadiusByMember /
// WithDist / WithDistAndCoord overloads, which are all public. Widening the
// access modifier is backward-compatible.
public (string member, decimal longitude, decimal latitude)[] GeoRadiusByMemberWithCoord(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
ExecuteScalar(key, (c, k) => c.Value.GeoRadiusByMember(k, member, radius, unit, count, sorting, true, false, false)).Select(a => (a.member, a.longitude, a.latitude)).ToArray();
/// <summary>
/// GEORADIUSBYMEMBER: returns the members of the geo set stored at key whose
/// distance from the position of the given member does not exceed the radius,
/// including each match's longitude and latitude. Members are deserialized to
/// <typeparamref name="T"/>.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="member">Center member whose stored position is the query origin</param>
/// <param name="radius">Radius of the search area</param>
/// <param name="unit">Distance unit: m = meters, km = kilometers, mi = miles, ft = feet</param>
/// <param name="count">Optional COUNT limit; reduces reply bandwidth, though the server may still examine every matched element.</param>
/// <param name="sorting">Optional ordering of the results relative to the center</param>
/// <returns>(member, longitude, latitude) tuples</returns>
// CONSISTENCY FIX: was `private`, unlike the sibling GeoRadiusByMember /
// WithDist / WithDistAndCoord overloads, which are all public. Widening the
// access modifier is backward-compatible.
public (T member, decimal longitude, decimal latitude)[] GeoRadiusByMemberWithCoord<T>(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
ExecuteScalar(key, (c, k) => c.Value.GeoRadiusBytesByMember(k, member, radius, unit, count, sorting, true, false, false)).Select(a => (this.DeserializeRedisValueInternal<T>(a.member), a.longitude, a.latitude)).ToArray();
/// <summary>
/// 以给定的成员为中心, 返回键包含的位置元素当中, 与中心的距离不超过给定最大距离的所有位置元素(包含距离、经度、纬度)。
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="member">成员</param>
/// <param name="radius">距离</param>
/// <param name="unit">m 表示单位为米;km 表示单位为千米;mi 表示单位为英里;ft 表示单位为英尺;</param>
/// <param name="count">虽然用户可以使用 COUNT 选项去获取前 N 个匹配元素, 但是因为命令在内部可能会需要对所有被匹配的元素进行处理, 所以在对一个非常大的区域进行搜索时, 即使只使用 COUNT 选项去获取少量元素, 命令的执行速度也可能会非常慢。 但是从另一方面来说, 使用 COUNT 选项去减少需要返回的元素数量, 对于减少带宽来说仍然是非常有用的。</param>
/// <param name="sorting">排序</param>
/// <returns></returns>
public (string member, decimal dist, decimal longitude, decimal latitude)[] GeoRadiusByMemberWithDistAndCoord(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
ExecuteScalar(key, (c, k) => c.Value.GeoRadiusByMember(k, member, radius, unit, count, sorting, true, true, false)).Select(a => (a.member, a.dist, a.longitude, a.latitude)).ToArray();
/// <summary>
/// 以给定的成员为中心, 返回键包含的位置元素当中, 与中心的距离不超过给定最大距离的所有位置元素(包含距离、经度、纬度)。
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="member">成员</param>
/// <param name="radius">距离</param>
/// <param name="unit">m 表示单位为米;km 表示单位为千米;mi 表示单位为英里;ft 表示单位为英尺;</param>
/// <param name="count">虽然用户可以使用 COUNT 选项去获取前 N 个匹配元素, 但是因为命令在内部可能会需要对所有被匹配的元素进行处理, 所以在对一个非常大的区域进行搜索时, 即使只使用 COUNT 选项去获取少量元素, 命令的执行速度也可能会非常慢。 但是从另一方面来说, 使用 COUNT 选项去减少需要返回的元素数量, 对于减少带宽来说仍然是非常有用的。</param>
/// <param name="sorting">排序</param>
/// <returns></returns>
public (T member, decimal dist, decimal longitude, decimal latitude)[] GeoRadiusByMemberWithDistAndCoord<T>(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null) =>
ExecuteScalar(key, (c, k) => c.Value.GeoRadiusBytesByMember(k, member, radius, unit, count, sorting, true, true, false)).Select(a => (this.DeserializeRedisValueInternal<T>(a.member), a.dist, a.longitude, a.latitude)).ToArray();
#endregion
#region Stream redis-server 5.0
/// <summary>
/// XACK: removes one or more messages from the Pending Entries List (PEL) of a
/// stream consumer group. A message enters the PEL when it is delivered to a
/// consumer — typically as a side effect of XREADGROUP, or when a consumer takes
/// it over via XCLAIM — and stays there until acknowledged; such messages keep
/// appearing in the consumer's history (XREADGROUP with ID 0) and in XPENDING.
/// <para></para>
/// Once a consumer has successfully processed a message it should call XACK so
/// the message is not processed again; as a side effect the PEL entry is cleared,
/// freeing memory on the Redis server.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="group">Consumer group name</param>
/// <param name="id">Message ID to acknowledge</param>
/// <returns>Number of messages successfully acknowledged</returns>
public long XAck(string key, string group, string id) => ExecuteScalar(key, (c, k) => c.Value.XAck(k, group, id));
/// <summary>
/// XADD: appends an entry to the stream stored at key, auto-generating its ID
/// ("*"). If the key does not exist it is created automatically as a side effect
/// of running the command.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="fieldValues">Field/value pairs making up the entry</param>
/// <returns>The ID of the appended entry</returns>
public string XAdd(string key, params (string, string)[] fieldValues) => XAdd(key, 0, "*", fieldValues);
/// <summary>
/// XADD: appends an entry with the given ID to the stream stored at key. If the
/// key does not exist it is created automatically as a side effect of running
/// the command.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="id">Entry ID. "*" (the asterisk character) asks XADD to auto-generate a unique ID; alternatively a well-formed explicit ID stores the entry under exactly that ID.</param>
/// <param name="fieldValues">Field/value pairs making up the entry</param>
/// <returns>The ID of the appended entry</returns>
public string XAdd(string key, string id = "*", params (string, string)[] fieldValues) => XAdd(key, 0, id, fieldValues);
/// <summary>
/// XADD with MAXLEN trimming: appends an entry and caps the stream length. If
/// the key does not exist it is created automatically as a side effect of
/// running the command.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="maxLen">Stream length cap (MAXLEN). NOTE(review): the original comment was truncated ("当小于0时~" / "when less than 0 ~") — presumably a non-positive value disables trimming; confirm against the underlying XAdd implementation.</param>
/// <param name="id">Entry ID. "*" asks XADD to auto-generate a unique ID; a well-formed explicit ID may be supplied instead.</param>
/// <param name="fieldValues">Field/value pairs making up the entry</param>
/// <returns>The ID of the appended entry</returns>
public string XAdd(string key, long maxLen, string id = "*", params (string, string)[] fieldValues) => ExecuteScalar(key, (c, k) => c.Value.XAdd(k, maxLen, id, fieldValues));
/// <summary>
/// 在流的消费者组上下文中,此命令改变待处理消息的所有权
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="group">组</param>
/// <param name="consumer">消费者</param>
/// <param name="minIdleTime">耗秒</param>
/// <param name="id">消息id</param>
/// <returns></returns>
public (string id, string[] items)[] XClaim(string key, string group, string consumer, long minIdleTime, params string[] id) =>
ExecuteScalar(key, (c, k) => c.Value.XClaim(k, group, consumer, minIdleTime, id));
/// <summary>
/// 在流的消费者组上下文中,此命令改变待处理消息的所有权
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="group">组</param>
/// <param name="consumer">消费者</param>
/// <param name="minIdleTime">耗秒</param>
/// <param name="id">消息id</param>
/// <param name="idle">耗秒, 设置消息的空闲时间(自最后一次交付到目前的时间)。如果没有指定IDLE,则假设IDLE值为0,即时间计数被重置,因为消息现在有新的所有者来尝试处理它。</param>
/// <param name="retryCount">将重试计数器设置为指定的值。这个计数器在每一次消息被交付的时候递增。</param>
/// <param name="force">在待处理条目列表(PEL)中创建待处理消息条目,即使某些指定的ID尚未在分配给不同客户端的待处理条目列表(PEL)中。但是消息必须存在于流中,否则不存在的消息ID将会被忽略。</param>
/// <returns></returns>
public (string id, string[] items)[] XClaim(string key, string group, string consumer, long minIdleTime, string[] id, long idle, long retryCount, bool force) =>
ExecuteScalar(key, (c, k) => c.Value.XClaim(k, group, consumer, minIdleTime, id, idle, retryCount, force));
/// <summary>
/// 在流的消费者组上下文中,此命令改变待处理消息的所有权
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="group">组</param>
/// <param name="consumer">消费者</param>
/// <param name="minIdleTime">耗秒</param>
/// <param name="id">消息id</param>
/// <returns>只返回消息id</returns>
public string[] XClaimJustId(string key, string group, string consumer, long minIdleTime, params string[] id) =>
ExecuteScalar(key, (c, k) => c.Value.XClaimJustId(k, group, consumer, minIdleTime, id));
/// <summary>
/// 在流的消费者组上下文中,此命令改变待处理消息的所有权
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="group">组</param>
/// <param name="consumer">消费者</param>
/// <param name="minIdleTime">耗秒</param>
/// <param name="id">消息id</param>
/// <param name="idle">耗秒, 设置消息的空闲时间(自最后一次交付到目前的时间)。如果没有指定IDLE,则假设IDLE值为0,即时间计数被重置,因为消息现在有新的所有者来尝试处理它。</param>
/// <param name="retryCount">将重试计数器设置为指定的值。这个计数器在每一次消息被交付的时候递增。</param>
/// <param name="force">在待处理条目列表(PEL)中创建待处理消息条目,即使某些指定的ID尚未在分配给不同客户端的待处理条目列表(PEL)中。但是消息必须存在于流中,否则不存在的消息ID将会被忽略。</param>
/// <returns>只返回消息id</returns>
public string[] XClaimJustId(string key, string group, string consumer, long minIdleTime, string[] id, long idle, long retryCount, bool force) =>
ExecuteScalar(key, (c, k) => c.Value.XClaimJustId(k, group, consumer, minIdleTime, id, idle, retryCount, force));
/// <summary>
/// XDEL: removes the specified entries from the stream and returns the number of
/// entries actually deleted, which may differ from the number of IDs passed when
/// some IDs do not exist.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="id">Message IDs to delete</param>
/// <returns>Number of entries deleted</returns>
public long XDel(string key, params string[] id) => ExecuteScalar(key, (c, k) => c.Value.XDel(k, id));
/// <summary>
/// 创建一个新的消费者组
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="group">组名</param>
/// <param name="id">特殊的ID ‘$’(这表示:流中最后一项的ID)。在这种情况下,从该消费者组获取数据的消费者只能看到到达流的新元素。但如果你希望消费者组获取整个流的历史记录,使用0作为消费者组的开始ID。</param>
/// <param name="MkStream">create the empty stream if it does not exist.</param>
/// <returns>如果指定的消费者组已经存在,则该命令将返回-BUSYGROUP错误。</returns>
public string XGroupCreate(string key, string group, string id = "$", bool MkStream = false) => ExecuteScalar(key, (c, k) => c.Value.XGroupCreate(k, group, id, MkStream));
/// <summary>
/// 设置要传递的下一条消息。 通常情况下,在消费者创建时设置下一个ID,作为XGROUP CREATE的最后一个参数。 但是使用这种形式,可以在以后修改下一个ID,而无需再次删除和创建使用者组。
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="group">组名</param>
/// <param name="id">特殊的ID ‘$’(这表示:流中最后一项的ID)。在这种情况下,从该消费者组获取数据的消费者只能看到到达流的新元素。但如果你希望消费者组获取整个流的历史记录,使用0作为消费者组的开始ID。</param>
/// <returns></returns>
public string XGroupSetId(string key, string group, string id = "$") => ExecuteScalar(key, (c, k) => c.Value.XGroupSetId(k, group, id));
/// <summary>
/// 销毁消费者组,即使存在活动的消费者和待处理消息,消费者组也将被销毁,因此请确保仅在真正需要时才调用此命令。
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="group">组名</param>
/// <returns></returns>
public bool XGroupDestroy(string key, string group) => ExecuteScalar(key, (c, k) => c.Value.XGroupDestroy(k, group));
/// <summary>
/// 仅从消费者组中移除给定的消费者
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="group">组名</param>
/// <param name="consumer">消费者</param>
/// <returns></returns>
public bool XGroupDelConsumer(string key, string group, string consumer) => ExecuteScalar(key, (c, k) => c.Value.XGroupDelConsumer(k, group, consumer));
/// <summary>
/// 返回有关存储在特定键的流的一般信息
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public (long length, long radixTreeKeys, long radixTreeNodes, long groups, string lastGeneratedId, (string id, string[] items) firstEntry, (string id, string[] items) lastEntry) XInfoStream(string key) =>
ExecuteScalar(key, (c, k) => c.Value.XInfoStream(k));
/// <summary>
/// 获得与流关联的所有消费者组数据,该命令显示该组中已知的消费者数量,以及该组中的待处理消息(已传递但尚未确认)数量
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <returns></returns>
public (string name, long consumers, long pending, string lastDeliveredId)[] XInfoGroups(string key) =>
ExecuteScalar(key, (c, k) => c.Value.XInfoGroups(k));
/// <summary>
/// 取得指定消费者组中的消费者列表,返回每个消息者的空闲毫秒时间(最后一个字段)以及消费者名称和待处理消息数量
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="group"></param>
/// <returns></returns>
public (string name, long pending, long idle)[] XInfoConsumers(string key, string group) =>
ExecuteScalar(key, (c, k) => c.Value.XInfoConsumers(k, group));
/// <summary>
/// XLEN: returns the number of entries in the stream. Returns 0 when the key
/// does not exist, as if the stream were empty. Note that, unlike other Redis
/// types, zero-length streams are possible, so use TYPE or EXISTS to check
/// whether a key exists.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <returns>Stream length</returns>
public long XLen(string key) => ExecuteScalar(key, (c, k) => c.Value.XLen(k));
/// <summary>
/// XPENDING命令是检查待处理消息列表的接口,因此它是一个非常重要的命令,用于观察和了解消费者组正在发生的事情:哪些客户端是活跃的,哪些消息在等待消费,或者查看是否有空闲的消息。此外,该命令与XCLAIM一起使用,用于实现长时间故障的消费者的恢复,因此不处理某些消息:不同的消费者可以认领该消息并继续处理。
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="group"></param>
/// <returns></returns>
public (long count, string minId, string maxId, (string consumer, long count)[] pendings) XPending(string key, string group) =>
ExecuteScalar(key, (c, k) => c.Value.XPending(k, group));
/// <summary>
/// XPENDING (extended form): inspects the Pending Entries List of a consumer
/// group, returning per-message details (ID, owning consumer, idle time and
/// delivery count) for messages in the given ID range, optionally filtered to a
/// single consumer. Together with XCLAIM this is used to recover messages from
/// consumers that have failed for a long time.
/// </summary>
/// <param name="key">Key, without the configured prefix</param>
/// <param name="group">Consumer group name</param>
/// <param name="start">Start of the ID range ("-" for minimum)</param>
/// <param name="end">End of the ID range ("+" for maximum)</param>
/// <param name="count">Maximum number of entries to return</param>
/// <param name="consumer">Optional consumer name to filter by; null returns entries for all consumers</param>
/// <returns>Per-message pending details</returns>
public (string id, string consumer, long idle, long transferTimes)[] XPending(string key, string group, string start, string end, long count, string consumer = null) =>
// BUGFIX: previously passed `consumer = null`, an assignment expression that
// overwrote the caller's argument and made the consumer filter a silent no-op.
ExecuteScalar(key, (c, k) => c.Value.XPending(k, group, start, end, count, consumer));
/// <summary>
/// 返回流中满足给定ID范围的条目。范围由最小和最大ID指定。所有ID在指定的两个ID之间或与其中一个ID相等(闭合区间)的条目将会被返回。
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="start">开始id,特殊:- 和 +</param>
/// <param name="end">结束id,特殊:- 和 +</param>
/// <param name="count">数量</param>
/// <returns></returns>
public (string id, string[] items)[] XRange(string key, string start, string end, long count = 1) =>
ExecuteScalar(key, (c, k) => c.Value.XRange(k, start, end, count));
/// <summary>
/// 与XRANGE完全相同,但显著的区别是以相反的顺序返回条目,并以相反的顺序获取开始-结束参数:在XREVRANGE中,你需要先指定结束ID,再指定开始ID,该命令就会从结束ID侧开始生成两个ID之间(或完全相同)的所有元素。
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="end">结束id,特殊:- 和 +</param>
/// <param name="start">开始id,特殊:- 和 +</param>
/// <param name="count">数量</param>
/// <returns></returns>
public (string id, string[] items)[] XRevRange(string key, string end, string start, long count = 1) =>
ExecuteScalar(key, (c, k) => c.Value.XRevRange(k, end, start, count));
/// <summary>
/// 从一个或者多个流中读取数据,仅返回ID大于调用者报告的最后接收ID的条目。此命令有一个阻塞选项,用于等待可用的项目,类似于BRPOP或者BZPOPMIN等等。
/// </summary>
/// <param name="count">数量</param>
/// <param name="block">阻塞选项,毫秒</param>
/// <param name="streams">(key,id) 数组</param>
/// <returns></returns>
public (string key, (string id, string[] items)[] data)[] XRead(long count, long block, params (string key, string id)[] streams) =>
NodesNotSupport(streams.Select(a => a.key).ToArray(), null, (c, k) => c.Value.XRead(count, block, streams.Select((a, i) => (k[i], a.id)).ToArray()));
/// <summary>
/// XREADGROUP命令是XREAD命令的特殊版本,支持消费者组。
/// </summary>
/// <param name="group">组</param>
/// <param name="consumer">消费者</param>
/// <param name="count">数量</param>
/// <param name="block">阻塞选项,毫秒</param>
/// <param name="streams">(key,id) 数组</param>
/// <returns></returns>
public (string key, (string id, string[] items)[] data)[] XReadGroup(string group, string consumer, long count, long block, params (string key, string id)[] streams) =>
NodesNotSupport(streams.Select(a => a.key).ToArray(), null, (c, k) => c.Value.XReadGroup(group, consumer, count, block, streams.Select((a, i) => (k[i], a.id)).ToArray()));
/// <summary>
/// XTRIM将流裁剪为指定数量的项目,如有需要,将驱逐旧的项目(ID较小的项目)。此命令被设想为接受多种修整策略,但目前只实现了一种,即MAXLEN,并且与XADD中的MAXLEN选项完全相同。
/// </summary>
/// <param name="key">不含prefix前辍</param>
/// <param name="maxLen">上限流,当小于0时~</param>
/// <returns></returns>
public long XTrim(string key, long maxLen) => ExecuteScalar(key, (c, k) => c.Value.XTrim(k, maxLen));
#endregion
#region Bloom Filter 4.0
/// <summary>BF.RESERVE: creates an empty Bloom filter with the given error rate and initial capacity; returns true when the server replies OK.</summary>
public bool BfReserve(string key, decimal errorRate, long capacity, int expansion = 2, bool nonScaling = false) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.BfReserve(k, errorRate, capacity, expansion, nonScaling))) == "OK";
/// <summary>BF.ADD: adds a single item (serialized first) to the Bloom filter.</summary>
public bool BfAdd(string key, object item) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.BfAdd(k, this.SerializeRedisValueInternal(item))));
/// <summary>BF.MADD: adds multiple items to the Bloom filter; one result flag per item.</summary>
public bool[] BfMAdd(string key, object[] items) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.BfMAdd(k, items.Select(item => this.SerializeRedisValueInternal(item)).ToArray())))
;
/// <summary>BF.INSERT: adds multiple items, optionally creating/configuring the filter in the same call.</summary>
public bool[] BfInsert(string key, object[] items, long? capacity = null, string error = null, int expansion = 2, bool noCreate = false, bool nonScaling = false) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.BfInsert(k, items.Select(item => this.SerializeRedisValueInternal(item)).ToArray(), capacity, error, expansion, noCreate, nonScaling)));
/// <summary>BF.EXISTS: tests whether an item may exist in the filter (false positives possible, false negatives not).</summary>
public bool BfExists(string key, object item) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.BfExists(k, this.SerializeRedisValueInternal(item))));
/// <summary>BF.MEXISTS: tests multiple items at once; one result flag per item.</summary>
public bool[] BfMExists(string key, object[] items) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.BfMExists(k, items.Select(item => this.SerializeRedisValueInternal(item)).ToArray())));
/// <summary>BF.SCANDUMP: incrementally dumps the filter for backup/restore. NOTE(review): the type parameter T is unused — presumably copy/paste residue; removal would break explicit call sites, so confirm before cleaning up.</summary>
public RedisScan<byte[]> BfScanDump<T>(string key, long iter) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.BfScanDump(k, iter)));
/// <summary>BF.LOADCHUNK: restores a chunk previously produced by BF.SCANDUMP; returns true when the server replies OK.</summary>
public bool BfLoadChunk(string key, long iter, byte[] data) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.BfLoadChunk(k, iter, data))) == "OK";
/// <summary>BF.INFO: returns capacity/size/filter-count statistics for the filter.</summary>
public (long capacity, long size, long numberOfFilters, long numberOfItemsInserted, long expansionRate) BfInfo(string key) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.BfInfo(k)));
#endregion
#region RedisBloom Cuckoo Filter 4.0
/// <summary>CF.RESERVE: creates an empty Cuckoo filter with the given capacity and optional tuning parameters; returns true when the server replies OK.</summary>
public bool CfReserve(string key, long capacity, long? bucketSize = null, long? maxIterations = null, int? expansion = null) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.CfReserve(k, capacity, bucketSize, maxIterations, expansion))) == "OK";
/// <summary>CF.ADD: adds a single item (serialized first) to the Cuckoo filter.</summary>
public bool CfAdd(string key, object item) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.CfAdd(false, k, this.SerializeRedisValueInternal(item))));
/// <summary>CF.ADDNX: adds the item only if it does not already exist (the `true` flag selects the NX variant).</summary>
public bool CfAddNx(string key, object item) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.CfAdd(true, k, this.SerializeRedisValueInternal(item))));
/// <summary>CF.INSERT: adds multiple items, optionally creating the filter unless noCreate is set.</summary>
public bool[] CfInsert(string key, object[] items, long? capacity = null, bool noCreate = false) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.CfInsert(false, k, items.Select(item => this.SerializeRedisValueInternal(item)).ToArray(), capacity, noCreate)));
/// <summary>CF.INSERTNX: like CfInsert but only adds items that do not already exist.</summary>
public bool[] CfInsertNx(string key, object[] items, long? capacity = null, bool noCreate = false) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.CfInsert(true, k, items.Select(item => this.SerializeRedisValueInternal(item)).ToArray(), capacity, noCreate)));
/// <summary>CF.EXISTS: tests whether an item may exist in the filter.</summary>
public bool CfExists(string key, object item) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.CfExists(k, this.SerializeRedisValueInternal(item))));
/// <summary>CF.DEL: deletes one occurrence of the item from the filter.</summary>
public bool CfDel(string key, object item) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.CfDel(k, this.SerializeRedisValueInternal(item))));
/// <summary>CF.COUNT: estimates how many times the item occurs in the filter.</summary>
public long CfCount(string key, object item) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.CfCount(k, this.SerializeRedisValueInternal(item))));
/// <summary>CF.SCANDUMP: incrementally dumps the filter for backup/restore. NOTE(review): the type parameter T is unused — same copy/paste residue as BfScanDump; confirm before removing.</summary>
public RedisScan<byte[]> CfScanDump<T>(string key, long iter) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.CfScanDump(k, iter)));
/// <summary>CF.LOADCHUNK: restores a chunk previously produced by CF.SCANDUMP; returns true when the server replies OK.</summary>
public bool CfLoadChunk(string key, long iter, byte[] data) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.CfLoadChunk(k, iter, data))) == "OK";
/// <summary>CF.INFO: returns size/bucket/insert-delete statistics for the filter.</summary>
public (long size, long numberOfBuckets, long numberOfFilter, long numberOfItemsInserted, long numberOfItemsDeleted, long bucketSize, long expansionRate, long maxIteration) CfInfo(string key) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.CfInfo(k)));
#endregion
#region RedisBloom Count-Min Sketch 4.0
/// <summary>CMS.INITBYDIM: initializes a Count-Min Sketch with explicit width and depth; returns true when the server replies OK.</summary>
public bool CmsInitByDim(string key, long width, long depth) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.CmsInitByDim(k, width, depth))) == "OK";
/// <summary>CMS.INITBYPROB: initializes a Count-Min Sketch sized from a target error rate and probability; returns true when the server replies OK.</summary>
public bool CmsInitByProb(string key, decimal error, decimal probability) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.CmsInitByProb(k, error, probability))) == "OK";
/// <summary>CMS.INCRBY: increments the count of each item by the paired amount; returns the updated counts.</summary>
public long[] CmsIncrBy(string key, params (object item, long increment)[] items) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.CmsIncrBy(k, items.Select(item => (this.SerializeRedisValueInternal(item.item), item.increment)).ToArray())));
/// <summary>CMS.QUERY: returns the estimated count for each item.</summary>
public long[] CmsQuery(string key, params object[] items) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.CmsQuery(k, items.Select(item => this.SerializeRedisValueInternal(item)).ToArray())));
/// <summary>CMS.MERGE: merges several sketches into dest with optional weights. Routed through NodesNotSupport because the command touches multiple keys (dest plus all sources).</summary>
public bool CmsMerge(string dest, long numKeys, string[] src, long[] weights) =>
NodesNotSupport(new[] { dest }.Concat(src).ToArray(), null, (c, k) => c.Value.Write(RedisCommands.CmsMerge(k.FirstOrDefault(), numKeys, k.Where((_, idx) => idx > 0).ToArray(), weights))) == "OK";
/// <summary>CMS.INFO: returns the sketch's width, depth and total count.</summary>
public (long width, long depth, long count) CmsInfo(string key) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.CmsInfo(k)));
#endregion
#region RedisBloom TopK Filter 4.0
/// <summary>TOPK.RESERVE: initializes a TopK sketch with the given k, dimensions and decay; returns true when the server replies OK.</summary>
public bool TopkReserve(string key, long topk, long width, long depth, decimal decay) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.TopkReserve(k, topk, width, depth, decay))) == "OK";
/// <summary>TOPK.ADD: adds items to the sketch. NOTE(review): per the RedisBloom docs the reply lists items displaced from the top-k (or nil) — confirm against RedisCommands.TopkAdd.</summary>
public string[] TopkAdd(string key, object[] items) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.TopkAdd(k, items.Select(item => this.SerializeRedisValueInternal(item)).ToArray())));
/// <summary>TOPK.INCRBY: increases the count of each item by the paired increment.</summary>
public string[] TopkIncrBy(string key, params (object item, long increment)[] items) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.TopkIncrBy(k, items.Select(item => (this.SerializeRedisValueInternal(item.item), item.increment)).ToArray())));
/// <summary>TOPK.QUERY: for each item, reports whether it is currently in the top-k list.</summary>
public bool[] TopkQuery(string key, object[] items) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.TopkQuery(k, items.Select(item => this.SerializeRedisValueInternal(item)).ToArray())));
/// <summary>TOPK.COUNT: returns the (approximate) count for each item.</summary>
public long[] TopkCount(string key, object[] items) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.TopkCount(k, items.Select(item => this.SerializeRedisValueInternal(item)).ToArray())));
/// <summary>TOPK.LIST: returns the full list of items currently in the top-k.</summary>
public string[] TopkList(string key) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.TopkList(k)));
/// <summary>TOPK.INFO: returns the sketch's k, width, depth and decay parameters.</summary>
public (long k, long width, long depth, decimal decay) TopkInfo(string key) =>
ExecuteScalar(key, (c, k) => c.Value.Write(RedisCommands.TopkInfo(k)));
#endregion
/// <summary>
/// Acquires a distributed lock, retrying until <paramref name="timeoutSeconds"/>
/// elapses; returns null on timeout.
/// </summary>
/// <param name="name">Lock name (prefixed internally with "CSRedisClientLock:")</param>
/// <param name="timeoutSeconds">Both the acquisition timeout and the lock's TTL, in seconds</param>
/// <param name="autoDelay">When true, a watchdog timer fires every timeoutSeconds/2 and resets the lock TTL back to timeoutSeconds, so the lock never expires unless the process dies unexpectedly.</param>
/// <returns>The acquired lock handle, or null if it could not be acquired in time</returns>
public CSRedisClientLock Lock(string name, int timeoutSeconds, bool autoDelay = true)
{
name = $"CSRedisClientLock:{name}";
var startTime = DateTime.Now;
// Keep retrying SET NX until the acquisition window closes.
while (DateTime.Now.Subtract(startTime).TotalSeconds < timeoutSeconds)
{
// Unique token identifying this holder; the lock's Lua scripts compare it
// against the stored value so another holder's lock is never touched.
var value = Guid.NewGuid().ToString();
if (this.Set(name, value, timeoutSeconds, RedisExistence.Nx) == true)
{
double refreshSeconds = (double)timeoutSeconds / 2.0;
return new CSRedisClientLock(this, name, value, timeoutSeconds, refreshSeconds, autoDelay);
}
Thread.CurrentThread.Join(3); // brief 3 ms pause between attempts
}
return null;
}
/// <summary>
/// Attempts to acquire the distributed lock with a single try; returns null
/// immediately when the lock is already held.
/// </summary>
/// <param name="name">Lock name (prefixed internally with "CSRedisClientLock:")</param>
/// <param name="timeoutSeconds">The lock's TTL, in seconds</param>
/// <param name="autoDelay">When true, a watchdog timer fires every timeoutSeconds/2 and resets the lock TTL back to timeoutSeconds, so the lock never expires unless the process dies unexpectedly.</param>
/// <returns>The acquired lock handle, or null when acquisition failed</returns>
public CSRedisClientLock TryLock(string name, int timeoutSeconds, bool autoDelay = true)
{
name = $"CSRedisClientLock:{name}";
// Unique token identifying this holder; checked by the lock's Lua scripts.
var lockToken = Guid.NewGuid().ToString();
var acquired = this.Set(name, lockToken, timeoutSeconds, RedisExistence.Nx) == true;
if (!acquired) return null;
double watchdogIntervalSeconds = timeoutSeconds / 2.0;
return new CSRedisClientLock(this, name, lockToken, timeoutSeconds, watchdogIntervalSeconds, autoDelay);
}
}
/// <summary>
/// Handle for a distributed lock acquired via CSRedisClient.Lock / TryLock.
/// Disposing the handle releases the lock.
/// </summary>
public class CSRedisClientLock : IDisposable
{
CSRedisClient _client; // client used to run the keep-alive / release Lua scripts
string _name; // full redis key of the lock ("CSRedisClientLock:{name}")
string _value; // unique holder token; the Lua scripts only act when the stored value matches
int _timeoutSeconds;
Timer _autoDelayTimer; // watchdog that periodically refreshes the lock TTL (autoDelay only)
CancellationTokenSource _handleLostTokenSource;
/// <summary>
/// Wraps an already-acquired lock. When <paramref name="autoDelay"/> is true,
/// starts a watchdog timer that fires every <paramref name="refreshSeconds"/>
/// and resets the key's TTL back to <paramref name="timeoutSeconds"/>.
/// </summary>
public CSRedisClientLock(CSRedisClient rds, string name, string value, int timeoutSeconds, double refreshSeconds, bool autoDelay)
{
_client = rds;
_name = name;
_value = value;
_timeoutSeconds = timeoutSeconds;
if (autoDelay)
{
_handleLostTokenSource = new CancellationTokenSource();
HandleLostToken = _handleLostTokenSource.Token;
var refreshMilli = (int)(refreshSeconds * 1000);
var timeoutMilli = timeoutSeconds * 1000;
// Each tick, Refresh resets the key's TTL to the full timeout.
_autoDelayTimer = new Timer(state2 => Refresh(timeoutMilli), null, refreshMilli, refreshMilli);
}
}
/// <summary>
/// Cancelled when the watchdog thread loses its connection to Redis and can no
/// longer refresh/extend the lock TTL. Null when autoDelay was false.
/// </summary>
public CancellationToken? HandleLostToken { get; }
/// <summary>
/// Extends the lock TTL by the given number of milliseconds (added to the
/// remaining TTL). Returns true while the lock is still held by this instance,
/// false if it has expired and may have been taken by another holder.
/// </summary>
/// <param name="milliseconds">Milliseconds to add to the current TTL</param>
/// <returns>true on success, false on failure</returns>
public bool Delay(int milliseconds)
{
// The script only extends the TTL when the stored token still matches ours.
var ret = _client.Eval(@"local gva = redis.call('GET', KEYS[1])
if gva == ARGV[1] then
local ttlva = redis.call('PTTL', KEYS[1])
redis.call('PEXPIRE', KEYS[1], ARGV[2] + ttlva)
return 1
end
return 0", _name, _value, milliseconds)?.ToString() == "1";
if (ret == false) _autoDelayTimer?.Dispose(); // unexpected state: stop the watchdog timer
return ret;
}
/// <summary>
/// Refreshes the lock by resetting the key's TTL to exactly
/// <paramref name="milliseconds"/>. Returns true while the lock is still held
/// by this instance, false if it has expired and may have been taken by
/// another holder.
/// </summary>
/// <param name="milliseconds">New TTL in milliseconds</param>
/// <returns>true on success, false on failure</returns>
public bool Refresh(int milliseconds)
{
try
{
// The script only resets the TTL when the stored token still matches ours.
var ret = _client.Eval(@"local gva = redis.call('GET', KEYS[1])
if gva == ARGV[1] then
redis.call('PEXPIRE', KEYS[1], ARGV[2])
return 1
end
return 0", _name, _value, milliseconds)?.ToString() == "1";
if (ret == false)
{
// Lock lost: notify listeners and stop the watchdog.
_handleLostTokenSource?.Cancel();
_autoDelayTimer?.Dispose(); // unexpected state: stop the watchdog timer
}
return ret;
}
catch
{
_handleLostTokenSource?.Cancel();
_autoDelayTimer?.Dispose(); // unexpected state: stop the watchdog timer
return false;// must swallow the exception here: Timer callbacks have no handler and an unhandled exception would crash the whole process
}
}
/// <summary>
/// Releases the distributed lock: stops the watchdog, then deletes the key only
/// if it still holds this instance's token.
/// </summary>
/// <returns>true when the key was deleted by this call, false otherwise</returns>
public bool Unlock()
{
_handleLostTokenSource?.Dispose();
_autoDelayTimer?.Dispose();
return _client.Eval(@"local gva = redis.call('GET', KEYS[1])
if gva == ARGV[1] then
redis.call('DEL', KEYS[1])
return 1
end
return 0", _name, _value)?.ToString() == "1";
}
// IDisposable: releasing the handle releases the lock.
public void Dispose() => this.Unlock();
}
public enum KeyType { None, String, List, Set, ZSet, Hash, Stream }
public enum InfoSection { Server, Clients, Memory, Persistence, Stats, Replication, CPU, CommandStats, Cluster, Keyspace }
public enum ClientKillType { normal, slave, pubsub }
}
|
2877025939/PlanADScrollView | 1,232 | PlanADScrollView/PlanADScrollViewUITests/PlanADScrollViewUITests.m | //
// PlanADScrollViewUITests.m
// PlanADScrollViewUITests
//
// Created by anan on 2017/10/18.
// Copyright © 2017年 Plan. All rights reserved.
//
#import <XCTest/XCTest.h>
/// UI test case for the PlanADScrollView demo app. Currently contains only the
/// Xcode template scaffolding; no custom UI assertions have been written yet.
@interface PlanADScrollViewUITests : XCTestCase
@end
@implementation PlanADScrollViewUITests
- (void)setUp {
[super setUp];
// Put setup code here. This method is called before the invocation of each test method in the class.
// In UI tests it is usually best to stop immediately when a failure occurs.
self.continueAfterFailure = NO;
// UI tests must launch the application that they test. Doing this in setup will make sure it happens for each test method.
[[[XCUIApplication alloc] init] launch];
// In UI tests it’s important to set the initial state - such as interface orientation - required for your tests before they run. The setUp method is a good place to do this.
}
- (void)tearDown {
// Put teardown code here. This method is called after the invocation of each test method in the class.
[super tearDown];
}
- (void)testExample {
// Use recording to get started writing UI tests.
// Use XCTAssert and related functions to verify your tests produce the correct results.
// NOTE(review): this template test body is intentionally empty and always passes.
}
@end
27182812/ChatGLM-LLaMA-chinese-insturct | 65,796 | src/transformers/models/m2m_100/modeling_m2m_100.py | # coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch M2M100 model."""
import math
import random
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...deepspeed import is_deepspeed_zero3_enabled
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_m2m_100 import M2M100Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "M2M100Config"
_CHECKPOINT_FOR_DOC = "facebook/m2m100_418M"
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/m2m100_418M",
# See all M2M100 models at https://huggingface.co/models?filter=m2m_100
]
# Adapted from transformers.models.bart.modeling_bart.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """
    Shift input ids one token to the right: prepend `decoder_start_token_id`,
    drop the last token of every sequence, and replace any -100 label-masking
    values that remain with `pad_token_id`.
    """
    if pad_token_id is None:
        raise ValueError("self.model.config.pad_token_id has to be defined.")
    start_tokens = input_ids.new_full((input_ids.shape[0], 1), decoder_start_token_id)
    shifted = torch.cat([start_tokens, input_ids[:, :-1]], dim=-1)
    # Labels mark ignored positions with -100; those must become real pad tokens here.
    return shifted.masked_fill(shifted == -100, pad_token_id)
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
    not_padding = input_ids.ne(padding_idx).int()
    cumulative = torch.cumsum(not_padding, dim=1).type_as(not_padding)
    # Multiplying by the mask resets every padding slot back to 0 before the offset is added.
    incremental_indices = (cumulative + past_key_values_length) * not_padding
    return incremental_indices.long() + padding_idx
class M2M100SinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length."""
    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
        super().__init__()
        # Fairseq-style offset: two extra table rows are reserved ahead of the real positions.
        self.offset = 2
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
    def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        # Build (or, when `forward` sees a longer sequence, rebuild) the sinusoidal table buffer.
        emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
        if hasattr(self, "weights"):
            # in forward put the weights on the correct dtype and device of the param
            emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
        self.register_buffer("weights", emb_weights)
    @staticmethod
    def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        """
        Build sinusoidal embeddings.
        This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
        "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        # Exponentially spaced inverse frequencies; sin values fill the first half of each row,
        # cos values the second half.
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero pad
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            # The row looked up for padding tokens is forced to all zeros.
            emb[padding_idx, :] = 0
        return emb.to(torch.get_default_dtype())
    @torch.no_grad()
    def forward(
        self, input_ids: torch.Tensor = None, inputs_embeds: torch.Tensor = None, past_key_values_length: int = 0
    ):
        # Returns `(bsz, seq_len, embedding_dim)` position embeddings; `no_grad` + `detach` keep
        # the table out of the autograd graph.
        if input_ids is not None:
            bsz, seq_len = input_ids.size()
            # Create the position ids from the input token ids. Any padded tokens remain padded.
            position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
                input_ids.device
            )
        else:
            bsz, seq_len = inputs_embeds.size()[:-1]
            position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length)
        # expand embeddings if needed
        max_pos = self.padding_idx + 1 + seq_len + past_key_values_length
        if max_pos > self.weights.size(0):
            self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
        return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach()
    def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_length):
        """
        We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
        Args:
            inputs_embeds: torch.Tensor
        Returns: torch.Tensor
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]
        position_ids = torch.arange(
            self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
        )
        return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->M2M100
class M2M100Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""
    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        # 1/sqrt(head_dim), applied to the query projection rather than to the score matrix.
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # (bsz, seq_len, embed_dim) -> (bsz, num_heads, seq_len, head_dim)
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""
        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        bsz, tgt_len, _ = hidden_states.size()
        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        # `past_key_value[0].shape[2] == key_value_states.shape[1]`
        # is checking that the `sequence_length` of the `past_key_value` is the same as
        # the provided `key_value_states` to support prefix tuning
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == key_value_states.shape[1]
        ):
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)
        # Flatten the batch and head dimensions together so attention is one large bmm.
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.reshape(*proj_shape)
        value_states = value_states.reshape(*proj_shape)
        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )
        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            # The mask is additive (0 for keep, large negative for masked), applied pre-softmax.
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            # Per-head scaling applied AFTER softmax, so masked heads output zeros.
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None
        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_probs, value_states)
        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )
        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
        attn_output = self.out_proj(attn_output)
        return attn_output, attn_weights_reshaped, past_key_value
# Based on transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->M2M100
class M2M100EncoderLayer(nn.Module):
    """A single pre-LayerNorm Transformer encoder layer: self-attention followed by a feed-forward block."""
    def __init__(self, config: M2M100Config):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = M2M100Attention(
            embed_dim=self.embed_dim,
            num_heads=config.encoder_attention_heads,
            dropout=config.attention_dropout,
        )
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        layer_head_mask: torch.Tensor,
        output_attentions: bool = False,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): layer input of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): additive mask of shape `(batch, 1, tgt_len, src_len)`
                where masked positions hold very large negative values.
            layer_head_mask (`torch.FloatTensor`): per-head mask of shape `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether to also return the self-attention weights.
        Returns a tuple: `(hidden_states,)` plus the attention weights when requested.
        """
        # --- self-attention sub-block (pre-LN residual) ---
        shortcut = hidden_states
        normed_states = self.self_attn_layer_norm(hidden_states)
        attn_output, attn_weights, _ = self.self_attn(
            hidden_states=normed_states,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training)
        hidden_states = shortcut + attn_output
        # --- feed-forward sub-block (pre-LN residual) ---
        shortcut = hidden_states
        ff_states = self.activation_fn(self.fc1(self.final_layer_norm(hidden_states)))
        ff_states = nn.functional.dropout(ff_states, p=self.activation_dropout, training=self.training)
        ff_states = nn.functional.dropout(self.fc2(ff_states), p=self.dropout, training=self.training)
        hidden_states = shortcut + ff_states
        # Clamp inf/nan values that fp16 can produce after the residual additions.
        if hidden_states.dtype == torch.float16 and (
            torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
        ):
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs
# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->M2M100
class M2M100DecoderLayer(nn.Module):
    # A single pre-LayerNorm decoder layer: masked self-attention, cross-attention over the
    # encoder output, then a feed-forward block; each sub-block wrapped in a residual connection.
    def __init__(self, config: M2M100Config):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = M2M100Attention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.encoder_attn = M2M100Attention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size `(decoder_attention_heads,)`.
            past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)
            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                output_attentions=output_attentions,
            )
            hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
            hidden_states = residual + hidden_states
            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value
        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        # Output tuple: hidden states, then (optionally) both attention maps, then the cache.
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)
        if use_cache:
            outputs += (present_key_value,)
        return outputs
class M2M100PreTrainedModel(PreTrainedModel):
    """Base class tying M2M100 modules into the library's weight-init and checkpointing machinery."""
    config_class = M2M100Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["M2M100Attention"]
    def _init_weights(self, module):
        # Draw Linear/Embedding weights from N(0, init_std); zero out biases and padding rows.
        std = self.config.init_std
        if isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # The padding row must stay zero so pad tokens contribute nothing.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
    def _set_gradient_checkpointing(self, module, value=False):
        # Only the encoder/decoder stacks carry a `gradient_checkpointing` flag.
        if isinstance(module, (M2M100Decoder, M2M100Encoder)):
            module.gradient_checkpointing = value
M2M_100_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`M2M100Config`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
M2M_100_GENERATION_EXAMPLE = r"""
Translation example:
```python
>>> from transformers import AutoTokenizer, M2M100ForConditionalGeneration
>>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/m2m100_418M")
>>> text_to_translate = "Life is like a box of chocolates"
>>> model_inputs = tokenizer(text_to_translate, return_tensors="pt")
>>> # translate to French
>>> gen_tokens = model.generate(**model_inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
>>> print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True))
```
"""
M2M_100_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
M2M100 uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
input (see `past_key_values`). This is useful if you want more control over how to convert
`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
of `inputs_embeds`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class M2M100Encoder(M2M100PreTrainedModel):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`M2M100EncoderLayer`].
    Args:
        config: M2M100Config
        embed_tokens (nn.Embedding): output embedding
    """
    def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop
        embed_dim = config.d_model
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_position_embeddings
        # Optionally scale token embeddings by sqrt(d_model), per the original Transformer.
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
        self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
        if embed_tokens is not None:
            # Tie to a shared embedding table provided by the caller.
            self.embed_tokens.weight = embed_tokens.weight
        self.embed_positions = M2M100SinusoidalPositionalEmbedding(
            config.max_position_embeddings,
            embed_dim,
            self.padding_idx,
        )
        self.layers = nn.ModuleList([M2M100EncoderLayer(config) for _ in range(config.encoder_layers)])
        self.layer_norm = nn.LayerNorm(config.d_model)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.
                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        embed_pos = self.embed_positions(input_ids, inputs_embeds)
        embed_pos = embed_pos.to(inputs_embeds.device)
        hidden_states = inputs_embeds + embed_pos
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        # expand attention_mask
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            if head_mask.size()[0] != len(self.layers):
                raise ValueError(
                    f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
                    f" {head_mask.size()[0]}."
                )
        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            skip_the_layer = True if self.training and (dropout_probability < self.layerdrop) else False
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                if self.gradient_checkpointing and self.training:
                    # create gradient checkpointing function
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)
                        return custom_forward
                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(encoder_layer),
                        hidden_states,
                        attention_mask,
                        (head_mask[idx] if head_mask is not None else None),
                    )
                else:
                    layer_outputs = encoder_layer(
                        hidden_states,
                        attention_mask,
                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                        output_attentions=output_attentions,
                    )
                hidden_states = layer_outputs[0]
            if skip_the_layer:
                # Keep the output tuples aligned: a skipped layer still contributes a `None` slot.
                layer_outputs = (None, None)
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        hidden_states = self.layer_norm(hidden_states)
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
class M2M100Decoder(M2M100PreTrainedModel):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`M2M100DecoderLayer`]

    Args:
        config: M2M100Config
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)
        self.dropout = config.dropout
        # Probability of dropping a whole decoder layer during training (LayerDrop).
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_position_embeddings
        # Optional sqrt(d_model) scaling of token embeddings (Transformer convention).
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0

        self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
        if embed_tokens is not None:
            # Tie weights with the provided (shared encoder/decoder) embedding matrix.
            self.embed_tokens.weight = embed_tokens.weight

        self.embed_positions = M2M100SinusoidalPositionalEmbedding(
            config.max_position_embeddings,
            config.d_model,
            self.padding_idx,
        )
        self.layers = nn.ModuleList([M2M100DecoderLayer(config) for _ in range(config.decoder_layers)])
        self.layer_norm = nn.LayerNorm(config.d_model)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
                cross-attention on hidden heads. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of
                shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing
                `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more
                control over how to convert `input_ids` indices into associated vectors than the model's internal
                embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        # past_key_values_length: number of already-cached target positions
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale

        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
            ).to(inputs_embeds.device)

        if attention_mask is not None and combined_attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            combined_attention_mask = combined_attention_mask + _expand_mask(
                attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
            )

        # expand encoder attention mask
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])

        # embed positions
        positions = self.embed_positions(input_ids, inputs_embeds, past_key_values_length)
        positions = positions.to(inputs_embeds.device)

        hidden_states = inputs_embeds + positions
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting" " `use_cache=False`..."
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
        for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
            if attn_mask is not None:
                if attn_mask.size()[0] != len(self.layers):
                    raise ValueError(
                        f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
                        # Fix: report the size of the mask currently being validated
                        # (`attn_mask`), not `head_mask` — the old message crashed with
                        # AttributeError when only `cross_attn_head_mask` was mis-sized
                        # and `head_mask` was None.
                        f" {attn_mask.size()[0]}."
                    )
        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            skip_the_layer = self.training and (dropout_probability < self.layerdrop)
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                past_key_value = past_key_values[idx] if past_key_values is not None else None

                if self.gradient_checkpointing and self.training:
                    # create gradient checkpointing function
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            # None for past_key_value
                            return module(*inputs, output_attentions, use_cache)

                        return custom_forward

                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(decoder_layer),
                        hidden_states,
                        combined_attention_mask,
                        encoder_hidden_states,
                        encoder_attention_mask,
                        head_mask[idx] if head_mask is not None else None,
                        cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
                        None,
                    )
                else:
                    layer_outputs = decoder_layer(
                        hidden_states,
                        attention_mask=combined_attention_mask,
                        encoder_hidden_states=encoder_hidden_states,
                        encoder_attention_mask=encoder_attention_mask,
                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                        cross_attn_layer_head_mask=(
                            cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
                        ),
                        past_key_value=past_key_value,
                        output_attentions=output_attentions,
                        use_cache=use_cache,
                    )
                # Only update hidden_states when the layer actually ran; a skipped
                # (LayerDrop) layer leaves the residual stream untouched.
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                continue

            if use_cache:
                next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)
                all_cross_attentions += (layer_outputs[2],)

        hidden_states = self.layer_norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )
@add_start_docstrings(
    "The bare M2M100 Model outputting raw hidden-states without any specific head on top.",
    M2M_100_START_DOCSTRING,
)
class M2M100Model(M2M100PreTrainedModel):
    # Weights tied to `self.shared` or recomputed deterministically (sinusoidal
    # positions), so their absence from a checkpoint is expected and harmless.
    _keys_to_ignore_on_load_missing = [
        "encoder.embed_tokens.weight",
        "decoder.embed_tokens.weight",
        "encoder.embed_positions.weights",
        "encoder.embed_positions.bias",
        "decoder.embed_positions.weights",
        "decoder.embed_positions.bias",
    ]

    def __init__(self, config: M2M100Config):
        super().__init__(config)

        padding_idx, vocab_size = config.pad_token_id, config.vocab_size
        # Token embedding matrix shared by the encoder and the decoder (weight tying).
        self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)

        self.encoder = M2M100Encoder(config, self.shared)
        self.decoder = M2M100Decoder(config, self.shared)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # The shared matrix is the input embedding for both encoder and decoder.
        return self.shared

    def set_input_embeddings(self, value):
        self.shared = value
        # Keep encoder/decoder embeddings tied to the newly assigned matrix.
        self.encoder.embed_tokens = self.shared
        self.decoder.embed_tokens = self.shared

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    @add_start_docstrings_to_model_forward(M2M_100_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Seq2SeqModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]:
        # Fall back to the model config for any output-control flag the caller left unset.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Run the encoder only when the caller did not supply precomputed encoder outputs
        # (e.g. during incremental generation the encoder pass is done once and reused).
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if not return_dict:
            # Legacy tuple format: decoder outputs first, then encoder outputs.
            return decoder_outputs + encoder_outputs

        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    "The M2M100 Model with a language modeling head. Can be used for summarization.", M2M_100_START_DOCSTRING
)
class M2M100ForConditionalGeneration(M2M100PreTrainedModel):
    base_model_prefix = "model"
    # Tied / deterministically recomputed weights that may legitimately be
    # absent from a checkpoint (lm_head is presumably tied to the shared
    # embedding — see `lm_head.weight` below; confirm via `tie_weights`).
    _keys_to_ignore_on_load_missing = [
        r"encoder.version",
        r"decoder.version",
        r"lm_head.weight",
        r"encoder.embed_tokens.weight",
        r"decoder.embed_tokens.weight",
        r"encoder.embed_positions.weights",
        r"encoder.embed_positions.bias",
        r"decoder.embed_positions.weights",
        r"decoder.embed_positions.bias",
    ]

    def __init__(self, config: M2M100Config):
        super().__init__(config)
        self.model = M2M100Model(config)
        # Projects decoder hidden states to vocabulary logits; sized from the
        # shared embedding so the two stay consistent after resizing.
        self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_encoder(self):
        return self.model.get_encoder()

    def get_decoder(self):
        return self.model.get_decoder()

    def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
        # Delegates entirely to the base-class implementation.
        new_embeddings = super().resize_token_embeddings(new_num_tokens)
        return new_embeddings

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    @add_start_docstrings_to_model_forward(M2M_100_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    @add_end_docstrings(M2M_100_GENERATION_EXAMPLE)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is not None:
            if decoder_input_ids is None:
                # Teacher forcing: derive decoder inputs by shifting the labels one
                # position to the right, starting with decoder_start_token_id.
                decoder_input_ids = shift_tokens_right(
                    labels, self.config.pad_token_id, self.config.decoder_start_token_id
                )

        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        lm_logits = self.lm_head(outputs[0])

        masked_lm_loss = None
        if labels is not None:
            # CrossEntropyLoss ignores positions labelled -100 by default.
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            # Legacy tuple format: (loss?, logits, *model_outputs_without_last_hidden_state).
            output = (lm_logits,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return Seq2SeqLMOutput(
            loss=masked_lm_loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )

    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past_key_values=None,
        attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs,
    ):
        """Assemble model kwargs for one generation step (called by `generate`)."""
        # cut decoder_input_ids if past is used
        if past_key_values is not None:
            decoder_input_ids = decoder_input_ids[:, -1:]

        return {
            "input_ids": None,  # encoder_outputs is defined. input_ids not needed
            "encoder_outputs": encoder_outputs,
            "past_key_values": past_key_values,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,  # change this to avoid caching (presumably for debugging)
        }

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        """Reorder the cached key/value states along the batch axis to follow beam search."""
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
|
2877025939/PlanADScrollView | 1,454 | PlanADScrollView/PlanADScrollView/PlanADScrollView/PlanADCollectionViewCell.m | //
// PlanADCollectionViewCell.m
// PlanADScrollView
//
// Created by anan on 2017/10/18.
// Copyright © 2017年 Plan. All rights reserved.
//
#import "PlanADCollectionViewCell.h"
#import "UIImageView+WebCache.h"
@interface PlanADCollectionViewCell ()

// Image view that fills the cell; shows either a remote image (via SDWebImage)
// or a bundled image, depending on the string passed to -imageStr:placeholderimage:.
@property (nonatomic,strong) UIImageView *PlanADimageView;

@end
@implementation PlanADCollectionViewCell

- (void)awakeFromNib {
    [super awakeFromNib];
}

- (instancetype)initWithCoder:(NSCoder *)coder {
    self = [super initWithCoder:coder];
    if (self) {
        // Bug fix: cells decoded from a nib/storyboard previously performed no
        // setup, leaving PlanADimageView nil so no image was ever displayed.
        [self planAD_commonInit];
    }
    return self;
}

- (instancetype)initWithFrame:(CGRect)frame {
    self = [super initWithFrame:frame];
    if (self) {
        [self planAD_commonInit];
    }
    return self;
}

// Shared setup for both init paths: white background plus a full-bleed,
// scale-to-fill image view added to the content view.
- (void)planAD_commonInit {
    self.backgroundColor = [UIColor whiteColor];
    self.PlanADimageView = [[UIImageView alloc] init];
    self.PlanADimageView.contentMode = UIViewContentModeScaleToFill;
    [self.contentView addSubview:self.PlanADimageView];
}

- (void)layoutSubviews {
    [super layoutSubviews];
    // Keep the image view covering the whole cell.
    self.PlanADimageView.frame = CGRectMake(0, 0, self.frame.size.width, self.frame.size.height);
}

// Loads `imageStr` into the cell: URLs beginning with "http" are fetched
// asynchronously via SDWebImage with `placeholderimage` shown meanwhile;
// anything else is treated as a bundled asset name.
- (void)imageStr:(NSString *)imageStr placeholderimage:(UIImage *)placeholderimage {
    if ([imageStr hasPrefix:@"http"]) {
        [self.PlanADimageView sd_setImageWithURL:[NSURL URLWithString:imageStr] placeholderImage:placeholderimage];
    } else {
        self.PlanADimageView.image = [UIImage imageNamed:imageStr];
    }
}

@end
|
2881099/csredis | 75,583 | src/CSRedisCore/IRedisClientAsync.cs | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace CSRedis
{
/// <summary>
/// Interface for asyncronous RedisClient methods
/// </summary>
public interface IRedisClientAsync : IRedisClient
{
#if net40
#else
/// <summary>
/// Open connection to redis server
/// </summary>
/// <returns>True on success</returns>
Task<bool> ConnectAsync();
/// <summary>
/// Call arbitrary redis command
/// </summary>
/// <param name="command"></param>
/// <param name="args"></param>
/// <returns></returns>
Task<object> CallAsync(string command, params string[] args);
#region Connection
/// <summary>
/// Authenticate to the server
/// </summary>
/// <param name="password">Server password</param>
/// <returns>Task associated with status message</returns>
Task<string> AuthAsync(string password);
/// <summary>
/// Echo the given string
/// </summary>
/// <param name="message">Message to echo</param>
/// <returns>Task associated with echo response</returns>
Task<string> EchoAsync(string message);
/// <summary>
/// Ping the server
/// </summary>
/// <returns>Task associated with status message</returns>
Task<string> PingAsync();
/// <summary>
/// Close the connection
/// </summary>
/// <returns>Task associated with status message</returns>
Task<string> QuitAsync();
/// <summary>
/// Change the selected database for the current connection
/// </summary>
/// <param name="index">Zero-based database index</param>
/// <returns>Status message</returns>
Task<string> SelectAsync(int index);
#endregion
#region Keys
/// <summary>
/// Delete a key
/// </summary>
/// <param name="keys">Keys to delete</param>
/// <returns></returns>
Task<long> DelAsync(params string[] keys);
/// <summary>
/// Return a serialized version of the value stored at the specified key
/// </summary>
/// <param name="key">Key to dump</param>
/// <returns></returns>
Task<byte[]> DumpAsync(string key);
/// <summary>
/// Determine if a key exists
/// </summary>
/// <param name="key">Key to check</param>
/// <returns></returns>
Task<bool> ExistsAsync(string key);
/// <summary>
/// Set a key's time to live in seconds
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="expiration">Expiration (nearest second)</param>
/// <returns></returns>
Task<bool> ExpireAsync(string key, int expiration);
/// <summary>
/// Set a key's time to live in seconds
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="expiration">Expiration in seconds</param>
/// <returns></returns>
Task<bool> ExpireAsync(string key, TimeSpan expiration);
/// <summary>
/// Set the expiration for a key (nearest second);
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="expirationDate">Date of expiration, to nearest second</param>
/// <returns></returns>
Task<bool> ExpireAtAsync(string key, DateTime expirationDate);
/// <summary>
/// Set the expiration for a key as a UNIX timestamp
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="timestamp"></param>
/// <returns></returns>
Task<bool> ExpireAtAsync(string key, int timestamp);
/// <summary>
/// Find all keys matching the given pattern
/// </summary>
/// <param name="pattern">Pattern to match</param>
/// <returns></returns>
Task<string[]> KeysAsync(string pattern);
/// <summary>
/// Atomically transfer a key from a Redis instance to another one
/// </summary>
/// <param name="host">Remote Redis host</param>
/// <param name="port">Remote Redis port</param>
/// <param name="key">Key to migrate</param>
/// <param name="destinationDb">Remote database ID</param>
/// <param name="timeout">Timeout in milliseconds</param>
/// <returns></returns>
Task<string> MigrateAsync(string host, int port, string key, int destinationDb, int timeout);
/// <summary>
/// Atomically transfer a key from a Redis instance to another one
/// </summary>
/// <param name="host">Remote Redis host</param>
/// <param name="port">Remote Redis port</param>
/// <param name="key">Key to migrate</param>
/// <param name="destinationDb">Remote database ID</param>
/// <param name="timeout">Timeout in milliseconds</param>
/// <returns></returns>
Task<string> MigrateAsync(string host, int port, string key, int destinationDb, TimeSpan timeout);
/// <summary>
/// Move a key to another database
/// </summary>
/// <param name="key">Key to move</param>
/// <param name="database">Database destination ID</param>
/// <returns></returns>
Task<bool> MoveAsync(string key, int database);
/// <summary>
/// Get the number of references of the value associated with the specified key
/// </summary>
/// <param name="arguments">Subcommand arguments</param>
/// <returns>The type of internal representation used to store the value at the specified key</returns>
Task<string> ObjectEncodingAsync(params string[] arguments);
/// <summary>
/// Inspect the internals of Redis objects
/// </summary>
/// <param name="subCommand">Type of Object command to send</param>
/// <param name="arguments">Subcommand arguments</param>
/// <returns>Varies depending on subCommand</returns>
Task<long?> ObjectAsync(RedisObjectSubCommand subCommand, params string[] arguments);
/// <summary>
/// Remove the expiration from a key
/// </summary>
/// <param name="key">Key to modify</param>
/// <returns></returns>
Task<bool> PersistAsync(string key);
/// <summary>
/// Set a key's time to live in milliseconds
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="expiration">Expiration (nearest millisecond)</param>
/// <returns></returns>
Task<bool> PExpireAsync(string key, TimeSpan expiration);
/// <summary>
/// Set a key's time to live in milliseconds
/// </summary>
/// <param name="key">Key</param>
/// <param name="milliseconds">Expiration in milliseconds</param>
/// <returns></returns>
Task<bool> PExpireAsync(string key, long milliseconds);
/// <summary>
/// Set the expiration for a key (nearest millisecond);
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="date">Expiration date</param>
/// <returns></returns>
Task<bool> PExpireAtAsync(string key, DateTime date);
/// <summary>
/// Set the expiration for a key as a UNIX timestamp specified in milliseconds
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="timestamp">Expiration timestamp (milliseconds)</param>
/// <returns></returns>
Task<bool> PExpireAtAsync(string key, long timestamp);
/// <summary>
/// Get the time to live for a key in milliseconds
/// </summary>
/// <param name="key">Key to check</param>
/// <returns></returns>
Task<long> PTtlAsync(string key);
/// <summary>
/// Return a random key from the keyspace
/// </summary>
/// <returns></returns>
Task<string> RandomKeyAsync();
/// <summary>
/// Rename a key
/// </summary>
/// <param name="key">Key to rename</param>
/// <param name="newKey">New key name</param>
/// <returns></returns>
Task<string> RenameAsync(string key, string newKey);
/// <summary>
/// Rename a key, only if the new key does not exist
/// </summary>
/// <param name="key">Key to rename</param>
/// <param name="newKey">New key name</param>
/// <returns></returns>
Task<bool> RenameNxAsync(string key, string newKey);
/// <summary>
/// Create a key using the provided serialized value, previously obtained using dump
/// </summary>
/// <param name="key">Key to restore</param>
/// <param name="ttlMilliseconds">Time-to-live in milliseconds</param>
/// <param name="serializedValue">Serialized value from DUMP</param>
/// <returns></returns>
Task<string> RestoreAsync(string key, long ttlMilliseconds, byte[] serializedValue);
/// <summary>
/// Sort the elements in a list, set or sorted set
/// </summary>
/// <param name="key">Key to sort</param>
/// <param name="offset">Number of elements to skip</param>
/// <param name="count">Number of elements to return</param>
/// <param name="by">Sort by external key</param>
/// <param name="dir">Sort direction</param>
/// <param name="isAlpha">Sort lexicographically</param>
/// <param name="get">Retrieve external keys</param>
/// <returns></returns>
Task<string[]> SortAsync(string key, long? offset = null, long? count = null, string by = null, RedisSortDir? dir = null, bool? isAlpha = null, params string[] get);
/// <summary>
/// Sort the elements in a list, set or sorted set, then store the result in a new list
/// </summary>
/// <param name="key">Key to sort</param>
/// <param name="destination">Destination key name of stored sort</param>
/// <param name="offset">Number of elements to skip</param>
/// <param name="count">Number of elements to return</param>
/// <param name="by">Sort by external key</param>
/// <param name="dir">Sort direction</param>
/// <param name="isAlpha">Sort lexicographically</param>
/// <param name="get">Retrieve external keys</param>
/// <returns></returns>
Task<long> SortAndStoreAsync(string key, string destination, long? offset = null, long? count = null, string by = null, RedisSortDir? dir = null, bool? isAlpha = null, params string[] get);
/// <summary>
/// Get the time to live for a key
/// </summary>
/// <param name="key">Key to check</param>
/// <returns></returns>
Task<long> TtlAsync(string key);
/// <summary>
/// Determine the type stored at key
/// </summary>
/// <param name="key">Key to check</param>
/// <returns></returns>
Task<string> TypeAsync(string key);
/// <summary>
/// Iterate the set of keys in the currently selected Redis database
/// </summary>
/// <param name="cursor">The cursor returned by the server in the previous call, or 0 if this is the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Set the maximum number of elements to return</param>
/// <returns>Updated cursor and result set</returns>
Task<RedisScan<string>> ScanAsync(long cursor, string pattern = null, long? count = null);
Task<RedisScan<byte[]>> ScanBytesAsync(long cursor, string pattern = null, long? count = null);
#endregion
#region Hashes
/// <summary>
/// Delete one or more hash fields
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="fields">Fields to delete</param>
/// <returns>Number of fields removed from hash</returns>
Task<long> HDelAsync(string key, params string[] fields);
/// <summary>
/// Determine if a hash field exists
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field to check</param>
/// <returns>True if hash field exists</returns>
Task<bool> HExistsAsync(string key, string field);
/// <summary>
/// Get the value of a hash field
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field to get</param>
/// <returns>Value of hash field</returns>
Task<string> HGetAsync(string key, string field);
/// <summary>
/// Get the value of a hash field, as raw bytes
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field to get</param>
/// <returns>Value of hash field as a byte array</returns>
Task<byte[]> HGetBytesAsync(string key, string field);
/// <summary>
/// Get all the fields and values in a hash
/// </summary>
/// <typeparam name="T">Object to map hash</typeparam>
/// <param name="key">Hash key</param>
/// <returns>Strongly typed object mapped from hash</returns>
Task<T> HGetAllAsync<T>(string key)
where T : class;
/// <summary>
/// Get all the fields and values in a hash
/// </summary>
/// <param name="key">Hash key</param>
/// <returns>Dictionary mapping field names to string values</returns>
Task<Dictionary<string, string>> HGetAllAsync(string key);
/// <summary>
/// Get all the fields and values in a hash, with values as raw bytes
/// </summary>
/// <param name="key">Hash key</param>
/// <returns>Dictionary mapping field names to byte-array values</returns>
Task<Dictionary<string, byte[]>> HGetAllBytesAsync(string key);
/// <summary>
/// Increment the integer value of a hash field by the given number
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field to increment</param>
/// <param name="increment">Increment value</param>
/// <returns>Value of field after increment</returns>
Task<long> HIncrByAsync(string key, string field, long increment);
/// <summary>
/// Increment the float value of a hash field by the given number
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Field to increment</param>
/// <param name="increment">Increment value</param>
/// <returns>Value of field after increment</returns>
Task<decimal> HIncrByFloatAsync(string key, string field, decimal increment);
/// <summary>
/// Get all the fields in a hash
/// </summary>
/// <param name="key">Hash key</param>
/// <returns>All hash field names</returns>
Task<string[]> HKeysAsync(string key);
/// <summary>
/// Get the number of fields in a hash
/// </summary>
/// <param name="key">Hash key</param>
/// <returns>Number of fields in hash</returns>
Task<long> HLenAsync(string key);
/// <summary>
/// Get the values of all the given hash fields
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="fields">Fields to return</param>
/// <returns>Values of given fields</returns>
Task<string[]> HMGetAsync(string key, params string[] fields);
/// <summary>
/// Get the values of all the given hash fields, as raw bytes
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="fields">Fields to return</param>
/// <returns>Values of given fields as byte arrays</returns>
Task<byte[][]> HMGetBytesAsync(string key, params string[] fields);
/// <summary>
/// Set multiple hash fields to multiple values
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="dict">Dictionary mapping of hash</param>
/// <returns>Status code</returns>
Task<string> HMSetAsync(string key, Dictionary<string, object> dict);
/// <summary>
/// Set multiple hash fields to multiple values
/// </summary>
/// <typeparam name="T">Type of object to map hash</typeparam>
/// <param name="key">Hash key</param>
/// <param name="obj">Object mapping of hash</param>
/// <returns>Status code</returns>
Task<string> HMSetAsync<T>(string key, T obj)
where T : class;
/// <summary>
/// Set multiple hash fields to multiple values
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="keyValues">Array of [key,value,key,value,..]</param>
/// <returns>Status code</returns>
Task<string> HMSetAsync(string key, params object[] keyValues);
/// <summary>
/// Set the value of a hash field
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Hash field to set</param>
/// <param name="value">Value to set</param>
/// <returns>True if field is new</returns>
Task<bool> HSetAsync(string key, string field, object value);
/// <summary>
/// Set the value of a hash field, only if the field does not exist
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="field">Hash field to set</param>
/// <param name="value">Value to set</param>
/// <returns>True if field was set to value</returns>
Task<bool> HSetNxAsync(string key, string field, object value);
/// <summary>
/// Get all the values in a hash
/// </summary>
/// <param name="key">Hash key</param>
/// <returns>Array of all values in hash</returns>
Task<string[]> HValsAsync(string key);
/// <summary>
/// Get all the values in a hash, as raw bytes
/// </summary>
/// <param name="key">Hash key</param>
/// <returns>Array of all values in hash as byte arrays</returns>
Task<byte[][]> HValsBytesAsync(string key);
/// <summary>
/// Iterate the keys and values of a hash field
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="cursor">The cursor returned by the server in the previous call, or 0 if this is the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Maximum number of elements to return</param>
/// <returns>Updated cursor and result set</returns>
Task<RedisScan<Tuple<string, string>>> HScanAsync(string key, long cursor, string pattern = null, long? count = null);
/// <summary>
/// Iterate the keys and values of a hash field, with values as raw bytes
/// </summary>
/// <param name="key">Hash key</param>
/// <param name="cursor">The cursor returned by the server in the previous call, or 0 if this is the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Maximum number of elements to return</param>
/// <returns>Updated cursor and result set (values as byte arrays)</returns>
Task<RedisScan<Tuple<string, byte[]>>> HScanBytesAsync(string key, long cursor, string pattern = null, long? count = null);
#endregion
#region Lists
/// <summary>
/// Get an element from a list by its index
/// </summary>
/// <param name="key">List key</param>
/// <param name="index">Zero-based index of item to return</param>
/// <returns>Element at index</returns>
Task<string> LIndexAsync(string key, long index);
/// <summary>
/// Get an element from a list by its index, as raw bytes
/// </summary>
/// <param name="key">List key</param>
/// <param name="index">Zero-based index of item to return</param>
/// <returns>Element at index as a byte array</returns>
Task<byte[]> LIndexBytesAsync(string key, long index);
/// <summary>
/// Insert an element before or after another element in a list
/// </summary>
/// <param name="key">List key</param>
/// <param name="insertType">Relative position</param>
/// <param name="pivot">Relative element</param>
/// <param name="value">Element to insert</param>
/// <returns>Length of list after insert or -1 if pivot not found</returns>
Task<long> LInsertAsync(string key, RedisInsert insertType, object pivot, object value);
/// <summary>
/// Get the length of a list
/// </summary>
/// <param name="key">List key</param>
/// <returns>Length of list at key</returns>
Task<long> LLenAsync(string key);
/// <summary>
/// Remove and get the first element in a list
/// </summary>
/// <param name="key">List key</param>
/// <returns>First element in list</returns>
Task<string> LPopAsync(string key);
/// <summary>
/// Remove and get the first element in a list, as raw bytes
/// </summary>
/// <param name="key">List key</param>
/// <returns>First element in list as a byte array</returns>
Task<byte[]> LPopBytesAsync(string key);
/// <summary>
/// Prepend one or multiple values to a list
/// </summary>
/// <param name="key">List key</param>
/// <param name="values">Values to push</param>
/// <returns>Length of list after push</returns>
Task<long> LPushAsync(string key, params object[] values);
/// <summary>
/// Prepend a value to a list, only if the list exists
/// </summary>
/// <param name="key">List key</param>
/// <param name="value">Value to push</param>
/// <returns>Length of list after push</returns>
Task<long> LPushXAsync(string key, object value);
/// <summary>
/// Get a range of elements from a list
/// </summary>
/// <param name="key">List key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>List of elements in range</returns>
Task<string[]> LRangeAsync(string key, long start, long stop);
/// <summary>
/// Get a range of elements from a list, as raw bytes
/// </summary>
/// <param name="key">List key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>List of elements in range as byte arrays</returns>
Task<byte[][]> LRangeBytesAsync(string key, long start, long stop);
/// <summary>
/// Remove elements from a list
/// </summary>
/// <param name="key">List key</param>
/// <param name="count">Greater than 0: remove count elements from head to tail; less than 0: remove count elements from tail to head; equal to 0: remove all matching elements</param>
/// <param name="value">Remove elements equal to value</param>
/// <returns>Number of removed elements</returns>
Task<long> LRemAsync(string key, long count, object value);
/// <summary>
/// Set the value of an element in a list by its index
/// </summary>
/// <param name="key">List key</param>
/// <param name="index">List index to modify</param>
/// <param name="value">New element value</param>
/// <returns>Status code</returns>
Task<string> LSetAsync(string key, long index, object value);
/// <summary>
/// Trim a list to the specified range
/// </summary>
/// <param name="key">List key</param>
/// <param name="start">Zero-based start index</param>
/// <param name="stop">Zero-based stop index</param>
/// <returns>Status code</returns>
Task<string> LTrimAsync(string key, long start, long stop);
/// <summary>
/// Remove and get the last element in a list
/// </summary>
/// <param name="key">List key</param>
/// <returns>Value of last list element</returns>
Task<string> RPopAsync(string key);
/// <summary>
/// Remove and get the last element in a list, as raw bytes
/// </summary>
/// <param name="key">List key</param>
/// <returns>Value of last list element as a byte array</returns>
Task<byte[]> RPopBytesAsync(string key);
/// <summary>
/// Remove the last element in a list, append it to another list and return it
/// </summary>
/// <param name="source">List source key</param>
/// <param name="destination">Destination key</param>
/// <returns>Element being popped and pushed</returns>
Task<string> RPopLPushAsync(string source, string destination);
/// <summary>
/// Remove the last element in a list, append it to another list and return it as raw bytes
/// </summary>
/// <param name="source">List source key</param>
/// <param name="destination">Destination key</param>
/// <returns>Element being popped and pushed, as a byte array</returns>
Task<byte[]> RPopBytesLPushAsync(string source, string destination);
/// <summary>
/// Append one or multiple values to a list
/// </summary>
/// <param name="key">List key</param>
/// <param name="values">Values to push</param>
/// <returns>Length of list after push</returns>
Task<long> RPushAsync(string key, params object[] values);
/// <summary>
/// Append a value to a list, only if the list exists
/// </summary>
/// <param name="key">List key</param>
/// <param name="value">Value to push</param>
/// <returns>Length of list after push</returns>
Task<long> RPushXAsync(string key, object value);
#endregion
#region Sets
/// <summary>
/// Add one or more members to a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="members">Members to add to set</param>
/// <returns>Number of elements added to set</returns>
Task<long> SAddAsync(string key, params object[] members);
/// <summary>
/// Get the number of members in a set
/// </summary>
/// <param name="key">Set key</param>
/// <returns>Number of elements in set</returns>
Task<long> SCardAsync(string key);
/// <summary>
/// Subtract multiple sets
/// </summary>
/// <param name="keys">Set keys to subtract</param>
/// <returns>Array of elements in resulting set</returns>
Task<string[]> SDiffAsync(params string[] keys);
/// <summary>
/// Subtract multiple sets, returning elements as raw bytes
/// </summary>
/// <param name="keys">Set keys to subtract</param>
/// <returns>Array of elements in resulting set as byte arrays</returns>
Task<byte[][]> SDiffBytesAsync(params string[] keys);
/// <summary>
/// Subtract multiple sets and store the resulting set in a key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="keys">Set keys to subtract</param>
/// <returns>Number of elements in the resulting set</returns>
Task<long> SDiffStoreAsync(string destination, params string[] keys);
/// <summary>
/// Intersect multiple sets
/// </summary>
/// <param name="keys">Set keys to intersect</param>
/// <returns>Array of elements in resulting set</returns>
Task<string[]> SInterAsync(params string[] keys);
/// <summary>
/// Intersect multiple sets, returning elements as raw bytes
/// </summary>
/// <param name="keys">Set keys to intersect</param>
/// <returns>Array of elements in resulting set as byte arrays</returns>
Task<byte[][]> SInterBytesAsync(params string[] keys);
/// <summary>
/// Intersect multiple sets and store the resulting set in a key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="keys">Set keys to intersect</param>
/// <returns>Number of elements in resulting set</returns>
Task<long> SInterStoreAsync(string destination, params string[] keys);
/// <summary>
/// Determine if a given value is a member of a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="member">Member to lookup</param>
/// <returns>True if member exists in set</returns>
Task<bool> SIsMemberAsync(string key, object member);
/// <summary>
/// Get all the members in a set
/// </summary>
/// <param name="key">Set key</param>
/// <returns>All elements in the set</returns>
Task<string[]> SMembersAsync(string key);
/// <summary>
/// Get all the members in a set, as raw bytes
/// </summary>
/// <param name="key">Set key</param>
/// <returns>All elements in the set as byte arrays</returns>
Task<byte[][]> SMembersBytesAsync(string key);
/// <summary>
/// Move a member from one set to another
/// </summary>
/// <param name="source">Source key</param>
/// <param name="destination">Destination key</param>
/// <param name="member">Member to move</param>
/// <returns>True if element was moved</returns>
Task<bool> SMoveAsync(string source, string destination, object member);
/// <summary>
/// Remove and return a random member from a set
/// </summary>
/// <param name="key">Set key</param>
/// <returns>The removed element</returns>
Task<string> SPopAsync(string key);
/// <summary>
/// Remove and return a random member from a set, as raw bytes
/// </summary>
/// <param name="key">Set key</param>
/// <returns>The removed element as a byte array</returns>
Task<byte[]> SPopBytesAsync(string key);
/// <summary>
/// Remove and return one or more random members from a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="count">Number of elements to remove and return</param>
/// <returns>The removed elements</returns>
Task<string[]> SPopAsync(string key, long count);
/// <summary>
/// Remove and return one or more random members from a set, as raw bytes
/// </summary>
/// <param name="key">Set key</param>
/// <param name="count">Number of elements to remove and return</param>
/// <returns>The removed elements as byte arrays</returns>
Task<byte[][]> SPopBytesAsync(string key, long count);
/// <summary>
/// Get a random member from a set
/// </summary>
/// <param name="key">Set key</param>
/// <returns>One random element from set</returns>
Task<string> SRandMemberAsync(string key);
/// <summary>
/// Get a random member from a set, as raw bytes
/// </summary>
/// <param name="key">Set key</param>
/// <returns>One random element from set as a byte array</returns>
Task<byte[]> SRandMemberBytesAsync(string key);
/// <summary>
/// Get one or more random members from a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="count">Number of elements to return</param>
/// <returns>One or more random elements from set</returns>
Task<string[]> SRandMembersAsync(string key, long count);
/// <summary>
/// Get one or more random members from a set, as raw bytes
/// </summary>
/// <param name="key">Set key</param>
/// <param name="count">Number of elements to return</param>
/// <returns>One or more random elements from set as byte arrays</returns>
Task<byte[][]> SRandMembersBytesAsync(string key, long count);
/// <summary>
/// Remove one or more members from a set
/// </summary>
/// <param name="key">Set key</param>
/// <param name="members">Set members to remove</param>
/// <returns>Number of elements removed from set</returns>
Task<long> SRemAsync(string key, params object[] members);
/// <summary>
/// Add multiple sets
/// </summary>
/// <param name="keys">Set keys to union</param>
/// <returns>Array of elements in resulting set</returns>
Task<string[]> SUnionAsync(params string[] keys);
/// <summary>
/// Add multiple sets, returning elements as raw bytes
/// </summary>
/// <param name="keys">Set keys to union</param>
/// <returns>Array of elements in resulting set as byte arrays</returns>
Task<byte[][]> SUnionBytesAsync(params string[] keys);
/// <summary>
/// Add multiple sets and store the resulting set in a key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="keys">Set keys to union</param>
/// <returns>Number of elements in resulting set</returns>
Task<long> SUnionStoreAsync(string destination, params string[] keys);
/// <summary>
/// Iterate the elements of a set field
/// </summary>
/// <param name="key">Set key</param>
/// <param name="cursor">The cursor returned by the server in the previous call, or 0 if this is the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Maximum number of elements to return</param>
/// <returns>Updated cursor and result set</returns>
Task<RedisScan<string>> SScanAsync(string key, long cursor, string pattern = null, long? count = null);
/// <summary>
/// Iterate the elements of a set field, returning elements as raw bytes
/// </summary>
/// <param name="key">Set key</param>
/// <param name="cursor">The cursor returned by the server in the previous call, or 0 if this is the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Maximum number of elements to return</param>
/// <returns>Updated cursor and result set (elements as byte arrays)</returns>
Task<RedisScan<byte[]>> SScanBytesAsync(string key, long cursor, string pattern = null, long? count = null);
#endregion
#region Sorted Sets
/// <summary>
/// Add one or more members to a sorted set, or update its score if it already exists
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="scoreMembers">Array of member scores to add to sorted set</param>
/// <returns>Number of elements added to the sorted set (not including member updates)</returns>
Task<long> ZAddAsync<TScore, TMember>(string key, params Tuple<TScore, TMember>[] scoreMembers);
/// <summary>
/// Add one or more members to a sorted set, or update its score if it already exists
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="scoreMembers">Array of member scores [s1, m1, s2, m2, ..]</param>
/// <returns>Number of elements added to the sorted set (not including member updates)</returns>
Task<long> ZAddAsync(string key, params object[] scoreMembers);
/// <summary>
/// Get the number of members in a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <returns>Number of elements in the sorted set</returns>
Task<long> ZCardAsync(string key);
/// <summary>
/// Count the members in a sorted set with scores within the given values
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <returns>Number of elements in the specified score range</returns>
Task<long> ZCountAsync(string key, decimal min, decimal max, bool exclusiveMin = false, bool exclusiveMax = false);
/// <summary>
/// Count the members in a sorted set with scores within the given values
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <returns>Number of elements in the specified score range</returns>
Task<long> ZCountAsync(string key, string min, string max);
/// <summary>
/// Increment the score of a member in a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="increment">Increment by value</param>
/// <param name="member">Sorted set member to increment</param>
/// <returns>New score of member</returns>
Task<decimal> ZIncrByAsync(string key, decimal increment, object member);
/// <summary>
/// Intersect multiple sorted sets and store the resulting set in a new key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="weights">Multiplication factor for each input set</param>
/// <param name="aggregate">Aggregation function of resulting set</param>
/// <param name="keys">Sorted set keys to intersect</param>
/// <returns>Number of elements in the resulting sorted set</returns>
Task<long> ZInterStoreAsync(string destination, decimal[] weights = null, RedisAggregate? aggregate = null, params string[] keys);
/// <summary>
/// Intersect multiple sorted sets and store the resulting set in a new key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="keys">Sorted set keys to intersect</param>
/// <returns>Number of elements in the resulting sorted set</returns>
Task<long> ZInterStoreAsync(string destination, params string[] keys);
/// <summary>
/// Return a range of members in a sorted set, by index
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <param name="withScores">Include scores in result</param>
/// <returns>Array of elements in the specified range (with optional scores)</returns>
Task<string[]> ZRangeAsync(string key, long start, long stop, bool withScores = false);
/// <summary>
/// Return a range of members in a sorted set, by index, as raw bytes
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <param name="withScores">Include scores in result</param>
/// <returns>Array of elements in the specified range as byte arrays (with optional scores)</returns>
Task<byte[][]> ZRangeBytesAsync(string key, long start, long stop, bool withScores = false);
/// <summary>
/// Return a range of members in a sorted set, by index, with scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>Array of elements in the specified range with scores</returns>
Task<Tuple<string, decimal>[]> ZRangeWithScoresAsync(string key, long start, long stop);
/// <summary>
/// Return a range of members in a sorted set, by index, with scores, as raw bytes
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>Array of byte-array elements in the specified range with scores</returns>
Task<Tuple<byte[], decimal>[]> ZRangeBytesWithScoresAsync(string key, long start, long stop);
/// <summary>
/// Return a range of members in a sorted set, by score
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="withScores">Include scores in result</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified range (with optional scores)</returns>
Task<string[]> ZRangeByScoreAsync(string key, decimal min, decimal max, bool withScores = false, bool exclusiveMin = false, bool exclusiveMax = false, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score, as raw bytes
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="withScores">Include scores in result</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of byte-array elements in the specified range (with optional scores)</returns>
Task<byte[][]> ZRangeBytesByScoreAsync(string key, decimal min, decimal max, bool withScores = false, bool exclusiveMin = false, bool exclusiveMax = false, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="withScores">Include scores in result</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified range (with optional scores)</returns>
Task<string[]> ZRangeByScoreAsync(string key, string min, string max, bool withScores = false, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score, as raw bytes
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="withScores">Include scores in result</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of byte-array elements in the specified range (with optional scores)</returns>
Task<byte[][]> ZRangeBytesByScoreAsync(string key, string min, string max, bool withScores = false, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score, with scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified range with scores</returns>
Task<Tuple<string, decimal>[]> ZRangeByScoreWithScoresAsync(string key, decimal min, decimal max, bool exclusiveMin = false, bool exclusiveMax = false, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score, with scores, as raw bytes
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of byte-array elements in the specified range with scores</returns>
Task<Tuple<byte[], decimal>[]> ZRangeBytesByScoreWithScoresAsync(string key, decimal min, decimal max, bool exclusiveMin = false, bool exclusiveMax = false, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score, with scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified range with scores</returns>
Task<Tuple<string, decimal>[]> ZRangeByScoreWithScoresAsync(string key, string min, string max, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score, with scores, as raw bytes
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of byte-array elements in the specified range with scores</returns>
Task<Tuple<byte[], decimal>[]> ZRangeBytesByScoreWithScoresAsync(string key, string min, string max, long? offset = null, long? count = null);
/// <summary>
/// Determine the index of a member in a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="member">Member to lookup</param>
/// <returns>Rank of member or null if key does not exist</returns>
Task<long?> ZRankAsync(string key, object member);
/// <summary>
/// Remove one or more members from a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="members">Members to remove</param>
/// <returns>Number of elements removed</returns>
Task<long> ZRemAsync(string key, params object[] members);
/// <summary>
/// Remove all members in a sorted set within the given indexes
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>Number of elements removed</returns>
Task<long> ZRemRangeByRankAsync(string key, long start, long stop);
/// <summary>
/// Remove all members in a sorted set within the given scores
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <returns>Number of elements removed</returns>
Task<long> ZRemRangeByScoreAsync(string key, decimal min, decimal max, bool exclusiveMin = false, bool exclusiveMax = false);
/// <summary>
/// Remove all members in a sorted set within the given scores, specified as strings
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Minimum score</param>
/// <param name="max">Maximum score</param>
/// <returns>Number of elements removed</returns>
Task<long> ZRemRangeByScoreAsync(string key, string min, string max);
/// <summary>
/// Return a range of members in a sorted set, by index, with scores ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <param name="withScores">Include scores in result</param>
/// <returns>List of elements in the specified range (with optional scores)</returns>
Task<string[]> ZRevRangeAsync(string key, long start, long stop, bool withScores = false);
/// <summary>
/// Return a range of members in a sorted set, by index, ordered high to low, as raw bytes
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <param name="withScores">Include scores in result</param>
/// <returns>List of byte-array elements in the specified range (with optional scores)</returns>
Task<byte[][]> ZRevRangeBytesAsync(string key, long start, long stop, bool withScores = false);
/// <summary>
/// Return a range of members in a sorted set, by index, with scores ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>List of elements in the specified range with scores</returns>
Task<Tuple<string, decimal>[]> ZRevRangeWithScoresAsync(string key, long start, long stop);
/// <summary>
/// Return a range of members in a sorted set, by index, with scores ordered high to low, as raw bytes
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="start">Start offset</param>
/// <param name="stop">Stop offset</param>
/// <returns>List of byte-array elements in the specified range with scores</returns>
Task<Tuple<byte[], decimal>[]> ZRevRangeBytesWithScoresAsync(string key, long start, long stop);
/// <summary>
/// Return a range of members in a sorted set, by score, with scores ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="withScores">Include scores in result</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified score range (with optional scores)</returns>
Task<string[]> ZRevRangeByScoreAsync(string key, decimal max, decimal min, bool withScores = false, bool exclusiveMax = false, bool exclusiveMin = false, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score, ordered high to low, as raw bytes
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="withScores">Include scores in result</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of byte-array elements in the specified score range (with optional scores)</returns>
Task<byte[][]> ZRevRangeBytesByScoreAsync(string key, decimal max, decimal min, bool withScores = false, bool exclusiveMax = false, bool exclusiveMin = false, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score, with scores ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="withScores">Include scores in result</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified score range (with optional scores)</returns>
Task<string[]> ZRevRangeByScoreAsync(string key, string max, string min, bool withScores = false, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score, ordered high to low, as raw bytes
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="withScores">Include scores in result</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of byte-array elements in the specified score range (with optional scores)</returns>
Task<byte[][]> ZRevRangeBytesByScoreAsync(string key, string max, string min, bool withScores = false, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score, with scores ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified score range with scores</returns>
Task<Tuple<string, decimal>[]> ZRevRangeByScoreWithScoresAsync(string key, decimal max, decimal min, bool exclusiveMax = false, bool exclusiveMin = false, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score, with scores ordered high to low, as raw bytes
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="exclusiveMax">Maximum score is exclusive</param>
/// <param name="exclusiveMin">Minimum score is exclusive</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of byte-array elements in the specified score range with scores</returns>
Task<Tuple<byte[], decimal>[]> ZRevRangeBytesByScoreWithScoresAsync(string key, decimal max, decimal min, bool exclusiveMax = false, bool exclusiveMin = false, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score, with scores ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of elements in the specified score range with scores</returns>
Task<Tuple<string, decimal>[]> ZRevRangeByScoreWithScoresAsync(string key, string max, string min, long? offset = null, long? count = null);
/// <summary>
/// Return a range of members in a sorted set, by score, with scores ordered high to low, as raw bytes
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="max">Maximum score</param>
/// <param name="min">Minimum score</param>
/// <param name="offset">Start offset</param>
/// <param name="count">Number of elements to return</param>
/// <returns>List of byte-array elements in the specified score range with scores</returns>
Task<Tuple<byte[], decimal>[]> ZRevRangeBytesByScoreWithScoresAsync(string key, string max, string min, long? offset = null, long? count = null);
/// <summary>
/// Determine the index of a member in a sorted set, with scores ordered from high to low
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="member">Member to lookup</param>
/// <returns>Rank of member, or null if member does not exist</returns>
Task<long?> ZRevRankAsync(string key, object member);
/// <summary>
/// Get the score associated with the given member in a sorted set
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="member">Member to lookup</param>
/// <returns>Score of member, or null if member does not exist</returns>
Task<decimal?> ZScoreAsync(string key, object member);
/// <summary>
/// Add multiple sorted sets and store the resulting sorted set in a new key
/// </summary>
/// <param name="destination">Destination key</param>
/// <param name="weights">Multiplication factor for each input set</param>
/// <param name="aggregate">Aggregation function of resulting set</param>
/// <param name="keys">Sorted set keys to union</param>
/// <returns>Number of elements in the resulting sorted set</returns>
Task<long> ZUnionStoreAsync(string destination, decimal[] weights = null, RedisAggregate? aggregate = null, params string[] keys);
/// <summary>
/// Iterate the scores and elements of a sorted set field
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="cursor">The cursor returned by the server in the previous call, or 0 if this is the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Maximum number of elements to return</param>
/// <returns>Updated cursor and result set</returns>
Task<RedisScan<Tuple<string, decimal>>> ZScanAsync(string key, long cursor, string pattern = null, long? count = null);
/// <summary>
/// Iterate the scores and elements of a sorted set field, with elements as raw bytes
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="cursor">The cursor returned by the server in the previous call, or 0 if this is the first call</param>
/// <param name="pattern">Glob-style pattern to filter returned elements</param>
/// <param name="count">Maximum number of elements to return</param>
/// <returns>Updated cursor and result set (elements as byte arrays)</returns>
Task<RedisScan<Tuple<byte[], decimal>>> ZScanBytesAsync(string key, long cursor, string pattern = null, long? count = null);
/// <summary>
/// Retrieve all the elements in a sorted set with a value between min and max
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Lexicographic start value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <param name="max">Lexicographic stop value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <param name="offset">Limit result set by offset</param>
/// <param name="count">Limit result set by size</param>
/// <returns>List of elements in the specified range</returns>
Task<string[]> ZRangeByLexAsync(string key, string min, string max, long? offset = null, long? count = null);
/// <summary>
/// Retrieve all the elements in a sorted set with a value between min and max, as raw bytes
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Lexicographic start value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <param name="max">Lexicographic stop value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <param name="offset">Limit result set by offset</param>
/// <param name="count">Limit result set by size</param>
/// <returns>List of byte-array elements in the specified range</returns>
Task<byte[][]> ZRangeBytesByLexAsync(string key, string min, string max, long? offset = null, long? count = null);
/// <summary>
/// Remove all elements in the sorted set with a value between min and max
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Lexicographic start value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <param name="max">Lexicographic stop value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <returns>Number of elements removed</returns>
Task<long> ZRemRangeByLexAsync(string key, string min, string max);
/// <summary>
/// Returns the number of elements in the sorted set with a value between min and max.
/// </summary>
/// <param name="key">Sorted set key</param>
/// <param name="min">Lexicographic start value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <param name="max">Lexicographic stop value. Prefix value with '(' to indicate exclusive; '[' to indicate inclusive. Use '-' or '+' to specify infinity.</param>
/// <returns>Number of elements in the specified lexicographic range</returns>
Task<long> ZLexCountAsync(string key, string min, string max);
#endregion
#region Pub/Sub
/// <summary>
/// Post a message to a channel
/// </summary>
/// <param name="channel">Channel to post message</param>
/// <param name="message">Message to send</param>
/// <returns>Number of clients that received the message</returns>
Task<long> PublishAsync(string channel, string message);
/// <summary>
/// List the currently active channels
/// </summary>
/// <param name="pattern">Glob-style channel pattern</param>
/// <returns>Active channel names</returns>
Task<string[]> PubSubChannelsAsync(string pattern = null);
/// <summary>
/// Return the number of subscribers (not counting clients subscribed to patterns) for the specified channels
/// </summary>
/// <param name="channels">Channels to query</param>
/// <returns>Channel names and counts</returns>
Task<Tuple<string, long>[]> PubSubNumSubAsync(params string[] channels);
/// <summary>
/// Return the number of subscriptions to patterns
/// </summary>
/// <returns>The number of patterns all the clients are subscribed to</returns>
Task<long> PubSubNumPatAsync();
#endregion
#region Scripting
/// <summary>
/// Execute a Lua script server side
/// </summary>
/// <param name="script">Script to run on server</param>
/// <param name="keys">Keys used by script</param>
/// <param name="arguments">Arguments to pass to script</param>
/// <returns>Redis object</returns>
Task<object> EvalAsync(string script, string[] keys, params object[] arguments);
/// <summary>
/// Execute a Lua script server side, sending only the script's cached SHA hash
/// </summary>
/// <param name="sha1">SHA1 hash of script</param>
/// <param name="keys">Keys used by script</param>
/// <param name="arguments">Arguments to pass to script</param>
/// <returns>Redis object</returns>
Task<object> EvalSHAAsync(string sha1, string[] keys, params object[] arguments);
/// <summary>
/// Check existence of script SHA hashes in the script cache
/// </summary>
/// <param name="sha1s">SHA1 script hashes</param>
/// <returns>Array of boolean values indicating script existence on server</returns>
Task<bool[]> ScriptExistsAsync(params string[] sha1s);
/// <summary>
/// Remove all scripts from the script cache
/// </summary>
/// <returns>Status code</returns>
Task<string> ScriptFlushAsync();
/// <summary>
/// Kill the script currently in execution
/// </summary>
/// <returns>Status code</returns>
Task<string> ScriptKillAsync();
/// <summary>
/// Load the specified Lua script into the script cache
/// </summary>
/// <param name="script">Lua script to load</param>
/// <returns>SHA1 hash of script</returns>
Task<string> ScriptLoadAsync(string script);
#endregion
#region Strings
/// <summary>
/// Append a value to a key
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to append to key</param>
/// <returns>Length of string after append</returns>
Task<long> AppendAsync(string key, object value);
/// <summary>
/// Count set bits in a string
/// </summary>
/// <param name="key">Key to check</param>
/// <param name="start">Start offset</param>
/// <param name="end">Stop offset</param>
/// <returns>Number of bits set to 1</returns>
Task<long> BitCountAsync(string key, long? start = null, long? end = null);
/// <summary>
/// Perform bitwise operations between strings
/// </summary>
/// <param name="operation">Bit command to execute</param>
/// <param name="destKey">Store result in destination key</param>
/// <param name="keys">Keys to operate</param>
/// <returns>Size of string stored in the destination key</returns>
Task<long> BitOpAsync(RedisBitOp operation, string destKey, params string[] keys);
/// <summary>
/// Find first bit set or clear in a string
/// </summary>
/// <param name="key">Key to examine</param>
/// <param name="bit">Bit value (1 or 0)</param>
/// <param name="start">Examine string at specified byte offset</param>
/// <param name="end">Examine string to specified byte offset</param>
/// <returns>Position of the first bit set to the specified value</returns>
Task<long> BitPosAsync(string key, bool bit, long? start = null, long? end = null);
/// <summary>
/// Decrement the integer value of a key by one
/// </summary>
/// <param name="key">Key to modify</param>
/// <returns>Value of key after decrement</returns>
Task<long> DecrAsync(string key);
/// <summary>
/// Decrement the integer value of a key by the given number
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="decrement">Decrement value</param>
/// <returns>Value of key after decrement</returns>
Task<long> DecrByAsync(string key, long decrement);
/// <summary>
/// Get the value of a key
/// </summary>
/// <param name="key">Key to lookup</param>
/// <returns>Value of key</returns>
Task<string> GetAsync(string key);
Task<byte[]> GetBytesAsync(string key);
/// <summary>
/// Returns the bit value at offset in the string value stored at key
/// </summary>
/// <param name="key">Key to lookup</param>
/// <param name="offset">Offset of key to check</param>
/// <returns>Bit value stored at offset</returns>
Task<bool> GetBitAsync(string key, uint offset);
/// <summary>
/// Get a substring of the string stored at a key
/// </summary>
/// <param name="key">Key to lookup</param>
/// <param name="start">Start offset</param>
/// <param name="end">End offset</param>
/// <returns>Substring in the specified range</returns>
Task<string> GetRangeAsync(string key, long start, long end);
Task<byte[]> GetRangeBytesAsync(string key, long start, long end);
/// <summary>
/// Set the string value of a key and return its old value
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <returns>Old value stored at key, or null if key did not exist</returns>
Task<string> GetSetAsync(string key, object value);
Task<byte[]> GetSetBytesAsync(string key, object value);
/// <summary>
/// Increment the integer value of a key by one
/// </summary>
/// <param name="key">Key to modify</param>
/// <returns>Value of key after increment</returns>
Task<long> IncrAsync(string key);
/// <summary>
/// Increment the integer value of a key by the given amount
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="increment">Increment amount</param>
/// <returns>Value of key after increment</returns>
Task<long> IncrByAsync(string key, long increment);
/// <summary>
/// Increment the float value of a key by the given amount
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="increment">Increment amount</param>
/// <returns>Value of key after increment</returns>
Task<decimal> IncrByFloatAsync(string key, decimal increment);
/// <summary>
/// Get the values of all the given keys
/// </summary>
/// <param name="keys">Keys to lookup</param>
/// <returns>Array of values at the specified keys</returns>
Task<string[]> MGetAsync(params string[] keys);
Task<byte[][]> MGetBytesAsync(params string[] keys);
/// <summary>
/// Set multiple keys to multiple values
/// </summary>
/// <param name="keyValues">Key values to set</param>
/// <returns>Status code</returns>
Task<string> MSetAsync(params Tuple<string, object>[] keyValues);
/// <summary>
/// Set multiple keys to multiple values
/// </summary>
/// <param name="keyValues">Key values to set [k1, v1, k2, v2, ..]</param>
/// <returns>Status code</returns>
Task<string> MSetAsync(params object[] keyValues);
/// <summary>
/// Set multiple keys to multiple values, only if none of the keys exist
/// </summary>
/// <param name="keyValues">Key values to set</param>
/// <returns>True if all keys were set</returns>
Task<bool> MSetNxAsync(params Tuple<string, object>[] keyValues);
/// <summary>
/// Set multiple keys to multiple values, only if none of the keys exist
/// </summary>
/// <param name="keyValues">Key values to set [k1, v1, k2, v2, ..]</param>
/// <returns>True if all keys were set</returns>
Task<bool> MSetNxAsync(params object[] keyValues);
/// <summary>
/// Set the value and expiration in milliseconds of a key
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="milliseconds">Expiration in milliseconds</param>
/// <param name="value">Value to set</param>
/// <returns>Status code</returns>
Task<string> PSetExAsync(string key, long milliseconds, object value);
/// <summary>
/// Set the string value of a key
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <returns>Status code</returns>
Task<string> SetAsync(string key, object value);
/// <summary>
/// Set the string value of a key with atomic expiration and existence condition
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <param name="expiration">Set expiration to nearest millisecond</param>
/// <param name="condition">Set key if existence condition</param>
/// <returns>Status code, or null if condition not met</returns>
Task<string> SetAsync(string key, object value, TimeSpan expiration, RedisExistence? condition = null);
/// <summary>
/// Set the string value of a key with atomic expiration and existence condition
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <param name="expirationSeconds">Set expiration to nearest second</param>
/// <param name="condition">Set key if existence condition</param>
/// <returns>Status code, or null if condition not met</returns>
Task<string> SetAsync(string key, object value, int? expirationSeconds = null, RedisExistence? condition = null);
/// <summary>
/// Set the string value of a key with atomic expiration and existence condition
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <param name="expirationMilliseconds">Set expiration to nearest millisecond</param>
/// <param name="condition">Set key if existence condition</param>
/// <returns>Status code, or null if condition not met</returns>
Task<string> SetAsync(string key, object value, long? expirationMilliseconds = null, RedisExistence? condition = null);
/// <summary>
/// Sets or clears the bit at offset in the string value stored at key
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="offset">Modify key at offset</param>
/// <param name="value">Value to set (on or off)</param>
/// <returns>Original bit stored at offset</returns>
Task<bool> SetBitAsync(string key, uint offset, bool value);
/// <summary>
/// Set the value and expiration of a key
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="seconds">Expiration in seconds</param>
/// <param name="value">Value to set</param>
/// <returns>Status code</returns>
Task<string> SetExAsync(string key, long seconds, object value);
/// <summary>
/// Set the value of a key, only if the key does not exist
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="value">Value to set</param>
/// <returns>True if key was set</returns>
Task<bool> SetNxAsync(string key, object value);
/// <summary>
/// Overwrite part of a string at key starting at the specified offset
/// </summary>
/// <param name="key">Key to modify</param>
/// <param name="offset">Start offset</param>
/// <param name="value">Value to write at offset</param>
/// <returns>Length of string after operation</returns>
Task<long> SetRangeAsync(string key, uint offset, object value);
/// <summary>
/// Get the length of the value stored in a key
/// </summary>
/// <param name="key">Key to lookup</param>
/// <returns>Length of string at key</returns>
Task<long> StrLenAsync(string key);
#endregion
#region Server
/// <summary>
/// Asynchronously rewrite the append-only file
/// </summary>
/// <returns>Status code</returns>
Task<string> BgRewriteAofAsync();
/// <summary>
/// Asynchronously save the dataset to disk
/// </summary>
/// <returns>Status code</returns>
Task<string> BgSaveAsync();
/// <summary>
/// Get the current connection name
/// </summary>
/// <returns>Connection name</returns>
Task<string> ClientGetNameAsync();
/// <summary>
/// Kill the connection of a client
/// </summary>
/// <param name="ip">Client IP returned from CLIENT LIST</param>
/// <param name="port">Client port returned from CLIENT LIST</param>
/// <returns>Status code</returns>
Task<string> ClientKillAsync(string ip, int port);
/// <summary>
/// Kill the connection of a client
/// </summary>
/// <param name="addr">Client address</param>
/// <param name="id">Client ID</param>
/// <param name="type">Client type</param>
/// <param name="skipMe">Set to true to skip calling client</param>
/// <returns>The number of clients killed</returns>
Task<long> ClientKillAsync(string addr = null, string id = null, string type = null, bool? skipMe = null);
/// <summary>
/// Get the list of client connections
/// </summary>
/// <returns>Formatted string of clients</returns>
Task<string> ClientListAsync();
/// <summary>
/// Suspend all the Redis clients for the specified amount of time
/// </summary>
/// <param name="milliseconds">Time in milliseconds to suspend</param>
/// <returns>Status code</returns>
Task<string> ClientPauseAsync(int milliseconds);
/// <summary>
/// Suspend all the Redis clients for the specified amount of time
/// </summary>
/// <param name="timeout">Time to suspend</param>
/// <returns>Status code</returns>
Task<string> ClientPauseAsync(TimeSpan timeout);
/// <summary>
/// Set the current connection name
/// </summary>
/// <param name="connectionName">Name of connection (no spaces)</param>
/// <returns>Status code</returns>
Task<string> ClientSetNameAsync(string connectionName);
/// <summary>
/// Get the value of a configuration parameter
/// </summary>
/// <param name="parameter">Configuration parameter to lookup</param>
/// <returns>Configuration value</returns>
Task<Tuple<string, string>[]> ConfigGetAsync(string parameter);
/// <summary>
/// Reset the stats returned by INFO
/// </summary>
/// <returns>Status code</returns>
Task<string> ConfigResetStatAsync();
/// <summary>
/// Rewrites the redis.conf file
/// </summary>
/// <returns>Status code</returns>
Task<string> ConfigRewriteAsync();
/// <summary>
/// Set a configuration parameter to the given value
/// </summary>
/// <param name="parameter">Parameter to set</param>
/// <param name="value">Value to set</param>
/// <returns>Status code</returns>
Task<string> ConfigSetAsync(string parameter, string value);
/// <summary>
/// Return the number of keys in the selected database
/// </summary>
/// <returns>Number of keys</returns>
Task<long> DbSizeAsync();
/// <summary>
/// Make the server crash :(
/// </summary>
/// <returns>Status code</returns>
Task<string> DebugSegFaultAsync();
/// <summary>
/// Remove all keys from all databases
/// </summary>
/// <returns>Status code</returns>
Task<string> FlushAllAsync();
/// <summary>
/// Remove all keys from the current database
/// </summary>
/// <returns>Status code</returns>
Task<string> FlushDbAsync();
/// <summary>
/// Get information and statistics about the server
/// </summary>
/// <param name="section">all|default|server|clients|memory|persistence|stats|replication|cpu|commandstats|cluster|keyspace</param>
/// <returns>Formatted string</returns>
Task<string> InfoAsync(string section = null);
/// <summary>
/// Get the timestamp of the last successful save to disk
/// </summary>
/// <returns>Date of last save</returns>
Task<DateTime> LastSaveAsync();
/// <summary>
/// Provide information on the role of a Redis instance in the context of replication
/// </summary>
/// <returns>Role information</returns>
Task<RedisRole> RoleAsync();
/// <summary>
/// Synchronously save the dataset to disk
/// </summary>
/// <returns>Status code</returns>
Task<string> SaveAsync();
/// <summary>
/// Synchronously save the dataset to disk and then shut down the server
/// </summary>
/// <param name="save">Force a DB saving operation even if no save points are configured</param>
/// <returns>Status code</returns>
Task<string> ShutdownAsync(bool? save = null);
/// <summary>
/// Make the server a slave of another instance or promote it as master
/// </summary>
/// <param name="host">Master host</param>
/// <param name="port">master port</param>
/// <returns>Status code</returns>
Task<string> SlaveOfAsync(string host, int port);
/// <summary>
/// Turn off replication, turning the Redis server into a master
/// </summary>
/// <returns>Status code</returns>
Task<string> SlaveOfNoOneAsync();
/// <summary>
/// Get latest entries from the slow log
/// </summary>
/// <param name="count">Limit entries returned</param>
/// <returns>Slow log entries</returns>
Task<RedisSlowLogEntry[]> SlowLogGetAsync(long? count = null);
/// <summary>
/// Get the length of the slow log
/// </summary>
/// <returns>Slow log length</returns>
Task<long> SlowLogLenAsync();
/// <summary>
/// Reset the slow log
/// </summary>
/// <returns>Status code</returns>
Task<string> SlowLogResetAsync();
/// <summary>
/// Internal command used for replication
/// </summary>
/// <returns>Byte array of Redis sync data</returns>
Task<byte[]> SyncAsync();
/// <summary>
/// Return the current server time
/// </summary>
/// <returns>Server time</returns>
Task<DateTime> TimeAsync();
#endregion
#region Transactions
/// <summary>
/// Mark the start of a transaction block
/// </summary>
/// <returns>Status code</returns>
Task<string> MultiAsync();
/// <summary>
/// Discard all commands issued after MULTI
/// </summary>
/// <returns>Status code</returns>
Task<string> DiscardAsync();
/// <summary>
/// Execute all commands issued after MULTI
/// </summary>
/// <returns>Array of output from all transaction commands</returns>
Task<object[]> ExecAsync();
/// <summary>
/// Forget about all watched keys
/// </summary>
/// <returns>Status code</returns>
Task<string> UnwatchAsync();
/// <summary>
/// Watch the given keys to determine execution of the MULTI/EXEC block
/// </summary>
/// <param name="keys">Keys to watch</param>
/// <returns>Status code</returns>
Task<string> WatchAsync(params string[] keys);
#endregion
#region HyperLogLog
/// <summary>
/// Adds the specified elements to the specified HyperLogLog.
/// </summary>
/// <param name="key">Key to update</param>
/// <param name="elements">Elements to add</param>
/// <returns>1 if at least 1 HyperLogLog internal register was altered. 0 otherwise.</returns>
Task<bool> PfAddAsync(string key, params object[] elements);
/// <summary>
/// Return the approximated cardinality of the set(s) observed by the HyperLogLog at key(s);
/// </summary>
/// <param name="keys">One or more HyperLogLog keys to examine</param>
/// <returns>Approximated number of unique elements observed via PFADD</returns>
Task<long> PfCountAsync(params string[] keys);
/// <summary>
/// Merge N different HyperLogLogs into a single key.
/// </summary>
/// <param name="destKey">Where to store the merged HyperLogLogs</param>
/// <param name="sourceKeys">The HyperLogLogs keys that will be combined</param>
/// <returns>Status code</returns>
Task<string> PfMergeAsync(string destKey, params string[] sourceKeys);
#endregion
#region Geo redis-server 3.2
/// <summary>
/// Add the specified geospatial members (longitude, latitude, member) to the geo set stored at key
/// </summary>
/// <param name="key">Geo set key</param>
/// <param name="values">Longitude/latitude/member tuples to add</param>
/// <returns>Number of new members added (per Redis GEOADD, members whose position was merely updated are presumably not counted — confirm with implementation)</returns>
Task<long> GeoAddAsync(string key, params (decimal longitude, decimal latitude, object member)[] values);
/// <summary>
/// Return the distance between two members of the geo set
/// </summary>
/// <param name="key">Geo set key</param>
/// <param name="member1">First member</param>
/// <param name="member2">Second member</param>
/// <param name="unit">Unit of the returned distance (defaults to meters)</param>
/// <returns>Distance between the two members, or null if either member does not exist</returns>
Task<decimal?> GeoDistAsync(string key, object member1, object member2, GeoUnit unit = GeoUnit.m);
/// <summary>
/// Return Geohash strings representing the positions of the given members
/// </summary>
/// <param name="key">Geo set key</param>
/// <param name="members">Members to look up</param>
/// <returns>One Geohash string per requested member (null entries presumably indicate missing members — confirm with implementation)</returns>
Task<string[]> GeoHashAsync(string key, object[] members);
/// <summary>
/// Return the longitude/latitude of the given members
/// </summary>
/// <param name="key">Geo set key</param>
/// <param name="members">Members to look up</param>
/// <returns>Position per member; null for members not present in the set</returns>
Task<(decimal longitude, decimal latitude)?[]> GeoPosAsync(string key, object[] members);
/// <summary>
/// Return the members within the given radius of the specified point
/// </summary>
/// <param name="key">Geo set key</param>
/// <param name="longitude">Center point longitude</param>
/// <param name="latitude">Center point latitude</param>
/// <param name="radius">Search radius</param>
/// <param name="unit">Unit of the radius (defaults to meters)</param>
/// <param name="count">Limit the number of results</param>
/// <param name="sorting">Order results by distance from the center</param>
/// <param name="withCoord">Request longitude/latitude in the result tuples</param>
/// <param name="withDist">Request dist in the result tuples</param>
/// <param name="withHash">Request hash in the result tuples</param>
/// <returns>Matching members; tuple fields other than member are presumably only meaningful when the corresponding with* flag is set — confirm with implementation</returns>
Task<(string member, decimal dist, decimal longitude, decimal latitude, long hash)[]> GeoRadiusAsync(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null, bool withCoord = false, bool withDist = false, bool withHash = false);
/// <summary>
/// Same as GeoRadiusAsync, but returns member names as raw byte arrays
/// </summary>
Task<(byte[] member, decimal dist, decimal longitude, decimal latitude, long hash)[]> GeoRadiusBytesAsync(string key, decimal longitude, decimal latitude, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null, bool withCoord = false, bool withDist = false, bool withHash = false);
/// <summary>
/// Return the members within the given radius of an existing member's position
/// </summary>
/// <param name="key">Geo set key</param>
/// <param name="member">Existing member whose position is the center of the search</param>
/// <param name="radius">Search radius</param>
/// <param name="unit">Unit of the radius (defaults to meters)</param>
/// <param name="count">Limit the number of results</param>
/// <param name="sorting">Order results by distance from the center</param>
/// <param name="withCoord">Request longitude/latitude in the result tuples</param>
/// <param name="withDist">Request dist in the result tuples</param>
/// <param name="withHash">Request hash in the result tuples</param>
/// <returns>Matching members; tuple fields other than member are presumably only meaningful when the corresponding with* flag is set — confirm with implementation</returns>
Task<(string member, decimal dist, decimal longitude, decimal latitude, long hash)[]> GeoRadiusByMemberAsync(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null, bool withCoord = false, bool withDist = false, bool withHash = false);
/// <summary>
/// Same as GeoRadiusByMemberAsync, but returns member names as raw byte arrays
/// </summary>
Task<(byte[] member, decimal dist, decimal longitude, decimal latitude, long hash)[]> GeoRadiusBytesByMemberAsync(string key, object member, decimal radius, GeoUnit unit = GeoUnit.m, long? count = null, GeoOrderBy? sorting = null, bool withCoord = false, bool withDist = false, bool withHash = false);
#endregion
#endif
}
}
|
2877025939/PlanADScrollView | 1,129 | PlanADScrollView/PlanADScrollView/PlanADScrollView/PlanADScrollView.h | //
// PlanADScrollView.h
// PlanADScrollView
//
// Created by anan on 2017/10/18.
// Copyright © 2017年 Plan. All rights reserved.
//
#import <UIKit/UIKit.h>
/// Style of the page-control indicator shown by the ad scroller.
/// Declared with NS_ENUM to give the enum a fixed underlying type
/// (better compiler warnings and Swift bridging) — the constant names
/// and values are identical to the original anonymous enum, so all
/// existing callers are unaffected.
typedef NS_ENUM(NSInteger, PlanPageContolStyle) {
    PlanPageContolStyleNone,      ///< Round dots
    PlanPageContolStyleRectangle, ///< Bar-shaped indicators
    PlanPageContolStyleImage,     ///< Custom images; see -currentImage:pageImage:
};
@protocol PlanADScrollViewDelegate <NSObject>
/**
 Delegate callback invoked when an image in the scroller is tapped.
 @param index Index of the tapped image — presumably its position in the
              imageUrls array passed at initialization; confirm against the
              implementation.
 */
- (void)PlanADScrollViewdidSelectAtIndex:(NSInteger )index;
@end
@interface PlanADScrollView : UIView
/// Delegate notified when an image is tapped; weak to avoid a retain cycle.
@property(nonatomic,weak) id<PlanADScrollViewDelegate> delegate;
/**
 Style of the page-control indicator (dots, bars, or custom images).
 */
@property (nonatomic)PlanPageContolStyle pageContolStyle;
/**
 Initializer.
 imageUrls        Array of images to display — may be local images or
                  network image URLs (per the original author's note).
 placeholderimage Placeholder image shown while an image loads.
 */
- (instancetype)initWithFrame:(CGRect)frame
imageUrls:(NSArray *)imageUrls
placeholderimage:(UIImage*)placeholderimage;
/**
 Configure the indicator images used when pageContolStyle is
 PlanPageContolStyleImage. If this is never called, default images are used.
 currentImage Image for the indicator of the currently visible page.
 pageImage    Image for the indicators of all other pages.
 */
-(void)currentImage:(UIImage *)currentImage
pageImage:(UIImage*)pageImage;
@end
|
Subsets and Splits
PyTorch Neural Network Imports
This query filters for code examples containing a specific PyTorch import pattern. It is useful for finding code snippets that use PyTorch's neural network module, but it doesn't provide deeper analytical insights about the dataset.
HTML Files in Train Set
Retrieves all records from the dataset where the file path ends with .html or .htm, providing a basic filter for HTML files.
SQL Console for nick007x/github-code-2025
Retrieves 200 file paths that end with '.html' or '.htm', providing a basic overview of HTML files in the dataset.
Top HTML Files
The query retrieves a sample of HTML file paths, providing basic filtering but limited analytical value.
CSharp Repositories Excluding Unity
Retrieves all records for repositories that contain C# files but are not related to Unity, providing a basic filter of the dataset.
C# File Count per Repository
Counts the total number of C# files across distinct repositories, providing a basic measure of C# file presence.
SQL Console for nick007x/github-code-2025
Lists unique repository IDs containing C# files, providing basic filtering to understand which repositories have C# code.
Select Groovy Files: Train Set
Retrieves the first 1000 entries from the 'train' dataset where the file path ends with '.groovy', providing a basic sample of Groovy files.
GitHub Repos with WiFiClientSecure
Finds specific file paths in repositories that contain particular code snippets related to WiFiClientSecure and ChatGPT, providing basic filtering of relevant files.