| repo_id (string) | size (int64) | file_path (string) | content (string) |
|---|---|---|---|
27182812/ChatGLM-LLaMA-chinese-insturct | 28,303 | src/transformers/onnx/features.py | import os
from functools import partial, reduce
from typing import TYPE_CHECKING, Callable, Dict, Optional, Tuple, Type, Union
import transformers
from .. import PretrainedConfig, is_tf_available, is_torch_available
from ..utils import TF2_WEIGHTS_NAME, WEIGHTS_NAME, logging
from .config import OnnxConfig
if TYPE_CHECKING:
from transformers import PreTrainedModel, TFPreTrainedModel
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
if is_torch_available():
from transformers.models.auto import (
AutoModel,
AutoModelForCausalLM,
AutoModelForImageClassification,
AutoModelForImageSegmentation,
AutoModelForMaskedImageModeling,
AutoModelForMaskedLM,
AutoModelForMultipleChoice,
AutoModelForObjectDetection,
AutoModelForQuestionAnswering,
AutoModelForSemanticSegmentation,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForSpeechSeq2Seq,
AutoModelForTokenClassification,
AutoModelForVision2Seq,
)
if is_tf_available():
from transformers.models.auto import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForMultipleChoice,
TFAutoModelForQuestionAnswering,
TFAutoModelForSemanticSegmentation,
TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
)
if not is_torch_available() and not is_tf_available():
logger.warning(
"The ONNX export features are only supported for PyTorch or TensorFlow. You will not be able to export models"
" without one of these libraries installed."
)
def supported_features_mapping(
*supported_features: str, onnx_config_cls: Optional[str] = None
) -> Dict[str, Callable[[PretrainedConfig], OnnxConfig]]:
"""
Generate the mapping between the supported features and their corresponding OnnxConfig for a given model.
Args:
*supported_features: The names of the supported features.
onnx_config_cls: The OnnxConfig full name corresponding to the model.
Returns:
The dictionary mapping a feature to an OnnxConfig constructor.
"""
if onnx_config_cls is None:
raise ValueError("A OnnxConfig class must be provided")
config_cls = transformers
for attr_name in onnx_config_cls.split("."):
config_cls = getattr(config_cls, attr_name)
mapping = {}
for feature in supported_features:
if "-with-past" in feature:
task = feature.replace("-with-past", "")
mapping[feature] = partial(config_cls.with_past, task=task)
else:
mapping[feature] = partial(config_cls.from_model_config, task=feature)
return mapping
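# Illustrative sketch (not part of the upstream module): for a model whose
# OnnxConfig supports past key values, e.g. GPT-2, the helper above resolves
# the dotted class path inside `transformers` and returns partials keyed by
# feature name:
#
#     mapping = supported_features_mapping(
#         "default",
#         "default-with-past",
#         onnx_config_cls="models.gpt2.GPT2OnnxConfig",
#     )
#     # mapping["default"](config) calls GPT2OnnxConfig.from_model_config(config, task="default")
#     # mapping["default-with-past"](config) calls GPT2OnnxConfig.with_past(config, task="default")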
class FeaturesManager:
_TASKS_TO_AUTOMODELS = {}
_TASKS_TO_TF_AUTOMODELS = {}
if is_torch_available():
_TASKS_TO_AUTOMODELS = {
"default": AutoModel,
"masked-lm": AutoModelForMaskedLM,
"causal-lm": AutoModelForCausalLM,
"seq2seq-lm": AutoModelForSeq2SeqLM,
"sequence-classification": AutoModelForSequenceClassification,
"token-classification": AutoModelForTokenClassification,
"multiple-choice": AutoModelForMultipleChoice,
"object-detection": AutoModelForObjectDetection,
"question-answering": AutoModelForQuestionAnswering,
"image-classification": AutoModelForImageClassification,
"image-segmentation": AutoModelForImageSegmentation,
"masked-im": AutoModelForMaskedImageModeling,
"semantic-segmentation": AutoModelForSemanticSegmentation,
"vision2seq-lm": AutoModelForVision2Seq,
"speech2seq-lm": AutoModelForSpeechSeq2Seq,
}
if is_tf_available():
_TASKS_TO_TF_AUTOMODELS = {
"default": TFAutoModel,
"masked-lm": TFAutoModelForMaskedLM,
"causal-lm": TFAutoModelForCausalLM,
"seq2seq-lm": TFAutoModelForSeq2SeqLM,
"sequence-classification": TFAutoModelForSequenceClassification,
"token-classification": TFAutoModelForTokenClassification,
"multiple-choice": TFAutoModelForMultipleChoice,
"question-answering": TFAutoModelForQuestionAnswering,
"semantic-segmentation": TFAutoModelForSemanticSegmentation,
}
# Set of model topologies we support associated to the features supported by each topology and the factory
_SUPPORTED_MODEL_TYPE = {
"albert": supported_features_mapping(
"default",
"masked-lm",
"sequence-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx_config_cls="models.albert.AlbertOnnxConfig",
),
"bart": supported_features_mapping(
"default",
"default-with-past",
"causal-lm",
"causal-lm-with-past",
"seq2seq-lm",
"seq2seq-lm-with-past",
"sequence-classification",
"question-answering",
onnx_config_cls="models.bart.BartOnnxConfig",
),
# BEiT cannot be used with the masked image modeling autoclass, so this feature is excluded here
"beit": supported_features_mapping(
"default", "image-classification", onnx_config_cls="models.beit.BeitOnnxConfig"
),
"bert": supported_features_mapping(
"default",
"masked-lm",
"causal-lm",
"sequence-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx_config_cls="models.bert.BertOnnxConfig",
),
"big-bird": supported_features_mapping(
"default",
"masked-lm",
"causal-lm",
"sequence-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx_config_cls="models.big_bird.BigBirdOnnxConfig",
),
"bigbird-pegasus": supported_features_mapping(
"default",
"default-with-past",
"causal-lm",
"causal-lm-with-past",
"seq2seq-lm",
"seq2seq-lm-with-past",
"sequence-classification",
"question-answering",
onnx_config_cls="models.bigbird_pegasus.BigBirdPegasusOnnxConfig",
),
"blenderbot": supported_features_mapping(
"default",
"default-with-past",
"causal-lm",
"causal-lm-with-past",
"seq2seq-lm",
"seq2seq-lm-with-past",
onnx_config_cls="models.blenderbot.BlenderbotOnnxConfig",
),
"blenderbot-small": supported_features_mapping(
"default",
"default-with-past",
"causal-lm",
"causal-lm-with-past",
"seq2seq-lm",
"seq2seq-lm-with-past",
onnx_config_cls="models.blenderbot_small.BlenderbotSmallOnnxConfig",
),
"bloom": supported_features_mapping(
"default",
"default-with-past",
"causal-lm",
"causal-lm-with-past",
"sequence-classification",
"token-classification",
onnx_config_cls="models.bloom.BloomOnnxConfig",
),
"camembert": supported_features_mapping(
"default",
"masked-lm",
"causal-lm",
"sequence-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx_config_cls="models.camembert.CamembertOnnxConfig",
),
"clip": supported_features_mapping(
"default",
onnx_config_cls="models.clip.CLIPOnnxConfig",
),
"codegen": supported_features_mapping(
"default",
"causal-lm",
onnx_config_cls="models.codegen.CodeGenOnnxConfig",
),
"convbert": supported_features_mapping(
"default",
"masked-lm",
"sequence-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx_config_cls="models.convbert.ConvBertOnnxConfig",
),
"convnext": supported_features_mapping(
"default",
"image-classification",
onnx_config_cls="models.convnext.ConvNextOnnxConfig",
),
"data2vec-text": supported_features_mapping(
"default",
"masked-lm",
"sequence-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx_config_cls="models.data2vec.Data2VecTextOnnxConfig",
),
"data2vec-vision": supported_features_mapping(
"default",
"image-classification",
# ONNX doesn't support `adaptive_avg_pool2d` yet
# "semantic-segmentation",
onnx_config_cls="models.data2vec.Data2VecVisionOnnxConfig",
),
"deberta": supported_features_mapping(
"default",
"masked-lm",
"sequence-classification",
"token-classification",
"question-answering",
onnx_config_cls="models.deberta.DebertaOnnxConfig",
),
"deberta-v2": supported_features_mapping(
"default",
"masked-lm",
"sequence-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx_config_cls="models.deberta_v2.DebertaV2OnnxConfig",
),
"deit": supported_features_mapping(
"default", "image-classification", "masked-im", onnx_config_cls="models.deit.DeiTOnnxConfig"
),
"detr": supported_features_mapping(
"default",
"object-detection",
"image-segmentation",
onnx_config_cls="models.detr.DetrOnnxConfig",
),
"distilbert": supported_features_mapping(
"default",
"masked-lm",
"sequence-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx_config_cls="models.distilbert.DistilBertOnnxConfig",
),
"electra": supported_features_mapping(
"default",
"masked-lm",
"causal-lm",
"sequence-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx_config_cls="models.electra.ElectraOnnxConfig",
),
"flaubert": supported_features_mapping(
"default",
"masked-lm",
"causal-lm",
"sequence-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx_config_cls="models.flaubert.FlaubertOnnxConfig",
),
"gpt2": supported_features_mapping(
"default",
"default-with-past",
"causal-lm",
"causal-lm-with-past",
"sequence-classification",
"token-classification",
onnx_config_cls="models.gpt2.GPT2OnnxConfig",
),
"gptj": supported_features_mapping(
"default",
"default-with-past",
"causal-lm",
"causal-lm-with-past",
"question-answering",
"sequence-classification",
onnx_config_cls="models.gptj.GPTJOnnxConfig",
),
"gpt-neo": supported_features_mapping(
"default",
"default-with-past",
"causal-lm",
"causal-lm-with-past",
"sequence-classification",
onnx_config_cls="models.gpt_neo.GPTNeoOnnxConfig",
),
"groupvit": supported_features_mapping(
"default",
onnx_config_cls="models.groupvit.GroupViTOnnxConfig",
),
"ibert": supported_features_mapping(
"default",
"masked-lm",
"sequence-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx_config_cls="models.ibert.IBertOnnxConfig",
),
"imagegpt": supported_features_mapping(
"default", "image-classification", onnx_config_cls="models.imagegpt.ImageGPTOnnxConfig"
),
"layoutlm": supported_features_mapping(
"default",
"masked-lm",
"sequence-classification",
"token-classification",
onnx_config_cls="models.layoutlm.LayoutLMOnnxConfig",
),
"layoutlmv3": supported_features_mapping(
"default",
"question-answering",
"sequence-classification",
"token-classification",
onnx_config_cls="models.layoutlmv3.LayoutLMv3OnnxConfig",
),
"levit": supported_features_mapping(
"default", "image-classification", onnx_config_cls="models.levit.LevitOnnxConfig"
),
"longt5": supported_features_mapping(
"default",
"default-with-past",
"seq2seq-lm",
"seq2seq-lm-with-past",
onnx_config_cls="models.longt5.LongT5OnnxConfig",
),
"longformer": supported_features_mapping(
"default",
"masked-lm",
"multiple-choice",
"question-answering",
"sequence-classification",
"token-classification",
onnx_config_cls="models.longformer.LongformerOnnxConfig",
),
"marian": supported_features_mapping(
"default",
"default-with-past",
"seq2seq-lm",
"seq2seq-lm-with-past",
"causal-lm",
"causal-lm-with-past",
onnx_config_cls="models.marian.MarianOnnxConfig",
),
"mbart": supported_features_mapping(
"default",
"default-with-past",
"causal-lm",
"causal-lm-with-past",
"seq2seq-lm",
"seq2seq-lm-with-past",
"sequence-classification",
"question-answering",
onnx_config_cls="models.mbart.MBartOnnxConfig",
),
"mobilebert": supported_features_mapping(
"default",
"masked-lm",
"sequence-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx_config_cls="models.mobilebert.MobileBertOnnxConfig",
),
"mobilenet-v1": supported_features_mapping(
"default",
"image-classification",
onnx_config_cls="models.mobilenet_v1.MobileNetV1OnnxConfig",
),
"mobilenet-v2": supported_features_mapping(
"default",
"image-classification",
onnx_config_cls="models.mobilenet_v2.MobileNetV2OnnxConfig",
),
"mobilevit": supported_features_mapping(
"default",
"image-classification",
onnx_config_cls="models.mobilevit.MobileViTOnnxConfig",
),
"mt5": supported_features_mapping(
"default",
"default-with-past",
"seq2seq-lm",
"seq2seq-lm-with-past",
onnx_config_cls="models.mt5.MT5OnnxConfig",
),
"m2m-100": supported_features_mapping(
"default",
"default-with-past",
"seq2seq-lm",
"seq2seq-lm-with-past",
onnx_config_cls="models.m2m_100.M2M100OnnxConfig",
),
"owlvit": supported_features_mapping(
"default",
onnx_config_cls="models.owlvit.OwlViTOnnxConfig",
),
"perceiver": supported_features_mapping(
"image-classification",
"masked-lm",
"sequence-classification",
onnx_config_cls="models.perceiver.PerceiverOnnxConfig",
),
"poolformer": supported_features_mapping(
"default", "image-classification", onnx_config_cls="models.poolformer.PoolFormerOnnxConfig"
),
"rembert": supported_features_mapping(
"default",
"masked-lm",
"causal-lm",
"sequence-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx_config_cls="models.rembert.RemBertOnnxConfig",
),
"resnet": supported_features_mapping(
"default",
"image-classification",
onnx_config_cls="models.resnet.ResNetOnnxConfig",
),
"roberta": supported_features_mapping(
"default",
"masked-lm",
"causal-lm",
"sequence-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx_config_cls="models.roberta.RobertaOnnxConfig",
),
"roformer": supported_features_mapping(
"default",
"masked-lm",
"causal-lm",
"sequence-classification",
"token-classification",
"multiple-choice",
"question-answering",
"token-classification",
onnx_config_cls="models.roformer.RoFormerOnnxConfig",
),
"segformer": supported_features_mapping(
"default",
"image-classification",
"semantic-segmentation",
onnx_config_cls="models.segformer.SegformerOnnxConfig",
),
"squeezebert": supported_features_mapping(
"default",
"masked-lm",
"sequence-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx_config_cls="models.squeezebert.SqueezeBertOnnxConfig",
),
"swin": supported_features_mapping(
"default", "image-classification", "masked-im", onnx_config_cls="models.swin.SwinOnnxConfig"
),
"t5": supported_features_mapping(
"default",
"default-with-past",
"seq2seq-lm",
"seq2seq-lm-with-past",
onnx_config_cls="models.t5.T5OnnxConfig",
),
"vision-encoder-decoder": supported_features_mapping(
"vision2seq-lm", onnx_config_cls="models.vision_encoder_decoder.VisionEncoderDecoderOnnxConfig"
),
"vit": supported_features_mapping(
"default", "image-classification", "masked-im", onnx_config_cls="models.vit.ViTOnnxConfig"
),
"whisper": supported_features_mapping(
"default",
"default-with-past",
"speech2seq-lm",
"speech2seq-lm-with-past",
onnx_config_cls="models.whisper.WhisperOnnxConfig",
),
"xlm": supported_features_mapping(
"default",
"masked-lm",
"causal-lm",
"sequence-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx_config_cls="models.xlm.XLMOnnxConfig",
),
"xlm-roberta": supported_features_mapping(
"default",
"masked-lm",
"causal-lm",
"sequence-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx_config_cls="models.xlm_roberta.XLMRobertaOnnxConfig",
),
"yolos": supported_features_mapping(
"default",
"object-detection",
onnx_config_cls="models.yolos.YolosOnnxConfig",
),
}
AVAILABLE_FEATURES = sorted(reduce(lambda s1, s2: s1 | s2, (v.keys() for v in _SUPPORTED_MODEL_TYPE.values())))
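# Editorial note: the reduce above takes the union of the feature names across
# all model types, so AVAILABLE_FEATURES is the sorted list of every feature
# string appearing in _SUPPORTED_MODEL_TYPE, e.g. "causal-lm",
# "causal-lm-with-past", "default", "image-classification", "masked-lm", ...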
@staticmethod
def get_supported_features_for_model_type(
model_type: str, model_name: Optional[str] = None
) -> Dict[str, Callable[[PretrainedConfig], OnnxConfig]]:
"""
Tries to retrieve the feature -> OnnxConfig constructor map from the model type.
Args:
model_type (`str`):
The model type to retrieve the supported features for.
model_name (`str`, *optional*):
The name attribute of the model object, only used for the exception message.
Returns:
The dictionary mapping each feature to a corresponding OnnxConfig constructor.
"""
model_type = model_type.lower()
if model_type not in FeaturesManager._SUPPORTED_MODEL_TYPE:
model_type_and_model_name = f"{model_type} ({model_name})" if model_name else model_type
raise KeyError(
f"{model_type_and_model_name} is not supported yet. "
f"Only {list(FeaturesManager._SUPPORTED_MODEL_TYPE.keys())} are supported. "
f"If you want to support {model_type} please propose a PR or open up an issue."
)
return FeaturesManager._SUPPORTED_MODEL_TYPE[model_type]
@staticmethod
def feature_to_task(feature: str) -> str:
return feature.replace("-with-past", "")
@staticmethod
def _validate_framework_choice(framework: str):
"""
Validates if the framework requested for the export is both correct and available, otherwise throws an
exception.
"""
if framework not in ["pt", "tf"]:
raise ValueError(
f"Only two frameworks are supported for ONNX export: pt or tf, but {framework} was provided."
)
elif framework == "pt" and not is_torch_available():
raise RuntimeError("Cannot export model to ONNX using PyTorch because no PyTorch package was found.")
elif framework == "tf" and not is_tf_available():
raise RuntimeError("Cannot export model to ONNX using TensorFlow because no TensorFlow package was found.")
@staticmethod
def get_model_class_for_feature(feature: str, framework: str = "pt") -> Type:
"""
Attempts to retrieve an AutoModel class from a feature name.
Args:
feature (`str`):
The feature required.
framework (`str`, *optional*, defaults to `"pt"`):
The framework to use for the export.
Returns:
The AutoModel class corresponding to the feature.
"""
task = FeaturesManager.feature_to_task(feature)
FeaturesManager._validate_framework_choice(framework)
if framework == "pt":
task_to_automodel = FeaturesManager._TASKS_TO_AUTOMODELS
else:
task_to_automodel = FeaturesManager._TASKS_TO_TF_AUTOMODELS
if task not in task_to_automodel:
raise KeyError(
f"Unknown task: {feature}. Possible values are {list(FeaturesManager._TASKS_TO_AUTOMODELS.values())}"
)
return task_to_automodel[task]
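# Example (illustrative): with torch installed,
#     FeaturesManager.get_model_class_for_feature("causal-lm-with-past")
# strips the "-with-past" suffix via feature_to_task and returns
# AutoModelForCausalLM from the _TASKS_TO_AUTOMODELS table above.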
@staticmethod
def determine_framework(model: str, framework: str = None) -> str:
"""
Determines the framework to use for the export.
The priority is in the following order:
1. User input via `framework`.
2. If local checkpoint is provided, use the same framework as the checkpoint.
3. Available framework in the environment, with priority given to PyTorch.
Args:
model (`str`):
The name of the model to export.
framework (`str`, *optional*, defaults to `None`):
The framework to use for the export. See above for priority if none provided.
Returns:
The framework to use for the export.
"""
if framework is not None:
return framework
framework_map = {"pt": "PyTorch", "tf": "TensorFlow"}
exporter_map = {"pt": "torch", "tf": "tf2onnx"}
if os.path.isdir(model):
if os.path.isfile(os.path.join(model, WEIGHTS_NAME)):
framework = "pt"
elif os.path.isfile(os.path.join(model, TF2_WEIGHTS_NAME)):
framework = "tf"
else:
raise FileNotFoundError(
"Cannot determine framework from given checkpoint location."
f" There should be a {WEIGHTS_NAME} for PyTorch"
f" or {TF2_WEIGHTS_NAME} for TensorFlow."
)
logger.info(f"Local {framework_map[framework]} model found.")
else:
if is_torch_available():
framework = "pt"
elif is_tf_available():
framework = "tf"
else:
raise EnvironmentError("Neither PyTorch nor TensorFlow found in environment. Cannot export to ONNX.")
logger.info(f"Framework not requested. Using {exporter_map[framework]} to export to ONNX.")
return framework
@staticmethod
def get_model_from_feature(
feature: str, model: str, framework: str = None, cache_dir: str = None
) -> Union["PreTrainedModel", "TFPreTrainedModel"]:
"""
Attempts to retrieve a model from a model's name and the feature to be enabled.
Args:
feature (`str`):
The feature required.
model (`str`):
The name of the model to export.
framework (`str`, *optional*, defaults to `None`):
The framework to use for the export. See `FeaturesManager.determine_framework` for the priority used
when none is provided.
cache_dir (`str`, *optional*, defaults to `None`):
Path to a directory in which a downloaded pretrained model should be cached.
Returns:
The instance of the model.
"""
framework = FeaturesManager.determine_framework(model, framework)
model_class = FeaturesManager.get_model_class_for_feature(feature, framework)
try:
model = model_class.from_pretrained(model, cache_dir=cache_dir)
except OSError:
if framework == "pt":
logger.info("Loading TensorFlow model in PyTorch before exporting to ONNX.")
model = model_class.from_pretrained(model, from_tf=True, cache_dir=cache_dir)
else:
logger.info("Loading PyTorch model in TensorFlow before exporting to ONNX.")
model = model_class.from_pretrained(model, from_pt=True, cache_dir=cache_dir)
return model
@staticmethod
def check_supported_model_or_raise(
model: Union["PreTrainedModel", "TFPreTrainedModel"], feature: str = "default"
) -> Tuple[str, Callable]:
"""
Check whether or not the model has the requested features.
Args:
model: The model to export.
feature: The name of the feature to check if it is available.
Returns:
A tuple `(model_type, config_factory)`: the model type string and the callable that builds the
OnnxConfig holding the model export properties.
"""
model_type = model.config.model_type.replace("_", "-")
model_name = getattr(model, "name", "")
model_features = FeaturesManager.get_supported_features_for_model_type(model_type, model_name=model_name)
if feature not in model_features:
raise ValueError(
f"{model.config.model_type} doesn't support feature {feature}. Supported values are: {model_features}"
)
return model.config.model_type, FeaturesManager._SUPPORTED_MODEL_TYPE[model_type][feature]
@staticmethod
def get_config(model_type: str, feature: str) -> OnnxConfig:
"""
Gets the OnnxConfig for a model_type and feature combination.
Args:
model_type (`str`):
The model type to retrieve the config for.
feature (`str`):
The feature to retrieve the config for.
Returns:
`OnnxConfig`: config for the combination
"""
return FeaturesManager._SUPPORTED_MODEL_TYPE[model_type][feature]
|
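Taken together, the `FeaturesManager` entry points above are typically chained: resolve the framework, load the model, validate the requested feature, then build the `OnnxConfig`. A minimal sketch (the checkpoint name is only an example; any model type listed in `_SUPPORTED_MODEL_TYPE` works):

from transformers.onnx.features import FeaturesManager

feature = "sequence-classification"
model = FeaturesManager.get_model_from_feature(feature, "distilbert-base-uncased")
# Returns the model type string and the OnnxConfig factory for the feature
model_kind, config_factory = FeaturesManager.check_supported_model_or_raise(model, feature=feature)
onnx_config = config_factory(model.config)
print(model_kind, onnx_config.default_onnx_opset)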
274056675/springboot-openai-chatgpt | 9,721 | mng_web/src/views/base/region.vue | <template>
<el-row>
<el-col :span="9">
<div class="box">
<el-scrollbar>
<basic-container>
<avue-tree :option="treeOption" :data="treeData" @node-click="nodeClick"/>
</basic-container>
</el-scrollbar>
</div>
</el-col>
<el-col :span="15">
<basic-container>
<el-button-group>
<el-button v-if="permission.region_add" type="primary" size="small" icon="el-icon-circle-plus-outline" @click="addChildren">新增下级</el-button>
<el-button v-if="permission.region_delete" type="primary" size="small" icon="el-icon-delete" @click="handleDelete">删除</el-button>
<el-button v-if="permission.region_debug" type="primary" size="small" icon="el-icon-video-play" @click="handleDebug">调试</el-button>
</el-button-group>
</basic-container>
<basic-container>
<avue-form ref="form" :option="regionOption" v-model="regionForm" @submit="handleSubmit">
<template slot="code" slot-scope="{}">
<el-input placeholder="请输入 区划子编号" v-model="regionForm.subCode">
<template slot="prepend">{{regionForm.parentCode}}</template>
</el-input>
</template>
</avue-form>
<el-dialog title="行政区划数据调试"
append-to-body
:visible.sync="debugBox"
width="350px">
<avue-form :option="debugOption" v-model="debugForm"/>
</el-dialog>
</basic-container>
</el-col>
</el-row>
</template>
<script>
import {getLazyTree, getDetail, submit, remove} from "@/api/base/region";
import {mapGetters} from "vuex";
import {validatenull} from "@/util/validate";
export default {
data() {
return {
topCode: '00',
treeCode: '',
treeParentCode: '',
treeData: [],
treeOption: {
nodeKey: 'id',
lazy: true,
treeLoad: function (node, resolve) {
const parentCode = (node.level === 0) ? "00" : node.data.id;
getLazyTree(parentCode).then(res => {
resolve(res.data.data.map(item => {
return {
...item,
leaf: !item.hasChildren
}
}))
});
},
addBtn: false,
menu: false,
size: 'small',
props: {
labelText: '标题',
label: 'title',
value: 'value',
children: 'children'
}
},
regionForm: {},
regionOption: {
labelWidth: 100,
column: [
{
label: "父区划编号",
prop: "parentCode",
span: 24,
disabled: true,
rules: [{
required: true,
message: "请输入父区划编号",
trigger: "blur"
}]
},
{
label: "父区划名称",
prop: "parentName",
span: 24,
disabled: true,
},
{
label: "区划编号",
prop: "code",
formslot: true,
span: 24,
rules: [{
required: true,
message: "请输入区划编号",
trigger: "blur"
}]
},
{
label: "区划子编号",
prop: "subCode",
display: false,
},
{
label: "区划名称",
prop: "name",
span: 24,
rules: [{
required: true,
message: "请输入区划名称",
trigger: "blur"
}]
},
{
label: "区划等级",
prop: "level",
type: "radio",
dicUrl: "/api/blade-system/dict/dictionary?code=region",
props: {
label: "dictValue",
value: "dictKey"
},
dataType: "number",
span: 24,
rules: [{
required: true,
message: "请选择区划等级",
trigger: "blur"
}]
},
{
label: "区划排序",
prop: "sort",
type: "number",
span: 24,
rules: [{
required: true,
message: "请输入区划排序",
trigger: "blur"
}]
},
{
label: "区划备注",
prop: "remark",
type: "textarea",
minRows: 6,
span: 24,
},
]
},
debugBox: false,
debugForm: {},
debugOption: {
labelWidth: 50,
submitBtn: false,
emptyBtn: false,
column: [
{
label: '省份',
prop: 'province',
type: 'select',
props: {
label: 'name',
value: 'code'
},
cascaderItem: ['city', 'district'],
dicUrl: '/api/blade-system/region/select',
span: 24,
},
{
label: '地市',
prop: 'city',
type: 'select',
props: {
label: 'name',
value: 'code'
},
dicFlag: false,
dicUrl: '/api/blade-system/region/select?code={{key}}',
span: 24,
},
{
label: '区县',
prop: 'district',
type: 'select',
props: {
label: 'name',
value: 'code'
},
dicFlag: false,
dicUrl: '/api/blade-system/region/select?code={{key}}',
span: 24,
}
]
}
};
},
watch: {
'regionForm.subCode'() {
this.regionForm.code = this.regionForm.parentCode + this.regionForm.subCode;
},
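// Illustrative note (not in the original source): Chinese administrative
// division codes nest by concatenation (GB/T 2260), so with parentCode
// "3301" (Hangzhou) and subCode "06", the watcher above yields code "330106".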
'excelForm.isCovered'() {
if (this.excelForm.isCovered !== '') {
const column = this.findObject(this.excelOption.column, "excelFile");
column.action = `/api/blade-system/region/import-region?isCovered=${this.excelForm.isCovered}`;
}
}
},
computed: {
...mapGetters(["permission"]),
permissionList() {
return {
addBtn: this.vaildData(this.permission.region_add, false),
viewBtn: this.vaildData(this.permission.region_view, false),
delBtn: this.vaildData(this.permission.region_delete, false),
editBtn: this.vaildData(this.permission.region_edit, false)
};
},
ids() {
let ids = [];
this.selectionList.forEach(ele => {
ids.push(ele.id);
});
return ids.join(",");
}
},
methods: {
initTree() {
this.treeData = [];
getLazyTree(this.topCode).then(res => {
this.treeData = res.data.data.map(item => {
return {
...item,
leaf: !item.hasChildren
}
})
});
},
nodeClick(data) {
const column = this.findObject(this.regionOption.column, "parentCode");
column.disabled = true;
this.treeCode = data.id;
this.treeParentCode = data.parentId;
getDetail(this.treeCode).then(res => {
this.regionForm = res.data.data;
this.regionForm.subCode = this.regionForm.code.replace(this.regionForm.parentCode, '');
})
},
addChildren() {
if (validatenull(this.regionForm.code) || validatenull(this.regionForm.name)) {
this.$message.warning("请先选择一项区划");
return;
}
this.regionForm.parentCode = this.regionForm.code;
this.regionForm.parentName = this.regionForm.name;
this.regionForm.code = '';
this.regionForm.subCode = '';
this.regionForm.name = '';
this.regionForm.level = (this.regionForm.level === 5) ? 5 : this.regionForm.level + 1;
},
handleSubmit(form, done, loading) {
const parentCode = form.parentCode === this.topCode ? '' : form.parentCode;
form.code = parentCode + form.subCode;
submit(form).then(() => {
this.$message({
type: "success",
message: "操作成功!"
});
this.initTree();
this.regionForm.subCode = '';
this.$refs.form.resetForm();
done();
}, error => {
loading();
window.console.log(error);
});
},
handleDelete() {
if (validatenull(this.regionForm.code)) {
this.$message.warning("请先选择一项区划");
return;
}
this.$confirm(`确定将 [${this.regionForm.name}] 数据删除?`, {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(this.treeCode);
})
.then(() => {
this.$message({
type: "success",
message: "操作成功!"
});
this.initTree();
this.regionForm.subCode = '';
this.$refs.form.resetForm();
});
},
uploadAfter(res, done, loading, column) {
window.console.log(column);
this.excelBox = false;
this.initTree();
done();
},
handleDebug() {
this.debugBox = true;
},
}
};
</script>
<style>
.box {
height: 800px;
}
.el-scrollbar {
height: 100%;
}
.box .el-scrollbar__wrap {
overflow: scroll;
}
</style>
|
274056675/springboot-openai-chatgpt | 6,420 | mng_web/src/views/desk/notice.vue | <template>
<basic-container>
<avue-crud ref="crud"
:option="option"
:table-loading="loading"
:data="data"
:page="page"
@row-del="rowDel"
v-model="form"
:permission="permissionList"
@row-update="rowUpdate"
@row-save="rowSave"
:before-open="beforeOpen"
@search-change="searchChange"
@search-reset="searchReset"
@selection-change="selectionChange"
@current-change="currentChange"
@size-change="sizeChange"
@on-load="onLoad">
<template slot="menuLeft">
<el-button type="danger"
size="small"
icon="el-icon-delete"
plain
v-if="permission.notice_delete"
@click="handleDelete">删 除
</el-button>
</template>
<template slot-scope="{row}"
slot="category">
<el-tag>{{row.categoryName}}</el-tag>
</template>
</avue-crud>
</basic-container>
</template>
<script>
import {getList, remove, update, add, getNotice} from "@/api/desk/notice";
import {mapGetters} from "vuex";
export default {
data() {
return {
form: {},
query: {},
loading: true,
page: {
pageSize: 10,
currentPage: 1,
total: 0
},
selectionList: [],
option: {
height: 'auto',
calcHeight: 210,
searchShow: true,
searchMenuSpan: 6,
tip: false,
border: true,
index: true,
viewBtn: true,
selection: true,
column: [
{
label: "通知标题",
prop: "title",
span: 24,
search: true,
rules: [{
required: true,
message: "请输入通知标题",
trigger: "blur"
}]
},
{
label: "通知类型",
type: "select",
dicUrl: "/api/blade-system/dict/dictionary?code=notice",
props: {
label: "dictValue",
value: "dictKey"
},
slot: true,
prop: "category",
search: true,
rules: [{
required: true,
message: "请输入通知类型",
trigger: "blur"
}]
},
{
label: "通知日期",
prop: "releaseTime",
type: "date",
format: "yyyy-MM-dd hh:mm:ss",
valueFormat: "yyyy-MM-dd hh:mm:ss",
rules: [{
required: true,
message: "请输入通知日期",
trigger: "blur"
}]
},
{
label: "通知内容",
prop: "content",
span: 24,
minRows: 6,
type: "textarea"
}
]
},
data: []
};
},
computed: {
...mapGetters(["permission"]),
permissionList() {
return {
addBtn: this.vaildData(this.permission.notice_add, false),
viewBtn: this.vaildData(this.permission.notice_view, false),
delBtn: this.vaildData(this.permission.notice_delete, false),
editBtn: this.vaildData(this.permission.notice_edit, false)
};
},
ids() {
let ids = [];
this.selectionList.forEach(ele => {
ids.push(ele.id);
});
return ids.join(",");
}
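// Illustrative note (not in the original source): selecting rows with ids
// 1 and 3 makes `ids` evaluate to "1,3", the comma-joined form that the
// batch remove(this.ids) call in handleDelete expects.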
},
methods: {
rowSave(row, done, loading) {
add(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowUpdate(row, index, done, loading) {
update(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowDel(row) {
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(row.id);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
});
},
searchReset() {
this.query = {};
this.onLoad(this.page);
},
searchChange(params, done) {
this.query = params;
this.page.currentPage = 1;
this.onLoad(this.page, params);
done();
},
selectionChange(list) {
this.selectionList = list;
},
handleDelete() {
if (this.selectionList.length === 0) {
this.$message.warning("请选择至少一条数据");
return;
}
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(this.ids);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
this.$refs.crud.toggleSelection();
});
},
beforeOpen(done, type) {
if (["edit", "view"].includes(type)) {
getNotice(this.form.id).then(res => {
this.form = res.data.data;
});
}
done();
},
currentChange(currentPage) {
this.page.currentPage = currentPage;
},
sizeChange(pageSize) {
this.page.pageSize = pageSize;
},
onLoad(page, params = {}) {
this.loading = true;
getList(page.currentPage, page.pageSize, Object.assign(params, this.query)).then(res => {
const data = res.data.data;
this.page.total = data.total;
this.data = data.records;
this.loading = false;
});
}
}
};
</script>
<style>
</style>
|
27182812/ChatGLM-LLaMA-chinese-insturct | 3,625 | src/transformers/onnx/utils.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ctypes import c_float, sizeof
from enum import Enum
from typing import TYPE_CHECKING, Optional, Union
if TYPE_CHECKING:
from .. import AutoFeatureExtractor, AutoProcessor, AutoTokenizer # tests_ignore
class ParameterFormat(Enum):
Float = c_float
@property
def size(self) -> int:
"""
Number of bytes required for this data type
Returns:
Integer > 0
"""
return sizeof(self.value)
def compute_effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
"""
Args:
dimension:
fixed_dimension:
num_token_to_add:
Returns:
"""
# A dimension <= 0 denotes a dynamic axis; fall back to the fixed value
if dimension <= 0:
dimension = fixed_dimension
dimension -= num_token_to_add
return dimension
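# Example (illustrative): with a dynamic batch axis the fixed fallback applies,
#     compute_effective_axis_dimension(-1, fixed_dimension=2)                     # -> 2
# and for a sequence axis, special tokens are subtracted from the fallback:
#     compute_effective_axis_dimension(0, fixed_dimension=8, num_token_to_add=2)  # -> 6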
def compute_serialized_parameters_size(num_parameters: int, dtype: ParameterFormat) -> int:
"""
Compute the size taken by all the parameters in the given storage format when serializing the model
Args:
num_parameters: Number of parameters to be saved
dtype: The data format in which each parameter will be saved
Returns:
Size (in bytes) taken to save all the parameters
"""
return num_parameters * dtype.size
def get_preprocessor(model_name: str) -> Optional[Union["AutoTokenizer", "AutoFeatureExtractor", "AutoProcessor"]]:
"""
Gets a preprocessor (tokenizer, feature extractor or processor) that is available for `model_name`.
Args:
model_name (`str`): Name of the model for which a preprocessor is loaded.
Returns:
`Optional[Union[AutoTokenizer, AutoFeatureExtractor, AutoProcessor]]`:
If a processor is found, it is returned. Otherwise, if a tokenizer or a feature extractor exists, it is
returned. If both a tokenizer and a feature extractor exist, an error is raised. The function returns
`None` if no preprocessor is found.
"""
# Avoid circular imports by only importing this here.
from .. import AutoFeatureExtractor, AutoProcessor, AutoTokenizer # tests_ignore
try:
return AutoProcessor.from_pretrained(model_name)
except (ValueError, OSError, KeyError):
tokenizer, feature_extractor = None, None
try:
tokenizer = AutoTokenizer.from_pretrained(model_name)
except (OSError, KeyError):
pass
try:
feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
except (OSError, KeyError):
pass
if tokenizer is not None and feature_extractor is not None:
raise ValueError(
f"Couldn't auto-detect preprocessor for {model_name}. Found both a tokenizer and a feature extractor."
)
elif tokenizer is None and feature_extractor is None:
return None
elif tokenizer is not None:
return tokenizer
else:
return feature_extractor
|
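A short usage sketch of the helpers above (the checkpoint name is only an example): the serialized size is simply the parameter count times the per-parameter byte width, and `get_preprocessor` falls back from a processor to a tokenizer or feature extractor.

from transformers.onnx.utils import (
    ParameterFormat,
    compute_serialized_parameters_size,
    get_preprocessor,
)

# 124M float32 parameters -> 124_000_000 * 4 bytes = 496_000_000 bytes on disk
print(compute_serialized_parameters_size(124_000_000, ParameterFormat.Float))

# For a text-only checkpoint this typically returns the tokenizer
preprocessor = get_preprocessor("distilbert-base-uncased")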
27182812/ChatGLM-LLaMA-chinese-insturct | 9,353 | src/transformers/onnx/__main__.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import sys
import warnings
from argparse import ArgumentParser
from pathlib import Path
from packaging import version
from .. import AutoFeatureExtractor, AutoProcessor, AutoTokenizer
from ..utils import logging
from ..utils.import_utils import is_optimum_available
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import get_preprocessor
MIN_OPTIMUM_VERSION = "1.5.0"
ENCODER_DECODER_MODELS = ["vision-encoder-decoder"]
def export_with_optimum(args):
if is_optimum_available():
from optimum.version import __version__ as optimum_version
parsed_optimum_version = version.parse(optimum_version)
if parsed_optimum_version < version.parse(MIN_OPTIMUM_VERSION):
raise RuntimeError(
f"transformers.onnx requires optimum >= {MIN_OPTIMUM_VERSION} but {optimum_version} is installed. You "
"can upgrade optimum by running: pip install -U optimum[exporters]"
)
else:
raise RuntimeError(
"transformers.onnx requires optimum to run, you can install the library by running: pip install "
"optimum[exporters]"
)
cmd_line = [
sys.executable,
"-m",
"optimum.exporters.onnx",
f"--model {args.model}",
f"--task {args.feature}",
f"--framework {args.framework}" if args.framework is not None else "",
f"{args.output}",
]
proc = subprocess.Popen(" ".join(cmd_line), stdout=subprocess.PIPE, shell=True)
proc.wait()
logger.info(
"The export was done by optimum.exporters.onnx. We recommend using to use this package directly in future, as "
"transformers.onnx is deprecated, and will be removed in v5. You can find more information here: "
"https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/export_a_model."
)
def export_with_transformers(args):
args.output = args.output if args.output.is_file() else args.output.joinpath("model.onnx")
if not args.output.parent.exists():
args.output.parent.mkdir(parents=True)
# Allocate the model
model = FeaturesManager.get_model_from_feature(
args.feature, args.model, framework=args.framework, cache_dir=args.cache_dir
)
model_kind, model_onnx_config = FeaturesManager.check_supported_model_or_raise(model, feature=args.feature)
onnx_config = model_onnx_config(model.config)
if model_kind in ENCODER_DECODER_MODELS:
encoder_model = model.get_encoder()
decoder_model = model.get_decoder()
encoder_onnx_config = onnx_config.get_encoder_config(encoder_model.config)
decoder_onnx_config = onnx_config.get_decoder_config(
encoder_model.config, decoder_model.config, feature=args.feature
)
if args.opset is None:
args.opset = max(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset)
if args.opset < min(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset):
raise ValueError(
f"Opset {args.opset} is not sufficient to export {model_kind}. At least "
f" {min(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset)} is required."
)
preprocessor = AutoFeatureExtractor.from_pretrained(args.model)
onnx_inputs, onnx_outputs = export(
preprocessor,
encoder_model,
encoder_onnx_config,
args.opset,
args.output.parent.joinpath("encoder_model.onnx"),
)
validate_model_outputs(
encoder_onnx_config,
preprocessor,
encoder_model,
args.output.parent.joinpath("encoder_model.onnx"),
onnx_outputs,
args.atol if args.atol else encoder_onnx_config.atol_for_validation,
)
preprocessor = AutoTokenizer.from_pretrained(args.model)
onnx_inputs, onnx_outputs = export(
preprocessor,
decoder_model,
decoder_onnx_config,
args.opset,
args.output.parent.joinpath("decoder_model.onnx"),
)
validate_model_outputs(
decoder_onnx_config,
preprocessor,
decoder_model,
args.output.parent.joinpath("decoder_model.onnx"),
onnx_outputs,
args.atol if args.atol else decoder_onnx_config.atol_for_validation,
)
logger.info(
f"All good, model saved at: {args.output.parent.joinpath('encoder_model.onnx').as_posix()},"
f" {args.output.parent.joinpath('decoder_model.onnx').as_posix()}"
)
else:
# Instantiate the appropriate preprocessor
if args.preprocessor == "auto":
preprocessor = get_preprocessor(args.model)
elif args.preprocessor == "tokenizer":
preprocessor = AutoTokenizer.from_pretrained(args.model)
elif args.preprocessor == "feature_extractor":
preprocessor = AutoFeatureExtractor.from_pretrained(args.model)
elif args.preprocessor == "processor":
preprocessor = AutoProcessor.from_pretrained(args.model)
else:
raise ValueError(f"Unknown preprocessor type '{args.preprocessor}'")
# Ensure the requested opset is sufficient
if args.opset is None:
args.opset = onnx_config.default_onnx_opset
if args.opset < onnx_config.default_onnx_opset:
raise ValueError(
f"Opset {args.opset} is not sufficient to export {model_kind}. "
f"At least {onnx_config.default_onnx_opset} is required."
)
onnx_inputs, onnx_outputs = export(
preprocessor,
model,
onnx_config,
args.opset,
args.output,
)
if args.atol is None:
args.atol = onnx_config.atol_for_validation
validate_model_outputs(onnx_config, preprocessor, model, args.output, onnx_outputs, args.atol)
logger.info(f"All good, model saved at: {args.output.as_posix()}")
warnings.warn(
"The export was done by transformers.onnx which is deprecated and will be removed in v5. We recommend"
" using optimum.exporters.onnx in future. You can find more information here:"
" https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/export_a_model.",
FutureWarning,
)
def main():
parser = ArgumentParser("Hugging Face Transformers ONNX exporter")
parser.add_argument(
"-m", "--model", type=str, required=True, help="Model ID on huggingface.co or path on disk to load model from."
)
parser.add_argument(
"--feature",
default="default",
help="The type of features to export the model with.",
)
parser.add_argument("--opset", type=int, default=None, help="ONNX opset version to export the model with.")
parser.add_argument(
"--atol", type=float, default=None, help="Absolute difference tolerance when validating the model."
)
parser.add_argument(
"--framework",
type=str,
choices=["pt", "tf"],
default=None,
help=(
"The framework to use for the ONNX export."
" If not provided, will attempt to use the local checkpoint's original framework"
" or what is available in the environment."
),
)
parser.add_argument("output", type=Path, help="Path indicating where to store generated ONNX model.")
parser.add_argument("--cache_dir", type=str, default=None, help="Path indicating where to store cache.")
parser.add_argument(
"--preprocessor",
type=str,
choices=["auto", "tokenizer", "feature_extractor", "processor"],
default="auto",
help="Which type of preprocessor to use. 'auto' tries to automatically detect it.",
)
parser.add_argument(
"--export_with_transformers",
action="store_true",
help=(
"Whether to use transformers.onnx instead of optimum.exporters.onnx to perform the ONNX export. It can be "
"useful when exporting a model supported in transformers but not in optimum, otherwise it is not "
"recommended."
),
)
args = parser.parse_args()
if args.export_with_transformers or not is_optimum_available():
export_with_transformers(args)
else:
export_with_optimum(args)
if __name__ == "__main__":
logger = logging.get_logger("transformers.onnx") # pylint: disable=invalid-name
logger.setLevel(logging.INFO)
main()
|
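As the argument parser above shows, the module is meant to be invoked as a CLI. A typical invocation (checkpoint name illustrative; `--feature` defaults to "default" and the positional argument is the output path):

python -m transformers.onnx --model distilbert-base-uncased --feature sequence-classification onnx/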
233zzh/TitanDataOperationSystem | 15,827 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/flot/examples/axes-time-zones/tz/antarctica | # <pre>
# This file is in the public domain, so clarified as of
# 2009-05-17 by Arthur David Olson.
# From Paul Eggert (1999-11-15):
# To keep things manageable, we list only locations occupied year-round; see
# <a href="http://www.comnap.aq/comnap/comnap.nsf/P/Stations/">
# COMNAP - Stations and Bases
# </a>
# and
# <a href="http://www.spri.cam.ac.uk/bob/periant.htm">
# Summary of the Peri-Antarctic Islands (1998-07-23)
# </a>
# for information.
# Unless otherwise specified, we have no time zone information.
#
# Except for the French entries,
# I made up all time zone abbreviations mentioned here; corrections welcome!
# FORMAT is `zzz' and GMTOFF is 0 for locations while uninhabited.
# These rules are stolen from the `southamerica' file.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule ArgAQ 1964 1966 - Mar 1 0:00 0 -
Rule ArgAQ 1964 1966 - Oct 15 0:00 1:00 S
Rule ArgAQ 1967 only - Apr 2 0:00 0 -
Rule ArgAQ 1967 1968 - Oct Sun>=1 0:00 1:00 S
Rule ArgAQ 1968 1969 - Apr Sun>=1 0:00 0 -
Rule ArgAQ 1974 only - Jan 23 0:00 1:00 S
Rule ArgAQ 1974 only - May 1 0:00 0 -
Rule ChileAQ 1972 1986 - Mar Sun>=9 3:00u 0 -
Rule ChileAQ 1974 1987 - Oct Sun>=9 4:00u 1:00 S
Rule ChileAQ 1987 only - Apr 12 3:00u 0 -
Rule ChileAQ 1988 1989 - Mar Sun>=9 3:00u 0 -
Rule ChileAQ 1988 only - Oct Sun>=1 4:00u 1:00 S
Rule ChileAQ 1989 only - Oct Sun>=9 4:00u 1:00 S
Rule ChileAQ 1990 only - Mar 18 3:00u 0 -
Rule ChileAQ 1990 only - Sep 16 4:00u 1:00 S
Rule ChileAQ 1991 1996 - Mar Sun>=9 3:00u 0 -
Rule ChileAQ 1991 1997 - Oct Sun>=9 4:00u 1:00 S
Rule ChileAQ 1997 only - Mar 30 3:00u 0 -
Rule ChileAQ 1998 only - Mar Sun>=9 3:00u 0 -
Rule ChileAQ 1998 only - Sep 27 4:00u 1:00 S
Rule ChileAQ 1999 only - Apr 4 3:00u 0 -
Rule ChileAQ 1999 2010 - Oct Sun>=9 4:00u 1:00 S
Rule ChileAQ 2000 2007 - Mar Sun>=9 3:00u 0 -
# N.B.: the end of March 29 in Chile is March 30 in Universal time,
# which is used below in specifying the transition.
Rule ChileAQ 2008 only - Mar 30 3:00u 0 -
Rule ChileAQ 2009 only - Mar Sun>=9 3:00u 0 -
Rule ChileAQ 2010 only - Apr Sun>=1 3:00u 0 -
Rule ChileAQ 2011 only - May Sun>=2 3:00u 0 -
Rule ChileAQ 2011 only - Aug Sun>=16 4:00u 1:00 S
Rule ChileAQ 2012 only - Apr Sun>=23 3:00u 0 -
Rule ChileAQ 2012 only - Sep Sun>=2 4:00u 1:00 S
Rule ChileAQ 2013 max - Mar Sun>=9 3:00u 0 -
Rule ChileAQ 2013 max - Oct Sun>=9 4:00u 1:00 S
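# Editorial note on the rule syntax (standard zic format): in a line such as
# "Rule ChileAQ 2013 max - Oct Sun>=9 4:00u 1:00 S", "Sun>=9" means the first
# Sunday on or after October 9, "4:00u" is 04:00 Universal Time ("s" would mean
# local standard time), and "1:00 S" saves one hour, substituting letter S in
# the zone's %s format.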
# These rules are stolen from the `australasia' file.
Rule AusAQ 1917 only - Jan 1 0:01 1:00 -
Rule AusAQ 1917 only - Mar 25 2:00 0 -
Rule AusAQ 1942 only - Jan 1 2:00 1:00 -
Rule AusAQ 1942 only - Mar 29 2:00 0 -
Rule AusAQ 1942 only - Sep 27 2:00 1:00 -
Rule AusAQ 1943 1944 - Mar lastSun 2:00 0 -
Rule AusAQ 1943 only - Oct 3 2:00 1:00 -
Rule ATAQ 1967 only - Oct Sun>=1 2:00s 1:00 -
Rule ATAQ 1968 only - Mar lastSun 2:00s 0 -
Rule ATAQ 1968 1985 - Oct lastSun 2:00s 1:00 -
Rule ATAQ 1969 1971 - Mar Sun>=8 2:00s 0 -
Rule ATAQ 1972 only - Feb lastSun 2:00s 0 -
Rule ATAQ 1973 1981 - Mar Sun>=1 2:00s 0 -
Rule ATAQ 1982 1983 - Mar lastSun 2:00s 0 -
Rule ATAQ 1984 1986 - Mar Sun>=1 2:00s 0 -
Rule ATAQ 1986 only - Oct Sun>=15 2:00s 1:00 -
Rule ATAQ 1987 1990 - Mar Sun>=15 2:00s 0 -
Rule ATAQ 1987 only - Oct Sun>=22 2:00s 1:00 -
Rule ATAQ 1988 1990 - Oct lastSun 2:00s 1:00 -
Rule ATAQ 1991 1999 - Oct Sun>=1 2:00s 1:00 -
Rule ATAQ 1991 2005 - Mar lastSun 2:00s 0 -
Rule ATAQ 2000 only - Aug lastSun 2:00s 1:00 -
Rule ATAQ 2001 max - Oct Sun>=1 2:00s 1:00 -
Rule ATAQ 2006 only - Apr Sun>=1 2:00s 0 -
Rule ATAQ 2007 only - Mar lastSun 2:00s 0 -
Rule ATAQ 2008 max - Apr Sun>=1 2:00s 0 -
# Argentina - year-round bases
# Belgrano II, Confin Coast, -770227-0343737, since 1972-02-05
# Esperanza, San Martin Land, -6323-05659, since 1952-12-17
# Jubany, Potter Peninsula, King George Island, -6414-0602320, since 1982-01
# Marambio, Seymour I, -6414-05637, since 1969-10-29
# Orcadas, Laurie I, -6016-04444, since 1904-02-22
# San Martin, Debenham I, -6807-06708, since 1951-03-21
# (except 1960-03 / 1976-03-21)
# Australia - territories
# Heard Island, McDonald Islands (uninhabited)
# previously sealers and scientific personnel wintered
# <a href="http://web.archive.org/web/20021204222245/http://www.dstc.qut.edu.au/DST/marg/daylight.html">
# Margaret Turner reports
# </a> (1999-09-30) that they're UTC+5, with no DST;
# presumably this is when they have visitors.
#
# year-round bases
# Casey, Bailey Peninsula, -6617+11032, since 1969
# Davis, Vestfold Hills, -6835+07759, since 1957-01-13
# (except 1964-11 - 1969-02)
# Mawson, Holme Bay, -6736+06253, since 1954-02-13
# From Steffen Thorsen (2009-03-11):
# Three Australian stations in Antarctica have changed their time zone:
# Casey moved from UTC+8 to UTC+11
# Davis moved from UTC+7 to UTC+5
# Mawson moved from UTC+6 to UTC+5
# The changes occurred on 2009-10-18 at 02:00 (local times).
#
# Government source: (Australian Antarctic Division)
# <a href="http://www.aad.gov.au/default.asp?casid=37079">
# http://www.aad.gov.au/default.asp?casid=37079
# </a>
#
# We have more background information here:
# <a href="http://www.timeanddate.com/news/time/antarctica-new-times.html">
# http://www.timeanddate.com/news/time/antarctica-new-times.html
# </a>
# From Steffen Thorsen (2010-03-10):
# We got these changes from the Australian Antarctic Division:
# - Macquarie Island will stay on UTC+11 for winter and therefore not
# switch back from daylight savings time when other parts of Australia do
# on 4 April.
#
# - Casey station reverted to its normal time of UTC+8 on 5 March 2010.
# The change to UTC+11 is being considered as a regular summer thing but
# has not been decided yet.
#
# - Davis station will revert to its normal time of UTC+7 at 10 March 2010
# 20:00 UTC.
#
# - Mawson station stays on UTC+5.
#
# In addition to the Rule changes for Casey/Davis, it means that Macquarie
# will no longer be like Hobart and will have to have its own Zone created.
#
# Background:
# <a href="http://www.timeanddate.com/news/time/antartica-time-changes-2010.html">
# http://www.timeanddate.com/news/time/antartica-time-changes-2010.html
# </a>
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Antarctica/Casey 0 - zzz 1969
8:00 - WST 2009 Oct 18 2:00
# Western (Aus) Standard Time
11:00 - CAST 2010 Mar 5 2:00
# Casey Time
8:00 - WST 2011 Oct 28 2:00
11:00 - CAST 2012 Feb 21 17:00u
8:00 - WST
Zone Antarctica/Davis 0 - zzz 1957 Jan 13
7:00 - DAVT 1964 Nov # Davis Time
0 - zzz 1969 Feb
7:00 - DAVT 2009 Oct 18 2:00
5:00 - DAVT 2010 Mar 10 20:00u
7:00 - DAVT 2011 Oct 28 2:00
5:00 - DAVT 2012 Feb 21 20:00u
7:00 - DAVT
Zone Antarctica/Mawson 0 - zzz 1954 Feb 13
6:00 - MAWT 2009 Oct 18 2:00
# Mawson Time
5:00 - MAWT
Zone Antarctica/Macquarie 0 - zzz 1911
10:00 - EST 1916 Oct 1 2:00
10:00 1:00 EST 1917 Feb
10:00 AusAQ EST 1967
10:00 ATAQ EST 2010 Apr 4 3:00
11:00 - MIST # Macquarie Island Time
# References:
# <a href="http://www.antdiv.gov.au/aad/exop/sfo/casey/casey_aws.html">
# Casey Weather (1998-02-26)
# </a>
# <a href="http://www.antdiv.gov.au/aad/exop/sfo/davis/video.html">
# Davis Station, Antarctica (1998-02-26)
# </a>
# <a href="http://www.antdiv.gov.au/aad/exop/sfo/mawson/video.html">
# Mawson Station, Antarctica (1998-02-25)
# </a>
# Brazil - year-round base
# Comandante Ferraz, King George Island, -6205+05824, since 1983/4
# Chile - year-round bases and towns
# Escudero, South Shetland Is, -621157-0585735, since 1994
# Presidente Eduardo Frei, King George Island, -6214-05848, since 1969-03-07
# General Bernardo O'Higgins, Antarctic Peninsula, -6319-05704, since 1948-02
# Capitan Arturo Prat, -6230-05941
# Villa Las Estrellas (a town), around the Frei base, since 1984-04-09
# These locations have always used Santiago time; use TZ='America/Santiago'.
# China - year-round bases
# Great Wall, King George Island, -6213-05858, since 1985-02-20
# Zhongshan, Larsemann Hills, Prydz Bay, -6922+07623, since 1989-02-26
# France - year-round bases
#
# From Antoine Leca (1997-01-20):
# Time data are from Nicole Pailleau at the IFRTP
# (French Institute for Polar Research and Technology).
# She confirms that French Southern Territories and Terre Adelie bases
# don't observe daylight saving time, even if Terre Adelie supplies came
# from Tasmania.
#
# French Southern Territories with year-round inhabitants
#
# Martin-de-Vivies Base, Amsterdam Island, -374105+0773155, since 1950
# Alfred-Faure Base, Crozet Islands, -462551+0515152, since 1964
# Port-aux-Francais, Kerguelen Islands, -492110+0701303, since 1951;
# whaling & sealing station operated 1908/1914, 1920/1929, and 1951/1956
#
# St Paul Island - near Amsterdam, uninhabited
# fishing stations operated variously 1819/1931
#
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Kerguelen 0 - zzz 1950 # Port-aux-Francais
5:00 - TFT # ISO code TF Time
#
# year-round base in the main continent
# Dumont-d'Urville, Ile des Petrels, -6640+14001, since 1956-11
#
# Another base at Port-Martin, 50km east, began operation in 1947.
# It was destroyed by fire on 1952-01-14.
#
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Antarctica/DumontDUrville 0 - zzz 1947
10:00 - PMT 1952 Jan 14 # Port-Martin Time
0 - zzz 1956 Nov
10:00 - DDUT # Dumont-d'Urville Time
# Reference:
# <a href="http://en.wikipedia.org/wiki/Dumont_d'Urville_Station">
# Dumont d'Urville Station (2005-12-05)
# </a>
# Germany - year-round base
# Georg von Neumayer, -7039-00815
# India - year-round base
# Dakshin Gangotri, -7005+01200
# Japan - year-round bases
# Dome Fuji, -7719+03942
# Syowa, -690022+0393524
#
# From Hideyuki Suzuki (1999-02-06):
# In all Japanese stations, +0300 is used as the standard time.
#
# Syowa station, which is the first antarctic station of Japan,
# was established on 1957-01-29. Since Syowa station is still the main
# station of Japan, it's appropriate for the principal location.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Antarctica/Syowa 0 - zzz 1957 Jan 29
3:00 - SYOT # Syowa Time
# See:
# <a href="http://www.nipr.ac.jp/english/ara01.html">
# NIPR Antarctic Research Activities (1999-08-17)
# </a>
# S Korea - year-round base
# King Sejong, King George Island, -6213-05847, since 1988
# New Zealand - claims
# Balleny Islands (never inhabited)
# Scott Island (never inhabited)
#
# year-round base
# Scott, Ross Island, since 1957-01, is like Antarctica/McMurdo.
#
# These rules for New Zealand are stolen from the `australasia' file.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule NZAQ 1974 only - Nov 3 2:00s 1:00 D
Rule NZAQ 1975 1988 - Oct lastSun 2:00s 1:00 D
Rule NZAQ 1989 only - Oct 8 2:00s 1:00 D
Rule NZAQ 1990 2006 - Oct Sun>=1 2:00s 1:00 D
Rule NZAQ 1975 only - Feb 23 2:00s 0 S
Rule NZAQ 1976 1989 - Mar Sun>=1 2:00s 0 S
Rule NZAQ 1990 2007 - Mar Sun>=15 2:00s 0 S
Rule NZAQ 2007 max - Sep lastSun 2:00s 1:00 D
Rule NZAQ 2008 max - Apr Sun>=1 2:00s 0 S
# Norway - territories
# Bouvet (never inhabited)
#
# claims
# Peter I Island (never inhabited)
# Poland - year-round base
# Arctowski, King George Island, -620945-0582745, since 1977
# Russia - year-round bases
# Bellingshausen, King George Island, -621159-0585337, since 1968-02-22
# Mirny, Davis coast, -6633+09301, since 1956-02
# Molodezhnaya, Alasheyev Bay, -6740+04551,
# year-round from 1962-02 to 1999-07-01
# Novolazarevskaya, Queen Maud Land, -7046+01150,
# year-round from 1960/61 to 1992
# Vostok, since 1957-12-16, temporarily closed 1994-02/1994-11
# <a href="http://quest.arc.nasa.gov/antarctica/QA/computers/Directions,Time,ZIP">
# From Craig Mundell (1994-12-15)</a>:
# Vostok, which is one of the Russian stations, is set on the same
# time as Moscow, Russia.
#
# From Lee Hotz (2001-03-08):
# I queried the folks at Columbia who spent the summer at Vostok and this is
# what they had to say about time there:
# ``in the US Camp (East Camp) we have been on New Zealand (McMurdo)
# time, which is 12 hours ahead of GMT. The Russian Station Vostok was
# 6 hours behind that (although only 2 miles away, i.e. 6 hours ahead
# of GMT). This is a time zone I think two hours east of Moscow. The
# natural time zone is in between the two: 8 hours ahead of GMT.''
#
# From Paul Eggert (2001-05-04):
# This seems to be hopelessly confusing, so I asked Lee Hotz about it
# in person. He said that some Antartic locations set their local
# time so that noon is the warmest part of the day, and that this
# changes during the year and does not necessarily correspond to mean
# solar noon. So the Vostok time might have been whatever the clocks
# happened to be during their visit. So we still don't really know what time
# it is at Vostok. But we'll guess UTC+6.
#
Zone Antarctica/Vostok 0 - zzz 1957 Dec 16
6:00 - VOST # Vostok time
# S Africa - year-round bases
# Marion Island, -4653+03752
# Sanae, -7141-00250
# UK
#
# British Antarctic Territories (BAT) claims
# South Orkney Islands
# scientific station from 1903
# whaling station at Signy I 1920/1926
# South Shetland Islands
#
# year-round bases
# Bird Island, South Georgia, -5400-03803, since 1983
# Deception Island, -6259-06034, whaling station 1912/1931,
# scientific station 1943/1967,
# previously sealers and a scientific expedition wintered by accident,
# and a garrison was deployed briefly
# Halley, Coates Land, -7535-02604, since 1956-01-06
# Halley is on a moving ice shelf and is periodically relocated
# so that it is never more than 10km from its nominal location.
# Rothera, Adelaide Island, -6734-6808, since 1976-12-01
#
# From Paul Eggert (2002-10-22)
# <http://webexhibits.org/daylightsaving/g.html> says Rothera is -03 all year.
#
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Antarctica/Rothera 0 - zzz 1976 Dec 1
-3:00 - ROTT # Rothera time
# Uruguay - year round base
# Artigas, King George Island, -621104-0585107
# USA - year-round bases
#
# Palmer, Anvers Island, since 1965 (moved 2 miles in 1968)
#
# From Ethan Dicks (1996-10-06):
# It keeps the same time as Punta Arenas, Chile, because, just like us
# and the South Pole, that's the other end of their supply line....
# I verified with someone who was there that since 1980,
# Palmer has followed Chile. Prior to that, before the Falklands War,
# Palmer used to be supplied from Argentina.
#
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Antarctica/Palmer 0 - zzz 1965
-4:00 ArgAQ AR%sT 1969 Oct 5
-3:00 ArgAQ AR%sT 1982 May
-4:00 ChileAQ CL%sT
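# Reading the Zone entry above (a sketch): each continuation line applies its
# GMTOFF and RULES until the UNTIL date, so Palmer kept Argentine rules at
# -4:00 until 1969 Oct 5, then at -3:00 until 1982 May, and has followed
# Chilean rules at -4:00 since then.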
#
#
# McMurdo, Ross Island, since 1955-12
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Antarctica/McMurdo 0 - zzz 1956
12:00 NZAQ NZ%sT
#
# Amundsen-Scott, South Pole, continuously occupied since 1956-11-20
#
# From Paul Eggert (1996-09-03):
# Normally it wouldn't have a separate entry, since it's like the
# larger Antarctica/McMurdo since 1970, but it's too famous to omit.
#
# From Chris Carrier (1996-06-27):
# Siple, the first commander of the South Pole station,
# stated that he would have liked to have kept GMT at the station,
# but that he found it more convenient to keep GMT+12
# as supplies for the station were coming from McMurdo Sound,
# which was on GMT+12 because New Zealand was on GMT+12 all year
# at that time (1957). (Source: Siple's book 90 degrees SOUTH.)
#
# From Susan Smith
# http://www.cybertours.com/whs/pole10.html
# (1995-11-13 16:24:56 +1300, no longer available):
# We use the same time as McMurdo does.
# And they use the same time as Christchurch, NZ does....
# One last quirk about South Pole time.
# All the electric clocks are usually wrong.
# Something about the generators running at 60.1hertz or something
# makes all of the clocks run fast. So every couple of days,
# we have to go around and set them back 5 minutes or so.
# Maybe if we let them run fast all of the time, we'd get to leave here sooner!!
#
Link Antarctica/McMurdo Antarctica/South_Pole
|
274056675/springboot-openai-chatgpt | 9,662 | mng_web/src/views/dic/treedic.vue | <template>
<div class="box">
<div class="tree-dic-box">
<avue-crud
:option="tableDicOption"
:data="tableDicData"
:page="tableDicPage"
:table-loading="loading"
ref="crud"
v-model="tableDicForm"
:before-open="beforeOpen"
:before-close="beforeClose"
@tree-load="treeLoad"
@row-del="(row)=>rowDel(row)"
@row-update="rowUpdate"
@row-save="rowSave"
@selection-change="selectionChange"
@current-change="currentChange"
@size-change="sizeChange"
@refresh-change="refreshChange"
@on-load="onLoadParent"
>
<template slot-scope="scope" slot="menuLeft">
<el-button
size="small"
icon="el-icon-delete"
v-if="selectionList.length>0"
@click="rowDel(scope.row,'确定要删除所有选择的记录?')"
>批量删除</el-button>
</template>
<template slot-scope="scope" slot="menu">
<el-button
size="small"
icon="el-icon-s-tools"
type="text"
@click="addChildData(scope.row.id)"
>添加下级</el-button>
</template>
</avue-crud>
</div>
</div>
</template>
<script>
import { apiRequestHead } from '@/config/url.js'
import {
getTreeDicDataApi,
addTreeDicDataApi,
editTreeDicDataApi,
delTreeDicDataApi,
getTreeChildeDicDataApi,
} from '@/api/research/datadic'
import { getTreeAllDataApi, getTreeItemDataApi } from '@/api/research/codelist'
import { mapGetters } from 'vuex'
export default {
data() {
return {
timerInte: null,
      maps: new Map(), // caches tree-table node handles for re-rendering after edits
tableDicForm: {},
tableDicData: [],
tableDicPage: {
pageSize: 10,
pageSizes: [10, 30, 50, 100, 200],
currentPage: 1,
total: 0,
},
tableDicOption: {
        selection: true, // enable row checkboxes; checking triggers @selection-change
        reserveSelection: true, // keep previous selections across pages
        align: 'center', // cell text alignment
        menuAlign: 'center', // action column alignment
columnBtn: false,
lazy: true,
column: [
{
label: '父级节点',
prop: 'pid',
span: 24,
type: 'tree',
hide: true,
editDisabled: true,
dicUrl: `/api/${apiRequestHead}/sys/loadTreeData`,
dicQuery: {
pid: 0,
            tableName: 'sys_category', // database table name
            text: 'name', // field shown as the node label
            code: 'id', // primary key field
            pidField: 'pid', // parent id field
            hasChildField: 'has_child', // field flagging whether a node has children
condition: '',
},
props: {
label: 'title',
value: 'key',
},
dicFormatter: (res) => {
return res.data
},
multiple: false,
filter: false,
lazy: true,
dicFlag: true,
addDisabled: false,
treeLoad: async (node, resolve) => {
if (node.data instanceof Array && node.level != 0) {
return false
}
let treeRes = await getTreeAllDataApi({
pid: node.data.key ? node.data.key : 0,
                tableName: 'sys_category', // database table name
                text: 'name', // field shown as the node label
                code: 'id', // primary key field
                pidField: 'pid', // parent id field
                hasChildField: 'has_child', // field flagging whether a node has children
condition: '',
})
if (treeRes.data.success) {
resolve(treeRes.data.data)
} else {
resolve([])
}
},
},
{
label: '分类名称',
prop: 'name',
span: 24,
rules: [
{
required: true,
trigger: 'blur',
message: '值不能为空',
},
],
},
{
label: '分类编码',
prop: 'code',
span: 24,
rules: [
{
required: true,
trigger: 'blur',
message: '值不能为空',
},
],
},
],
},
selectionList: [],
query: {},
loading: false,
}
},
computed: {
...mapGetters(['userInfo', 'permission']),
ids() {
let ids = []
this.selectionList.forEach((ele) => {
ids.push(ele.id)
})
return ids.join(',')
},
},
mounted() {},
methods: {
async addChildData(id) {
// this.tableDicForm.pid = id
const column = this.findObject(this.tableDicOption.column, 'pid')
column.value = id
column.addDisabled = true
this.setFormPidText(id)
this.$refs.crud.rowAdd()
},
    // sets the display text of the tree-table form's pid field
async setFormPidText(id) {
let itemRes = await getTreeItemDataApi({
tableName: 'sys_category',
tableLine: 'name',
rowKey: 'id',
key: id,
})
if (itemRes.data.success) {
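        // avue renders the dialog form asynchronously, so poll until the
        // pid label's input element exists, then write the display text
        // and stop polling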
this.timerInte = setInterval(() => {
let dom = document.querySelector('label[for=pid]')
if (dom) {
dom.parentNode.querySelector('input').value = itemRes.data.data[0]
? itemRes.data.data[0]
: ''
clearInterval(this.timerInte)
}
}, 300)
}
},
    // data dictionary
rowSave(row, done, loading) {
addTreeDicDataApi(row)
.then((res) => {
if (res.data.success == true) {
this.$message({
message: '保存成功~',
type: 'success',
})
this.onLoadParent(this.tableDicPage, this.query)
this.treeTableDataEcho('add', row)
done()
} else {
throw new Error()
}
})
.catch(() => {
loading()
})
},
rowUpdate(row, index, done, loading) {
editTreeDicDataApi(row)
.then((res) => {
if (res.data.success == true) {
this.$message({
message: '修改成功~',
type: 'success',
})
this.onLoadParent(this.tableDicPage, this.query)
this.treeTableDataEcho('edit', row)
done()
} else {
throw new Error()
}
})
.catch(() => {
loading()
})
},
rowDel(row, text) {
let ids = ''
if (!text) {
ids = row.id
text = '确认要删除此记录?'
} else {
ids = this.ids
}
this.$confirm(text, {
confirmButtonText: '确定',
cancelButtonText: '取消',
type: 'warning',
})
.then(async () => {
let removeRes = await delTreeDicDataApi(ids)
if (removeRes.data.success) {
this.tableDicPage.currentPage = 1
this.onLoadParent(this.tableDicPage, this.query)
row.id = ids
this.treeTableDataEcho('del', row)
this.$message({
type: 'success',
message: '删除成功!',
})
this.$refs.crud.selectClear()
}
})
.catch(() => {})
},
selectionChange(list) {
this.selectionList = list
},
selectionClear() {
this.selectionList = []
this.$refs.crud.toggleSelection()
},
currentChange(currentPage) {
this.tableDicPage.currentPage = currentPage
},
sizeChange(pageSize) {
this.tableDicPage.pageSize = pageSize
},
refreshChange() {
this.onLoadParent(this.tableDicPage, this.query)
},
async onLoadParent(page, params = {}) {
this.loading = true
params.pid = 0
let res = await getTreeDicDataApi(page.currentPage, page.pageSize, params)
if (res.data.success) {
let data = res.data.data
this.tableDicData = data.records.map((item) => {
          item.hasChildren = item.hasChild === '0'
return item
})
this.tableDicPage.total = data.total
}
this.loading = false
},
async treeLoad(tree, treeNode, resolve) {
const pid = tree.id
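      // cache the node handles so treeTableDataEcho() can re-run this loader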
this.maps.set(pid, { tree, treeNode, resolve })
let data = {
pid,
}
let tableDataRes = await getTreeChildeDicDataApi(data)
if (tableDataRes.data.success) {
let resData = tableDataRes.data.data
if (!resData) {
resData = []
}
resData = resData.map((item) => {
          item.hasChildren = item.hasChild === '0'
return item
})
resolve(resData)
} else {
resolve([])
}
},
    // tree table: refresh already-loaded nodes so edits show immediately
async treeTableDataEcho(type, row) {
if (type == 'del') {
this.$set(
this.$refs.crud.$refs.table.store.states.lazyTreeNodeMap,
row.pid,
[]
)
}
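      // re-run the lazy loader for every node seen so far so the tree
      // reflects the latest server data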
this.maps.forEach((item, key) => {
const { tree, treeNode, resolve } = this.maps.get(key)
this.treeLoad(tree, treeNode, resolve)
})
},
async beforeOpen(done, type) {
if (['edit'].includes(type)) {
this.setFormPidText(this.tableDicForm.pid)
}
done()
},
beforeClose(done) {
if (this.timerInte) {
clearInterval(this.timerInte)
}
const column = this.findObject(this.tableDicOption.column, 'pid')
column.value = ''
column.addDisabled = false
done()
},
},
}
</script>
<style lang="scss" scoped>
.tree-dic-box {
padding: 16px;
background-color: #fff;
}
</style>
|
27182812/ChatGLM-LLaMA-chinese-insturct | 32,531 | src/transformers/onnx/config.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import dataclasses
import warnings
from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Mapping, Optional, Tuple, Union
import numpy as np
from packaging import version
from ..utils import TensorType, is_torch_available, is_vision_available, logging
from .utils import ParameterFormat, compute_effective_axis_dimension, compute_serialized_parameters_size
if TYPE_CHECKING:
from ..configuration_utils import PretrainedConfig
from ..feature_extraction_utils import FeatureExtractionMixin
from ..image_processing_utils import ImageProcessingMixin
from ..tokenization_utils_base import PreTrainedTokenizerBase
if is_vision_available():
from PIL import Image
logger = logging.get_logger(__name__)
DEFAULT_ONNX_OPSET = 11
# 2 GiB
EXTERNAL_DATA_FORMAT_SIZE_LIMIT = 2 * 1024 * 1024 * 1024
@dataclasses.dataclass
class PatchingSpec:
"""
Data class that holds patching specifications.
Args:
o: Module / object where the op to patch is located
name: Name of the op to monkey patch
custom_op: Custom op that patches the original op
orig_op: Original op that is being patched
op_wrapper: Wrapper (optional) that wraps both the original and custom ops.
            It is useful for ops that are class or static methods, for instance.
"""
o: Any
name: str
custom_op: Callable
orig_op: Optional[Callable] = None
op_wrapper: Optional[Callable] = None
class OnnxConfig(ABC):
"""
Base class for ONNX exportable model describing metadata on how to export the model through the ONNX format.
"""
default_fixed_batch = 2
default_fixed_sequence = 8
default_fixed_num_choices = 4
torch_onnx_minimum_version = version.parse("1.8")
_tasks_to_common_outputs = {
"causal-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}),
"default": OrderedDict({"last_hidden_state": {0: "batch", 1: "sequence"}}),
"image-classification": OrderedDict({"logits": {0: "batch", 1: "sequence"}}),
"image-segmentation": OrderedDict(
{
"logits": {0: "batch", 1: "sequence"},
"pred_boxes": {0: "batch", 1: "sequence"},
"pred_masks": {0: "batch", 1: "sequence"},
}
),
"masked-im": OrderedDict({"logits": {0: "batch", 1: "sequence"}}),
"masked-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}),
"multiple-choice": OrderedDict({"logits": {0: "batch"}}),
"object-detection": OrderedDict(
{
"logits": {0: "batch", 1: "sequence"},
"pred_boxes": {0: "batch", 1: "sequence"},
}
),
"question-answering": OrderedDict(
{
"start_logits": {0: "batch", 1: "sequence"},
"end_logits": {0: "batch", 1: "sequence"},
}
),
"semantic-segmentation": OrderedDict({"logits": {0: "batch", 1: "num_labels", 2: "height", 3: "width"}}),
"seq2seq-lm": OrderedDict({"logits": {0: "batch", 1: "decoder_sequence"}}),
"sequence-classification": OrderedDict({"logits": {0: "batch"}}),
"token-classification": OrderedDict({"logits": {0: "batch", 1: "sequence"}}),
"vision2seq-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}),
"speech2seq-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}),
}
def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: List[PatchingSpec] = None):
self._config = config
if task not in self._tasks_to_common_outputs:
raise ValueError(
f"{task} is not a supported task, supported tasks: {self._tasks_to_common_outputs.keys()}"
)
self.task = task
self._patching_specs = []
for spec in patching_specs if patching_specs is not None else []:
final_spec = spec
if spec.orig_op is None:
final_spec = dataclasses.replace(spec, orig_op=getattr(spec.o, spec.name))
self._patching_specs.append(final_spec)
@classmethod
def from_model_config(cls, config: "PretrainedConfig", task: str = "default") -> "OnnxConfig":
"""
        Instantiate an OnnxConfig for a specific model
Args:
config: The model's configuration to use when exporting to ONNX
Returns:
OnnxConfig for this model
"""
return cls(config, task=task)
@property
@abstractmethod
def inputs(self) -> Mapping[str, Mapping[int, str]]:
"""
Mapping containing the axis definition of the input tensors to provide to the model
Returns:
For each input: its name associated to the axes symbolic name and the axis position within the tensor
"""
raise NotImplementedError()
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
"""
Mapping containing the axis definition of the output tensors to provide to the model
Returns:
For each output: its name associated to the axes symbolic name and the axis position within the tensor
"""
common_outputs = self._tasks_to_common_outputs[self.task]
return copy.deepcopy(common_outputs)
@property
def values_override(self) -> Optional[Mapping[str, Any]]:
"""
Dictionary of keys to override in the model's config before exporting
Returns:
Dictionary with the keys (and their corresponding values) to override
"""
if hasattr(self._config, "use_cache"):
return {"use_cache": False}
return None
@property
def default_batch_size(self) -> int:
"""
        The default batch size to use if no other indication is given
Returns:
Integer > 0
"""
        # Using 2 avoids ONNX making assumptions about a single-sample batch
return OnnxConfig.default_fixed_batch
@property
def default_sequence_length(self) -> int:
"""
        The default sequence length to use if no other indication is given
Returns:
Integer > 0
"""
return OnnxConfig.default_fixed_sequence
@property
def default_num_choices(self) -> int:
"""
        The default number of choices to use if no other indication is given
Returns:
Integer > 0
"""
return OnnxConfig.default_fixed_num_choices
@property
def default_onnx_opset(self) -> int:
"""
Which onnx opset to use when exporting the model
Returns:
Integer ONNX Opset version
"""
return DEFAULT_ONNX_OPSET
@property
def atol_for_validation(self) -> float:
"""
What absolute tolerance value to use during model conversion validation.
Returns:
Float absolute tolerance value.
"""
return 1e-5
@property
def is_torch_support_available(self) -> bool:
"""
The minimum PyTorch version required to export the model.
Returns:
`bool`: Whether the installed version of PyTorch is compatible with the model.
"""
if is_torch_available():
from transformers.utils import torch_version
return torch_version >= self.torch_onnx_minimum_version
else:
return False
@staticmethod
def use_external_data_format(num_parameters: int) -> bool:
"""
Flag indicating if the model requires using external data format
Args:
num_parameters: Number of parameter on the model
Returns:
            True if model.num_parameters() * size_of(float32) >= 2 GiB, False otherwise
"""
return (
compute_serialized_parameters_size(num_parameters, ParameterFormat.Float)
>= EXTERNAL_DATA_FORMAT_SIZE_LIMIT
)
def _generate_dummy_images(
self, batch_size: int = 2, num_channels: int = 3, image_height: int = 40, image_width: int = 40
):
images = []
for _ in range(batch_size):
data = np.random.rand(image_height, image_width, num_channels) * 255
images.append(Image.fromarray(data.astype("uint8")).convert("RGB"))
return images
def _generate_dummy_audio(
self, batch_size: int = 2, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220
):
audio_data = []
for _ in range(batch_size):
# time variable
t = np.linspace(0, time_duration, int(time_duration * sampling_rate), endpoint=False)
# generate pure sine wave at `frequency` Hz
audio_data.append(0.5 * np.sin(2 * np.pi * frequency * t))
return audio_data
def generate_dummy_inputs(
self,
preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin", "ImageProcessingMixin"],
batch_size: int = -1,
seq_length: int = -1,
num_choices: int = -1,
is_pair: bool = False,
framework: Optional[TensorType] = None,
num_channels: int = 3,
image_width: int = 40,
image_height: int = 40,
sampling_rate: int = 22050,
time_duration: float = 5.0,
frequency: int = 220,
tokenizer: "PreTrainedTokenizerBase" = None,
) -> Mapping[str, Any]:
"""
Generate inputs to provide to the ONNX exporter for the specific framework
Args:
preprocessor: ([`PreTrainedTokenizerBase`], [`FeatureExtractionMixin`], or [`ImageProcessingMixin`]):
The preprocessor associated with this model configuration.
batch_size (`int`, *optional*, defaults to -1):
The batch size to export the model for (-1 means dynamic axis).
num_choices (`int`, *optional*, defaults to -1):
The number of candidate answers provided for multiple choice task (-1 means dynamic axis).
seq_length (`int`, *optional*, defaults to -1):
The sequence length to export the model for (-1 means dynamic axis).
is_pair (`bool`, *optional*, defaults to `False`):
Indicate if the input is a pair (sentence 1, sentence 2)
framework (`TensorType`, *optional*, defaults to `None`):
The framework (PyTorch or TensorFlow) that the tokenizer will generate tensors for.
num_channels (`int`, *optional*, defaults to 3):
The number of channels of the generated images.
image_width (`int`, *optional*, defaults to 40):
The width of the generated images.
image_height (`int`, *optional*, defaults to 40):
The height of the generated images.
            sampling_rate (`int`, *optional*, defaults to 22050):
                The sampling rate for audio data generation.
            time_duration (`float`, *optional*, defaults to 5.0):
                Total seconds of sampling for audio data generation.
            frequency (`int`, *optional*, defaults to 220):
                The desired natural frequency of the generated audio.
Returns:
Mapping[str, Tensor] holding the kwargs to provide to the model's forward function
"""
from ..feature_extraction_utils import FeatureExtractionMixin
from ..image_processing_utils import ImageProcessingMixin
from ..tokenization_utils_base import PreTrainedTokenizerBase
if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None:
raise ValueError("You cannot provide both a tokenizer and a preprocessor to generate dummy inputs.")
if tokenizer is not None:
warnings.warn(
"The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use"
" `preprocessor` instead.",
FutureWarning,
)
logger.warning("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.")
preprocessor = tokenizer
if isinstance(preprocessor, PreTrainedTokenizerBase):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
batch_size = compute_effective_axis_dimension(
batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
seq_length = compute_effective_axis_dimension(
seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
)
# Generate dummy inputs according to compute batch and sequence
input_token = (
preprocessor.unk_token
if (preprocessor.unk_token is not None and len(preprocessor.unk_token) > 0)
else "0"
)
dummy_input = [" ".join([input_token]) * seq_length] * batch_size
if self.task == "multiple-choice":
# If dynamic axis (-1) we forward with a fixed dimension of 4 candidate answers to avoid optimizations
# made by ONNX
num_choices = compute_effective_axis_dimension(
num_choices, fixed_dimension=OnnxConfig.default_fixed_num_choices, num_token_to_add=0
)
dummy_input = dummy_input * num_choices
# The shape of the tokenized inputs values is [batch_size * num_choices, seq_length]
tokenized_input = preprocessor(dummy_input, text_pair=dummy_input)
                # Unflatten the tokenized input values, expanding them to the shape [batch_size, num_choices, seq_length]
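                # e.g. batch_size=2, num_choices=4: a flat list of 8 encodings
                # becomes [[e0, e1, e2, e3], [e4, e5, e6, e7]]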
for k, v in tokenized_input.items():
tokenized_input[k] = [v[i : i + num_choices] for i in range(0, len(v), num_choices)]
return dict(tokenized_input.convert_to_tensors(tensor_type=framework))
return dict(preprocessor(dummy_input, return_tensors=framework))
elif isinstance(preprocessor, ImageProcessingMixin):
if preprocessor.model_input_names[0] != "pixel_values":
raise ValueError(
f"The `preprocessor` is an image processor ({preprocessor.__class__.__name__}) and expects"
f' `model_input_names[0]` to be "pixel_values", but got {preprocessor.model_input_names[0]}'
)
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
return dict(preprocessor(images=dummy_input, return_tensors=framework))
elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
return dict(preprocessor(images=dummy_input, return_tensors=framework))
elif (
isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "input_features"
):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
dummy_input = self._generate_dummy_audio(batch_size, sampling_rate, time_duration, frequency)
return dict(preprocessor(dummy_input, return_tensors=framework))
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
)
def generate_dummy_inputs_onnxruntime(self, reference_model_inputs: Mapping[str, Any]) -> Mapping[str, Any]:
"""
Generate inputs for ONNX Runtime using the reference model inputs. Override this to run inference with seq2seq
models which have the encoder and decoder exported as separate ONNX files.
Args:
            reference_model_inputs (`Mapping[str, Tensor]`):
Reference inputs for the model.
Returns:
`Mapping[str, Tensor]`: The mapping holding the kwargs to provide to the model's forward function
"""
return reference_model_inputs
def patch_ops(self):
for spec in self._patching_specs:
custom_op = spec.custom_op if spec.op_wrapper is None else spec.op_wrapper(spec.custom_op)
setattr(spec.o, spec.name, custom_op)
def restore_ops(self):
for spec in self._patching_specs:
orig_op = spec.orig_op if spec.op_wrapper is None else spec.op_wrapper(spec.orig_op)
setattr(spec.o, spec.name, orig_op)
@classmethod
def flatten_output_collection_property(cls, name: str, field: Iterable[Any]) -> Dict[str, Any]:
"""
Flatten any potential nested structure expanding the name of the field with the index of the element within the
structure.
Args:
name: The name of the nested structure
field: The structure to, potentially, be flattened
Returns:
(Dict[str, Any]): Outputs with flattened structure and key mapping this new structure.
"""
from itertools import chain
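        # e.g. name="present", field=[(k0, v0), (k1, v1)] flattens to
        # {"present.0": k0, "present.1": v0, "present.2": k1, "present.3": v1}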
return {f"{name}.{idx}": item for idx, item in enumerate(chain.from_iterable(field))}
class OnnxConfigWithPast(OnnxConfig, ABC):
def __init__(
self,
config: "PretrainedConfig",
task: str = "default",
patching_specs: List[PatchingSpec] = None,
use_past: bool = False,
):
super().__init__(config, task=task, patching_specs=patching_specs)
self.use_past = use_past
@classmethod
def with_past(cls, config: "PretrainedConfig", task: str = "default") -> "OnnxConfigWithPast":
"""
        Instantiate an OnnxConfig with the `use_past` attribute set to True
Args:
config: The underlying model's config to use when exporting to ONNX
Returns:
OnnxConfig with `.use_past = True`
"""
return cls(config, task=task, use_past=True)
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
common_outputs = super().outputs
if self.use_past:
self.fill_with_past_key_values_(common_outputs, direction="outputs")
return common_outputs
@property
def values_override(self) -> Optional[Mapping[str, Any]]:
if hasattr(self._config, "use_cache"):
return {"use_cache": self.use_past}
return None
@property
def num_layers(self) -> int:
"""
The number of layers attribute retrieved from the model config. Override this for model configs where the
number of layers attribute is not called `num_layers`.
"""
if not hasattr(self._config, "num_layers"):
raise AttributeError(
"could not find the number of layers attribute in the model configuration, override the num_layers"
" property of the model OnnxConfig to solve this"
)
return self._config.num_layers
@property
def num_attention_heads(self) -> int:
"""
The number of attention heads attribute retrieved from the model config. Override this for model configs where
the number of attention heads attribute is not called `num_attention_heads`.
"""
if not hasattr(self._config, "num_attention_heads"):
raise AttributeError(
"could not find the number of attention heads attribute in the model configuration, override the"
" num_attention_heads property of the model OnnxConfig to solve this"
)
return self._config.num_attention_heads
def generate_dummy_inputs(
self,
tokenizer: "PreTrainedTokenizerBase",
batch_size: int = -1,
seq_length: int = -1,
is_pair: bool = False,
framework: Optional[TensorType] = None,
) -> Mapping[str, Any]:
# TODO: should we set seq_length = 1 when self.use_past = True?
common_inputs = super().generate_dummy_inputs(
tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
)
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
else:
import torch
batch, seqlen = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
past_key_values_length = seqlen + 2
shape = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
if "attention_mask" in common_inputs:
mask_dtype = common_inputs["attention_mask"].dtype
common_inputs["attention_mask"] = torch.cat(
[common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
dim=1,
)
common_inputs["past_key_values"] = []
for _ in range(self.num_layers):
common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
return common_inputs
def fill_with_past_key_values_(
self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str, inverted_values_shape: bool = False
):
"""
        Fill the inputs_or_outputs mapping with past_key_values dynamic axes, taking the requested direction into account.
Args:
inputs_or_outputs: The mapping to fill.
direction: either "inputs" or "outputs", it specifies whether input_or_outputs is the input mapping or the
output mapping, this is important for axes naming.
inverted_values_shape:
If `True`, store values on dynamic axis 1, else on axis 2.
"""
if direction not in ["inputs", "outputs"]:
raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')
name = "past_key_values" if direction == "inputs" else "present"
for i in range(self.num_layers):
inputs_or_outputs[f"{name}.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
if inverted_values_shape:
inputs_or_outputs[f"{name}.{i}.value"] = {0: "batch", 1: "past_sequence + sequence"}
else:
inputs_or_outputs[f"{name}.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
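        # e.g. direction="inputs" with num_layers=2 adds dynamic axes for
        # past_key_values.0.key, past_key_values.0.value,
        # past_key_values.1.key and past_key_values.1.value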
def _flatten_past_key_values_(self, flattened_output, name, idx, t):
flattened_output[f"{name}.{idx}.key"] = t[0]
flattened_output[f"{name}.{idx}.value"] = t[1]
def flatten_output_collection_property(self, name: str, field: Iterable[Any]) -> Dict[str, Any]:
flattened_output = {}
if name in ["present", "past_key_values"]:
for idx, t in enumerate(field):
self._flatten_past_key_values_(flattened_output, name, idx, t)
else:
flattened_output = super().flatten_output_collection_property(name, field)
return flattened_output
class OnnxSeq2SeqConfigWithPast(OnnxConfigWithPast):
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
common_outputs = super(OnnxConfigWithPast, self).outputs
# Renaming the outputs axes properly.
for name, axes_names in common_outputs.items():
sequence_name = "encoder_sequence" if "encoder" in name else "decoder_sequence"
for axis_idx, name in axes_names.items():
if "sequence" in name:
axes_names[axis_idx] = sequence_name
# We reset the value as the order in common_outputs (OrderedDict) is lost otherwise
else:
axes_names[axis_idx] = name
if self.use_past:
self.fill_with_past_key_values_(common_outputs, direction="outputs")
return common_outputs
@property
def num_layers(self) -> Tuple[int]:
try:
num_layers = super().num_layers
num_layers = (num_layers, num_layers)
except AttributeError:
if hasattr(self._config, "encoder_layers") and hasattr(self._config, "decoder_layers"):
num_layers = (self._config.encoder_layers, self._config.decoder_layers)
else:
raise AttributeError(
"could not find the number of encoder and decoder layers attributes in the model configuration,"
" override the num_layers property of the model OnnxConfig to solve this"
)
return num_layers
@property
def num_attention_heads(self) -> Tuple[int]:
try:
num_attention_heads = super().num_attention_heads
num_attention_heads = (num_attention_heads, num_attention_heads)
except AttributeError:
if hasattr(self._config, "encoder_attention_heads") and hasattr(self._config, "decoder_attention_heads"):
num_attention_heads = (self._config.encoder_attention_heads, self._config.decoder_attention_heads)
else:
raise AttributeError(
"could not find the number of attention heads for the encoder and the decoder attributes in the"
" model configuration, override the num_attention_heads property of the model OnnxConfig to solve"
" this"
)
return num_attention_heads
def generate_dummy_inputs(
self,
tokenizer: "PreTrainedTokenizerBase",
batch_size: int = -1,
seq_length: int = -1,
is_pair: bool = False,
framework: Optional[TensorType] = None,
) -> Mapping[str, Any]:
encoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
)
# Generate decoder inputs
decoder_seq_length = seq_length if not self.use_past else 1
decoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
tokenizer, batch_size=batch_size, seq_length=decoder_seq_length, is_pair=is_pair, framework=framework
)
decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
common_inputs = dict(**encoder_inputs, **decoder_inputs)
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
else:
import torch
batch = common_inputs["input_ids"].shape[0]
encoder_seq_length = common_inputs["input_ids"].shape[1]
decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
encoder_shape = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
decoder_shape = (
batch,
num_decoder_attention_heads,
# Not using the same length for past_key_values
decoder_seq_length + 3,
self._config.hidden_size // num_decoder_attention_heads,
)
common_inputs["past_key_values"] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
num_encoder_layers, num_decoder_layers = self.num_layers
min_num_layers = min(num_encoder_layers, num_decoder_layers)
max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(min_num_layers):
# For encoder-decoder models, past_key_values contains pre-computed values for both the encoder and the
# decoder layers, hence a tuple of 4 tensors instead of 2
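                # i.e. each entry is (decoder.key, decoder.value, encoder.key, encoder.value)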
common_inputs["past_key_values"].append(
(
torch.zeros(decoder_shape),
torch.zeros(decoder_shape),
torch.zeros(encoder_shape),
torch.zeros(encoder_shape),
)
)
# TODO: test this.
shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(min_num_layers, max_num_layers):
common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
return common_inputs
def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str):
if direction not in ["inputs", "outputs"]:
raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')
name = "past_key_values" if direction == "inputs" else "present"
# If the number of encoder and decoder layers are present in the model configuration, both are considered
num_encoder_layers, num_decoder_layers = self.num_layers
min_num_layers = min(num_encoder_layers, num_decoder_layers)
max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
encoder_sequence = "past_encoder_sequence"
decoder_sequence = "past_decoder_sequence" if direction == "inputs" else "past_decoder_sequence + sequence"
for i in range(min_num_layers):
inputs_or_outputs[f"{name}.{i}.decoder.key"] = {0: "batch", 2: decoder_sequence}
inputs_or_outputs[f"{name}.{i}.decoder.value"] = {0: "batch", 2: decoder_sequence}
inputs_or_outputs[f"{name}.{i}.encoder.key"] = {0: "batch", 2: encoder_sequence}
inputs_or_outputs[f"{name}.{i}.encoder.value"] = {0: "batch", 2: encoder_sequence}
for i in range(min_num_layers, max_num_layers):
if remaining_side_name == "encoder":
axes_info = {0: "batch", 2: encoder_sequence}
else:
axes_info = {0: "batch", 2: decoder_sequence}
inputs_or_outputs[f"{name}.{i}.{remaining_side_name}.key"] = axes_info
def _flatten_past_key_values_(self, flattened_output, name, idx, t):
flattened_output[f"{name}.{idx}.decoder.key"] = t[0]
flattened_output[f"{name}.{idx}.decoder.value"] = t[1]
flattened_output[f"{name}.{idx}.encoder.key"] = t[2]
flattened_output[f"{name}.{idx}.encoder.value"] = t[3]
|
27182812/ChatGLM-LLaMA-chinese-insturct | 5,389 | src/transformers/sagemaker/training_args_sm.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
# TODO: should be moved to `utils` after refactoring of SageMakerTrainer
def is_sagemaker_model_parallel_available():
# Get the sagemaker specific mp parameters from smp_options variable.
smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
smp_options = json.loads(smp_options)
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
mpi_options = json.loads(mpi_options)
if not mpi_options.get("sagemaker_mpi_enabled", False):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed") is not None
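# Example (hypothetical values): is_sagemaker_model_parallel_available() returns
# True when the launcher sets SM_HP_MP_PARAMETERS='{"partitions": 2}' and
# SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}' and the
# `smdistributed` package is importable.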
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
mp_parameters: str = field(
default="",
metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
)
def __post_init__(self):
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead.",
FutureWarning,
)
@cached_property
def _setup_devices(self) -> "torch.device":
logger.info("PyTorch: setting up devices")
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
)
if self.no_cuda:
device = torch.device("cpu")
self._n_gpu = 0
elif is_sagemaker_model_parallel_available():
local_rank = smp.local_rank()
device = torch.device("cuda", local_rank)
self._n_gpu = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
device = torch.device("cuda", self.local_rank)
self._n_gpu = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
self._n_gpu = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
device = torch.device("cuda", self.local_rank)
self._n_gpu = 1
if device.type == "cuda":
torch.cuda.set_device(device)
return device
@property
def world_size(self):
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def place_model_on_device(self):
return not is_sagemaker_model_parallel_available()
@property
def _no_sync_in_gradient_accumulation(self):
return False
|
27182812/ChatGLM-LLaMA-chinese-insturct | 1,044 | src/transformers/sagemaker/trainer_sm.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
def __init__(self, args=None, **kwargs):
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead.",
FutureWarning,
)
super().__init__(args=args, **kwargs)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 3,320 | src/transformers/models/__init__.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_2,
bloom,
bort,
bridgetower,
byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
cpm,
ctrl,
cvt,
data2vec,
deberta,
deberta_v2,
decision_transformer,
deformable_detr,
deit,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encoder_decoder,
ernie,
ernie_m,
esm,
flaubert,
flava,
fnet,
fsmt,
funnel,
git,
glpn,
gpt2,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
jukebox,
layoutlm,
layoutlmv2,
layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longt5,
luke,
lxmert,
m2m_100,
marian,
markuplm,
mask2former,
maskformer,
mbart,
mbart50,
mctct,
megatron_bert,
megatron_gpt2,
mluke,
mmbt,
mobilebert,
mobilenet_v1,
mobilenet_v2,
mobilevit,
mpnet,
mt5,
mvp,
nat,
nezha,
nllb,
nystromformer,
oneformer,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
retribert,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_2,
speecht5,
splinter,
squeezebert,
swin,
swin2sr,
swinv2,
switch_transformers,
t5,
table_transformer,
tapas,
tapex,
time_series_transformer,
timesformer,
trajectory_transformer,
transfo_xl,
trocr,
tvlt,
unispeech,
unispeech_sat,
upernet,
van,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
wav2vec2,
wav2vec2_conformer,
wav2vec2_phoneme,
wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
|
274056675/springboot-openai-chatgpt | 13,601 | mng_web/src/views/dic/datadic.vue | <template>
<div class="box">
<div class="data-dic-box">
<avue-crud
:option="tableDicOption"
:data="tableDicData"
:page="tableDicPage"
:table-loading="loading"
ref="crud"
v-model="tableDicForm"
@row-del="(row)=>rowDel(row)"
@row-update="rowUpdate"
@row-save="rowSave"
@search-change="searchChange"
@search-reset="searchReset"
@selection-change="selectionChange"
@current-change="currentChange"
@size-change="sizeChange"
@refresh-change="refreshChange"
@on-load="onLoadParent"
>
<template slot-scope="scope" slot="menuLeft">
<el-button
size="small"
icon="el-icon-delete"
v-if="selectionList.length>0"
@click="rowDel(scope.row,'确定要删除所有选择的记录?')"
>批量删除</el-button>
</template>
<template slot-scope="scope" slot="menu">
<el-button
size="small"
icon="el-icon-s-tools"
type="text"
@click="openDicListFun(scope.row.id)"
>字典配置</el-button>
</template>
</avue-crud>
</div>
<el-drawer
title="字典列表"
:visible.sync="isOpenDrawer"
:append-to-body="true"
:modal-append-to-body="true"
:size="800"
>
<div class="data-dic-list-box">
<avue-crud
:option="tableDicListOption"
:data="tableDicListData"
:page="tableDicListPage"
:table-loading="loadingChild"
ref="crudChild"
v-model="tableDicListForm"
@row-del="(row)=>rowDelChild(row)"
@row-update="rowUpdateChild"
@row-save="rowSaveChild"
@search-change="searchChangeChild"
@search-reset="searchResetChild"
@selection-change="selectionChangeChild"
@current-change="currentChangeChild"
@size-change="sizeChangeChild"
@refresh-change="refreshChangeChild"
@on-load="onLoadChild"
>
<!-- <template slot-scope="scope" slot="statusForm">
<el-switch
v-model="scope.row.status"
:value="scope.value"
:disabled="scope.disabled"
active-value="1"
inactive-value="0"
></el-switch>
</template>-->
<template slot-scope="scope" slot="statusSearch">
<el-select v-model="scope.row.status" placeholder="请选择" :size="scope.size">
<el-option label="启用" :value="1"></el-option>
<el-option label="禁用" :value="0"></el-option>
</el-select>
</template>
<template slot-scope="scope" slot="menuLeft">
<el-button
size="small"
icon="el-icon-delete"
v-if="selectionChildList.length>0"
@click="rowDelChild(scope.row,'确定要删除所有选择的记录?')"
>批量删除</el-button>
</template>
</avue-crud>
</div>
</el-drawer>
</div>
</template>
<script>
import {
getDicDataApi,
addDicDataApi,
editDicDataApi,
delDicDataApi,
getDicListDataApi,
addDicListDataApi,
editDicListDataApi,
delDicListDataApi,
} from '@/api/research/datadic'
import { mapGetters } from 'vuex'
export default {
data() {
return {
      // data dictionary
tableDicForm: {},
tableDicData: [],
tableDicOption: {
        border: true, // show table borders
        index: true, // show a row index column
        selection: true, // enable row checkboxes; checking triggers @selection-change
        reserveSelection: true, // keep previous selections across pages
        align: 'center', // cell text alignment
        menuAlign: 'center', // action column alignment
columnBtn: false,
searchMenuSpan: 8,
column: [
{
label: '字典名称',
prop: 'dictName',
span: 24,
rules: [
{
required: true,
trigger: 'blur',
message: '值不能为空',
},
],
search: true,
},
{
label: '字典编号',
prop: 'dictCode',
span: 24,
rules: [
{
required: true,
trigger: 'blur',
message: '值不能为空',
},
],
search: true,
},
{
label: '描述',
prop: 'description',
span: 24,
overHidden:true,
},
],
},
tableDicPage: {
pageSize: 10,
pageSizes: [10, 30, 50, 100, 200],
currentPage: 1,
total: 0,
},
selectionList: [],
query: {},
loading: false,
      // dictionary item configuration
isOpenDrawer: false,
currentDicId: '',
tableDicListForm: {},
tableDicListData: [],
tableDicListOption: {
        border: true, // show table borders
        index: true, // show a row index column
        selection: true, // enable row checkboxes; checking triggers @selection-change
        reserveSelection: true, // keep previous selections across pages
        align: 'center', // cell text alignment
        menuAlign: 'center', // action column alignment
columnBtn: false,
searchMenuSpan: 8,
column: [
{
label: '名称',
prop: 'itemText',
span: 24,
rules: [
{
required: true,
trigger: 'blur',
message: '值不能为空',
},
],
search: true,
searchSpan: 8,
},
{
label: '数据值',
prop: 'itemValue',
span: 24,
rules: [
{
required: true,
trigger: 'blur',
message: '值不能为空',
},
],
},
{
label: '描述',
prop: 'description',
span: 24,
hide: true,
},
{
label: '排序值',
prop: 'sortOrder',
span: 24,
hide: true,
type: 'number',
minRows: 1,
value: 1,
style: {
width: '160px !important',
},
},
{
label: '是否启用',
prop: 'status',
span: 24,
hide: true,
value: true,
search: true,
searchSpan: 8,
type: 'switch',
},
],
},
tableDicListPage: {
pageSize: 10,
pageSizes: [10, 30, 50, 100, 200],
currentPage: 1,
total: 0,
},
selectionChildList: [],
childQuery: {},
loadingChild: false,
}
},
computed: {
...mapGetters(['userInfo', 'permission']),
ids() {
let ids = []
this.selectionList.forEach((ele) => {
ids.push(ele.id)
})
return ids.join(',')
},
idsChild() {
let ids = []
this.selectionChildList.forEach((ele) => {
ids.push(ele.id)
})
return ids.join(',')
},
},
mounted() {},
methods: {
    // data dictionary
rowSave(row, done, loading) {
addDicDataApi(row)
.then((res) => {
if (res.data.success == true) {
this.$message({
message: '保存成功~',
type: 'success',
})
this.onLoadParent(this.tableDicPage, this.query)
done()
} else {
throw new Error()
}
})
.catch(() => {
loading()
})
},
rowUpdate(row, index, done, loading) {
editDicDataApi(row)
.then((res) => {
if (res.data.success == true) {
this.$message({
message: '修改成功~',
type: 'success',
})
this.onLoadParent(this.tableDicPage, this.query)
done()
} else {
throw new Error()
}
})
.catch(() => {
loading()
})
},
rowDel(row, text) {
let ids = ''
if (!text) {
ids = row.id
text = '确认要删除此记录?'
} else {
ids = this.ids
}
this.$confirm(text, {
confirmButtonText: '确定',
cancelButtonText: '取消',
type: 'warning',
})
.then(async () => {
let removeRes = await delDicDataApi(ids)
if (removeRes.data.success) {
this.tableDicPage.currentPage = 1
this.onLoadParent(this.tableDicPage, this.query)
this.$message({
type: 'success',
message: '删除成功!',
})
this.$refs.crud.selectClear()
}
})
.catch(() => {})
},
searchReset() {
this.query = {}
this.onLoadParent(this.tableDicPage)
},
searchChange(params, done) {
this.query = params
this.tableDicPage.currentPage = 1
this.onLoadParent(this.tableDicPage, params)
done()
},
selectionChange(list) {
this.selectionList = list
},
selectionClear() {
this.selectionList = []
this.$refs.crud.toggleSelection()
},
currentChange(currentPage) {
this.tableDicPage.currentPage = currentPage
},
sizeChange(pageSize) {
this.tableDicPage.pageSize = pageSize
},
refreshChange() {
this.onLoadParent(this.tableDicPage, this.query)
},
async onLoadParent(page, params = {}) {
params = {
...params,
...this.query,
}
this.loading = true
let res = await getDicDataApi(page.currentPage, page.pageSize, params)
if (res.data.success) {
let data = res.data.data
this.tableDicData = data.records
this.tableDicPage.total = data.total
}
this.loading = false
},
    // dictionary item configuration
    // data normalization
setDataRowStatus(row) {
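      // converts the status field between the API's 0/1 integers and the
      // switch widget's booleans: booleans become 0/1, anything else is
      // coerced to a boolean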
if (row instanceof Array) {
row = row.map((item) => {
if (item.status === false || item.status === true) {
item.status = item.status ? 1 : 0
} else {
            item.status = Boolean(item.status)
}
return item
})
} else {
if (row.status === false || row.status === true) {
row.status = row.status ? 1 : 0
} else {
          row.status = Boolean(row.status)
}
}
return row
},
openDicListFun(id) {
      // open the dictionary item configuration drawer
this.currentDicId = id
this.onLoadChild({
currentPage: 1,
pageSize: 10,
})
this.isOpenDrawer = true
},
rowSaveChild(row, done, loading) {
row = this.setDataRowStatus(row)
row.dictId = this.currentDicId
addDicListDataApi(row)
.then((res) => {
if (res.data.success == true) {
this.$message({
message: '保存成功~',
type: 'success',
})
this.onLoadChild(this.tableDicListPage, this.childQuery)
done()
} else {
throw new Error()
}
})
.catch(() => {
loading()
})
},
rowUpdateChild(row, index, done, loading) {
row = this.setDataRowStatus(row)
row.dictId = this.currentDicId
editDicListDataApi(row)
.then((res) => {
if (res.data.success == true) {
this.$message({
message: '修改成功~',
type: 'success',
})
this.onLoadChild(this.tableDicListPage, this.childQuery)
done()
} else {
throw new Error()
}
})
.catch(() => {
loading()
})
},
rowDelChild(row, text) {
let ids = ''
if (!text) {
text = '确认要删除此记录?'
ids = row.id
} else {
ids = this.idsChild
}
this.$confirm(text, {
confirmButtonText: '确定',
cancelButtonText: '取消',
type: 'warning',
})
.then(async () => {
let removeRes = await delDicListDataApi(ids)
if (removeRes.data.success) {
this.tableDicListPage.currentPage = 1
this.onLoadChild(this.tableDicListPage, this.childQuery)
this.$message({
type: 'success',
message: '删除成功!',
})
this.$refs.crudChild.selectClear()
}
})
.catch(() => {})
},
searchResetChild() {
this.childQuery = {}
this.onLoadChild(this.tableDicListPage)
},
searchChangeChild(params, done) {
this.childQuery = params
this.tableDicListPage.currentPage = 1
this.onLoadChild(this.tableDicListPage, params)
done()
},
selectionChangeChild(list) {
this.selectionChildList = list
},
currentChangeChild(currentPage) {
this.tableDicListPage.currentPage = currentPage
},
sizeChangeChild(pageSize) {
this.tableDicListPage.pageSize = pageSize
},
refreshChangeChild() {
this.onLoadChild(this.tableDicListPage, this.query)
},
async onLoadChild(page, params = {}) {
params.dictId = this.currentDicId
this.loadingChild = true
let res = await getDicListDataApi(page.currentPage, page.pageSize, params)
if (res.data.success) {
let data = res.data.data
data.records = this.setDataRowStatus(data.records)
this.tableDicListData = data.records
this.tableDicListPage.total = data.total
}
this.loadingChild = false
},
},
}
</script>
<style lang="scss" scoped>
.data-dic-box,
.data-dic-list-box {
padding: 16px;
background-color: #fff;
}
</style>
|
233zzh/TitanDataOperationSystem | 121,381 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/flot/examples/axes-time-zones/tz/europe | # <pre>
# This file is in the public domain, so clarified as of
# 2009-05-17 by Arthur David Olson.
# This data is by no means authoritative; if you think you know better,
# go ahead and edit the file (and please send any changes to
# tz@iana.org for general use in the future).
# From Paul Eggert (2006-03-22):
# A good source for time zone historical data outside the U.S. is
# Thomas G. Shanks and Rique Pottenger, The International Atlas (6th edition),
# San Diego: ACS Publications, Inc. (2003).
#
# Gwillim Law writes that a good source
# for recent time zone data is the International Air Transport
# Association's Standard Schedules Information Manual (IATA SSIM),
# published semiannually. Law sent in several helpful summaries
# of the IATA's data after 1990.
#
# Except where otherwise noted, Shanks & Pottenger is the source for
# entries through 1991, and IATA SSIM is the source for entries afterwards.
#
# Other sources occasionally used include:
#
# Edward W. Whitman, World Time Differences,
# Whitman Publishing Co, 2 Niagara Av, Ealing, London (undated),
# which I found in the UCLA library.
#
# <a href="http://www.pettswoodvillage.co.uk/Daylight_Savings_William_Willett.pdf">
# William Willett, The Waste of Daylight, 19th edition
# </a> (1914-03)
#
# Brazil's Departamento Servico da Hora (DSH),
# <a href="http://pcdsh01.on.br/HISTHV.htm">
# History of Summer Time
# </a> (1998-09-21, in Portuguese)
#
# I invented the abbreviations marked `*' in the following table;
# the rest are from earlier versions of this file, or from other sources.
# Corrections are welcome!
# std dst 2dst
# LMT Local Mean Time
# -4:00 AST ADT Atlantic
# -3:00 WGT WGST Western Greenland*
# -1:00 EGT EGST Eastern Greenland*
# 0:00 GMT BST BDST Greenwich, British Summer
# 0:00 GMT IST Greenwich, Irish Summer
# 0:00 WET WEST WEMT Western Europe
# 0:19:32.13 AMT NST Amsterdam, Netherlands Summer (1835-1937)*
# 0:20 NET NEST Netherlands (1937-1940)*
# 1:00 CET CEST CEMT Central Europe
# 1:00:14 SET Swedish (1879-1899)*
# 2:00 EET EEST Eastern Europe
# 3:00 MSK MSD Moscow
#
# A reliable and entertaining source about time zones, especially in Britain,
# Derek Howse, Greenwich time and longitude, Philip Wilson Publishers (1997).
# From Peter Ilieve (1994-12-04),
# The original six [EU members]: Belgium, France, (West) Germany, Italy,
# Luxembourg, the Netherlands.
# Plus, from 1 Jan 73: Denmark, Ireland, United Kingdom.
# Plus, from 1 Jan 81: Greece.
# Plus, from 1 Jan 86: Spain, Portugal.
# Plus, from 1 Jan 95: Austria, Finland, Sweden. (Norway negotiated terms for
# entry but in a referendum on 28 Nov 94 the people voted No by 52.2% to 47.8%
# on a turnout of 88.6%. This was almost the same result as Norway's previous
# referendum in 1972, they are the only country to have said No twice.
# Referendums in the other three countries voted Yes.)
# ...
# Estonia ... uses EU dates but not at 01:00 GMT, they use midnight GMT.
# I don't think they know yet what they will do from 1996 onwards.
# ...
# There shouldn't be any [current members who are not using EU rules].
# A Directive has the force of law, member states are obliged to enact
# national law to implement it. The only contentious issue was the
# different end date for the UK and Ireland, and this was always allowed
# in the Directive.
###############################################################################
# Britain (United Kingdom) and Ireland (Eire)
# From Peter Ilieve (1994-07-06):
#
# On 17 Jan 1994 the Independent, a UK quality newspaper, had a piece about
# historical vistas along the Thames in west London. There was a photo
# and a sketch map showing some of the sightlines involved. One paragraph
# of the text said:
#
# `An old stone obelisk marking a forgotten terrestrial meridian stands
# beside the river at Kew. In the 18th century, before time and longitude
# was standardised by the Royal Observatory in Greenwich, scholars observed
# this stone and the movement of stars from Kew Observatory nearby. They
# made their calculations and set the time for the Horse Guards and Parliament,
# but now the stone is obscured by scrubwood and can only be seen by walking
# along the towpath within a few yards of it.'
#
# I have a one inch to one mile map of London and my estimate of the stone's
# position is 51 deg. 28' 30" N, 0 deg. 18' 45" W. The longitude should
# be within about +-2". The Ordnance Survey grid reference is TQ172761.
#
# [This yields GMTOFF = -0:01:15 for London LMT in the 18th century.]
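#
# The bracketed conversion above is simple arithmetic: longitude converts
# to local mean time at 4 minutes per degree (240 seconds).  A minimal
# Python sketch, not part of this database and with an invented name,
# that reproduces the -0:01:15 figure from the position estimated above:
#
#	def lmt_offset_seconds(deg, arcmin, arcsec, east):
#	    """Longitude -> local mean time offset from Greenwich, in seconds."""
#	    lon = deg + arcmin / 60.0 + arcsec / 3600.0   # degrees
#	    secs = round(lon * 240.0)                     # 1 degree = 4 minutes of time
#	    return secs if east else -secs
#
#	print(lmt_offset_seconds(0, 18, 45, east=False))  # -75, i.e. -0:01:15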
# From Paul Eggert (1993-11-18):
#
# Howse writes that Britain was the first country to use standard time.
# The railways cared most about the inconsistencies of local mean time,
# and it was they who forced a uniform time on the country.
# The original idea was credited to Dr. William Hyde Wollaston (1766-1828)
# and was popularized by Abraham Follett Osler (1808-1903).
# The first railway to adopt London time was the Great Western Railway
# in November 1840; other railways followed suit, and by 1847 most
# (though not all) railways used London time. On 1847-09-22 the
# Railway Clearing House, an industry standards body, recommended that GMT be
# adopted at all stations as soon as the General Post Office permitted it.
# The transition occurred on 12-01 for the L&NW, the Caledonian,
# and presumably other railways; the January 1848 Bradshaw's lists many
# railways as using GMT. By 1855 the vast majority of public
# clocks in Britain were set to GMT (though some, like the great clock
# on Tom Tower at Christ Church, Oxford, were fitted with two minute hands,
# one for local time and one for GMT). The last major holdout was the legal
# system, which stubbornly stuck to local time for many years, leading
# to oddities like polls opening at 08:13 and closing at 16:13.
# The legal system finally switched to GMT when the Statutes (Definition
# of Time) Act took effect; it received the Royal Assent on 1880-08-02.
#
# In the tables below, we condense this complicated story into a single
# transition date for London, namely 1847-12-01. We don't know as much
# about Dublin, so we use 1880-08-02, the legal transition time.
# From Paul Eggert (2003-09-27):
# Summer Time was first seriously proposed by William Willett (1857-1915),
# a London builder and member of the Royal Astronomical Society
# who circulated a pamphlet ``The Waste of Daylight'' (1907)
# that proposed advancing clocks 20 minutes on each of four Sundays in April,
# and retarding them by the same amount on four Sundays in September.
# A bill was drafted in 1909 and introduced in Parliament several times,
# but it met with ridicule and opposition, especially from farming interests.
# Later editions of the pamphlet proposed one-hour summer time, and
# it was eventually adopted as a wartime measure in 1916.
# See: Summer Time Arrives Early, The Times (2000-05-18).
# A monument to Willett was unveiled on 1927-05-21, in an open space in
# a 45-acre wood near Chislehurst, Kent that was purchased by popular
# subscription and open to the public. On the south face of the monolith,
# designed by G. W. Miller, is the...William Willett Memorial Sundial,
# which is permanently set to Summer Time.
# From Winston Churchill (1934-04-28):
# It is one of the paradoxes of history that we should owe the boon of
# summer time, which gives every year to the people of this country
# between 160 and 170 hours more daylight leisure, to a war which
# plunged Europe into darkness for four years, and shook the
# foundations of civilization throughout the world.
# -- <a href="http://www.winstonchurchill.org/fh114willett.htm">
# "A Silent Toast to William Willett", Pictorial Weekly
# </a>
# From Paul Eggert (1996-09-03):
# The OED Supplement says that the English originally said ``Daylight Saving''
# when they were debating the adoption of DST in 1908; but by 1916 this
# term appears only in quotes taken from DST's opponents, whereas the
# proponents (who eventually won the argument) are quoted as using ``Summer''.
# From Arthur David Olson (1989-01-19):
#
# A source at the British Information Office in New York avers that it's
# known as "British" Summer Time in all parts of the United Kingdom.
# Date: 4 Jan 89 08:57:25 GMT (Wed)
# From: Jonathan Leffler
# [British Summer Time] is fixed annually by Act of Parliament.
# If you can predict what Parliament will do, you should be in
# politics making a fortune, not computing.
# From Chris Carrier (1996-06-14):
# I remember reading in various wartime issues of the London Times the
# acronym BDST for British Double Summer Time. Look for the published
# time of sunrise and sunset in The Times, when BDST was in effect, and
# if you find a zone reference it will say, "All times B.D.S.T."
# From Joseph S. Myers (1999-09-02):
# ... some military cables (WO 219/4100 - this is a copy from the
# main SHAEF archives held in the US National Archives, SHAEF/5252/8/516)
# agree that the usage is BDST (this appears in a message dated 17 Feb 1945).
# From Joseph S. Myers (2000-10-03):
# On 18th April 1941, Sir Stephen Tallents of the BBC wrote to Sir
# Alexander Maxwell of the Home Office asking whether there was any
# official designation; the reply of the 21st was that there wasn't
# but he couldn't think of anything better than the "Double British
# Summer Time" that the BBC had been using informally.
# http://student.cusu.cam.ac.uk/~jsm28/british-time/bbc-19410418.png
# http://student.cusu.cam.ac.uk/~jsm28/british-time/ho-19410421.png
# From Sir Alexander Maxwell in the above-mentioned letter (1941-04-21):
# [N]o official designation has as far as I know been adopted for the time
# which is to be introduced in May....
# I cannot think of anything better than "Double British Summer Time"
# which could not be said to run counter to any official description.
# From Paul Eggert (2000-10-02):
# Howse writes (p 157) `DBST' too, but `BDST' seems to have been common
# and follows the more usual convention of putting the location name first,
# so we use `BDST'.
# Peter Ilieve (1998-04-19) described at length
# the history of summer time legislation in the United Kingdom.
# Since 1998 Joseph S. Myers has been updating
# and extending this list, "History of legal time in Britain",
# which could be found at
# http://student.cusu.cam.ac.uk/~jsm28/british-time/
# Rob Crowther (2012-01-04) reports that the URL no longer
# exists, and the article can now be found at:
# <a href="http://www.polyomino.org.uk/british-time/">
# http://www.polyomino.org.uk/british-time/
# </a>
# From Joseph S. Myers (1998-01-06):
#
# The legal time in the UK outside of summer time is definitely GMT, not UTC;
# see Lord Tanlaw's speech
# <a href="http://www.parliament.the-stationery-office.co.uk/pa/ld199697/ldhansrd/pdvn/lds97/text/70611-20.htm#70611-20_head0">
# (Lords Hansard 11 June 1997 columns 964 to 976)
# </a>.
# From Paul Eggert (2006-03-22):
#
# For lack of other data, follow Shanks & Pottenger for Eire in 1940-1948.
#
# Given Ilieve and Myers's data, the following claims by Shanks & Pottenger
# are incorrect:
# * Wales did not switch from GMT to daylight saving time until
# 1921 Apr 3, when they began to conform with the rest of Great Britain.
# Actually, Wales was identical after 1880.
# * Eire had two transitions on 1916 Oct 1.
# It actually just had one transition.
# * Northern Ireland used single daylight saving time throughout WW II.
# Actually, it conformed to Britain.
# * GB-Eire changed standard time to 1 hour ahead of GMT on 1968-02-18.
# Actually, that date saw the usual switch to summer time.
# Standard time was not changed until 1968-10-27 (the clocks didn't change).
#
# Here is another incorrect claim by Shanks & Pottenger:
# * Jersey, Guernsey, and the Isle of Man did not switch from GMT
# to daylight saving time until 1921 Apr 3, when they began to
# conform with Great Britain.
# S.R.&O. 1916, No. 382 and HO 45/10811/312364 (quoted above) say otherwise.
#
# The following claim by Shanks & Pottenger is possible though doubtful;
# we'll ignore it for now.
# * Dublin's 1971-10-31 switch was at 02:00, even though London's was 03:00.
#
#
# Whitman says Dublin Mean Time was -0:25:21, which is more precise than
# Shanks & Pottenger.
# Perhaps this was Dunsink Observatory Time, as Dunsink Observatory
# (8 km NW of Dublin's center) seemingly was to Dublin as Greenwich was
# to London. For example:
#
# "Timeball on the ballast office is down. Dunsink time."
# -- James Joyce, Ulysses
# From Joseph S. Myers (2005-01-26):
# Irish laws are available online at www.irishstatutebook.ie. These include
# various instruments relating to legal time, for example:
#
# ZZA13Y1923.html ZZA12Y1924.html ZZA8Y1925.html ZZSIV20PG1267.html
#
# ZZSI71Y1947.html ZZSI128Y1948.html ZZSI23Y1949.html ZZSI41Y1950.html
# ZZSI27Y1951.html ZZSI73Y1952.html
#
# ZZSI11Y1961.html ZZSI232Y1961.html ZZSI182Y1962.html
# ZZSI167Y1963.html ZZSI257Y1964.html ZZSI198Y1967.html
# ZZA23Y1968.html ZZA17Y1971.html
#
# ZZSI67Y1981.html ZZSI212Y1982.html ZZSI45Y1986.html
# ZZSI264Y1988.html ZZSI52Y1990.html ZZSI371Y1992.html
# ZZSI395Y1994.html ZZSI484Y1997.html ZZSI506Y2001.html
#
# [These are all relative to the root, e.g., the first is
# <http://www.irishstatutebook.ie/ZZA13Y1923.html>.]
#
# (These are those I found, but there could be more. In any case these
# should allow various updates to the comments in the europe file to cover
# the laws applicable in Ireland.)
#
# (Note that the time in the Republic of Ireland since 1968 has been defined
# in terms of standard time being GMT+1 with a period of winter time when it
# is GMT, rather than standard time being GMT with a period of summer time
# being GMT+1.)
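#
# (To see why the two formulations give the same clock, note that the UTC
# offset is always the standard offset plus the current saving.  A tiny
# Python check, purely illustrative and not part of this database:
#
#	def utc_offset(stdoff, save):
#	    return stdoff + save    # hours east of UTC
#
#	assert utc_offset(1, 0) == utc_offset(0, 1)    # summer: +1 either way
#	assert utc_offset(1, -1) == utc_offset(0, 0)   # winter: +0 either way
#
# The tables below model both London and Dublin the second way, GMT
# standard time plus a summer saving, except during 1968-1971, when
# standard time itself was 1:00.)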
# From Paul Eggert (1999-03-28):
# Clive Feather (<news:859845706.26043.0@office.demon.net>, 1997-03-31)
# reports that Folkestone (Cheriton) Shuttle Terminal uses Concession Time
# (CT), equivalent to French civil time.
# Julian Hill (<news:36118128.5A14@virgin.net>, 1998-09-30) reports that
# trains between Dollands Moor (the freight facility next door)
# and Frethun run in CT.
# My admittedly uninformed guess is that the terminal has two authorities,
# the French concession operators and the British civil authorities,
# and that the time depends on who you're talking to.
# If, say, the British police were called to the station for some reason,
# I would expect the official police report to use GMT/BST and not CET/CEST.
# This is a borderline case, but for now let's stick to GMT/BST.
# From an anonymous contributor (1996-06-02):
# The law governing time in Ireland is under Statutory Instrument SI 395/94,
# which gives force to European Union 7th Council Directive # 94/21/EC.
# Under this directive, the Minister for Justice in Ireland makes appropriate
# regulations. I spoke this morning with the Secretary of the Department of
# Justice (tel +353 1 678 9711) who confirmed to me that the correct name is
# "Irish Summer Time", abbreviated to "IST".
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
# Summer Time Act, 1916
Rule GB-Eire 1916 only - May 21 2:00s 1:00 BST
Rule GB-Eire 1916 only - Oct 1 2:00s 0 GMT
# S.R.&O. 1917, No. 358
Rule GB-Eire 1917 only - Apr 8 2:00s 1:00 BST
Rule GB-Eire 1917 only - Sep 17 2:00s 0 GMT
# S.R.&O. 1918, No. 274
Rule GB-Eire 1918 only - Mar 24 2:00s 1:00 BST
Rule GB-Eire 1918 only - Sep 30 2:00s 0 GMT
# S.R.&O. 1919, No. 297
Rule GB-Eire 1919 only - Mar 30 2:00s 1:00 BST
Rule GB-Eire 1919 only - Sep 29 2:00s 0 GMT
# S.R.&O. 1920, No. 458
Rule GB-Eire 1920 only - Mar 28 2:00s 1:00 BST
# S.R.&O. 1920, No. 1844
Rule GB-Eire 1920 only - Oct 25 2:00s 0 GMT
# S.R.&O. 1921, No. 363
Rule GB-Eire 1921 only - Apr 3 2:00s 1:00 BST
Rule GB-Eire 1921 only - Oct 3 2:00s 0 GMT
# S.R.&O. 1922, No. 264
Rule GB-Eire 1922 only - Mar 26 2:00s 1:00 BST
Rule GB-Eire 1922 only - Oct 8 2:00s 0 GMT
# The Summer Time Act, 1922
Rule GB-Eire 1923 only - Apr Sun>=16 2:00s 1:00 BST
Rule GB-Eire 1923 1924 - Sep Sun>=16 2:00s 0 GMT
Rule GB-Eire 1924 only - Apr Sun>=9 2:00s 1:00 BST
Rule GB-Eire 1925 1926 - Apr Sun>=16 2:00s 1:00 BST
# The Summer Time Act, 1925
Rule GB-Eire 1925 1938 - Oct Sun>=2 2:00s 0 GMT
Rule GB-Eire 1927 only - Apr Sun>=9 2:00s 1:00 BST
Rule GB-Eire 1928 1929 - Apr Sun>=16 2:00s 1:00 BST
Rule GB-Eire 1930 only - Apr Sun>=9 2:00s 1:00 BST
Rule GB-Eire 1931 1932 - Apr Sun>=16 2:00s 1:00 BST
Rule GB-Eire 1933 only - Apr Sun>=9 2:00s 1:00 BST
Rule GB-Eire 1934 only - Apr Sun>=16 2:00s 1:00 BST
Rule GB-Eire 1935 only - Apr Sun>=9 2:00s 1:00 BST
Rule GB-Eire 1936 1937 - Apr Sun>=16 2:00s 1:00 BST
Rule GB-Eire 1938 only - Apr Sun>=9 2:00s 1:00 BST
Rule GB-Eire 1939 only - Apr Sun>=16 2:00s 1:00 BST
# S.R.&O. 1939, No. 1379
Rule GB-Eire 1939 only - Nov Sun>=16 2:00s 0 GMT
# S.R.&O. 1940, No. 172 and No. 1883
Rule GB-Eire 1940 only - Feb Sun>=23 2:00s 1:00 BST
# S.R.&O. 1941, No. 476
Rule GB-Eire 1941 only - May Sun>=2 1:00s 2:00 BDST
Rule GB-Eire 1941 1943 - Aug Sun>=9 1:00s 1:00 BST
# S.R.&O. 1942, No. 506
Rule GB-Eire 1942 1944 - Apr Sun>=2 1:00s 2:00 BDST
# S.R.&O. 1944, No. 932
Rule GB-Eire 1944 only - Sep Sun>=16 1:00s 1:00 BST
# S.R.&O. 1945, No. 312
Rule GB-Eire 1945 only - Apr Mon>=2 1:00s 2:00 BDST
Rule GB-Eire 1945 only - Jul Sun>=9 1:00s 1:00 BST
# S.R.&O. 1945, No. 1208
Rule GB-Eire 1945 1946 - Oct Sun>=2 2:00s 0 GMT
Rule GB-Eire 1946 only - Apr Sun>=9 2:00s 1:00 BST
# The Summer Time Act, 1947
Rule GB-Eire 1947 only - Mar 16 2:00s 1:00 BST
Rule GB-Eire 1947 only - Apr 13 1:00s 2:00 BDST
Rule GB-Eire 1947 only - Aug 10 1:00s 1:00 BST
Rule GB-Eire 1947 only - Nov 2 2:00s 0 GMT
# Summer Time Order, 1948 (S.I. 1948/495)
Rule GB-Eire 1948 only - Mar 14 2:00s 1:00 BST
Rule GB-Eire 1948 only - Oct 31 2:00s 0 GMT
# Summer Time Order, 1949 (S.I. 1949/373)
Rule GB-Eire 1949 only - Apr 3 2:00s 1:00 BST
Rule GB-Eire 1949 only - Oct 30 2:00s 0 GMT
# Summer Time Order, 1950 (S.I. 1950/518)
# Summer Time Order, 1951 (S.I. 1951/430)
# Summer Time Order, 1952 (S.I. 1952/451)
Rule GB-Eire 1950 1952 - Apr Sun>=14 2:00s 1:00 BST
Rule GB-Eire 1950 1952 - Oct Sun>=21 2:00s 0 GMT
# revert to the rules of the Summer Time Act, 1925
Rule GB-Eire 1953 only - Apr Sun>=16 2:00s 1:00 BST
Rule GB-Eire 1953 1960 - Oct Sun>=2 2:00s 0 GMT
Rule GB-Eire 1954 only - Apr Sun>=9 2:00s 1:00 BST
Rule GB-Eire 1955 1956 - Apr Sun>=16 2:00s 1:00 BST
Rule GB-Eire 1957 only - Apr Sun>=9 2:00s 1:00 BST
Rule GB-Eire 1958 1959 - Apr Sun>=16 2:00s 1:00 BST
Rule GB-Eire 1960 only - Apr Sun>=9 2:00s 1:00 BST
# Summer Time Order, 1961 (S.I. 1961/71)
# Summer Time (1962) Order, 1961 (S.I. 1961/2465)
# Summer Time Order, 1963 (S.I. 1963/81)
Rule GB-Eire 1961 1963 - Mar lastSun 2:00s 1:00 BST
Rule GB-Eire 1961 1968 - Oct Sun>=23 2:00s 0 GMT
# Summer Time (1964) Order, 1963 (S.I. 1963/2101)
# Summer Time Order, 1964 (S.I. 1964/1201)
# Summer Time Order, 1967 (S.I. 1967/1148)
Rule GB-Eire 1964 1967 - Mar Sun>=19 2:00s 1:00 BST
# Summer Time Order, 1968 (S.I. 1968/117)
Rule GB-Eire 1968 only - Feb 18 2:00s 1:00 BST
# The British Standard Time Act, 1968
# (no summer time)
# The Summer Time Act, 1972
Rule GB-Eire 1972 1980 - Mar Sun>=16 2:00s 1:00 BST
Rule GB-Eire 1972 1980 - Oct Sun>=23 2:00s 0 GMT
# Summer Time Order, 1980 (S.I. 1980/1089)
# Summer Time Order, 1982 (S.I. 1982/1673)
# Summer Time Order, 1986 (S.I. 1986/223)
# Summer Time Order, 1988 (S.I. 1988/931)
Rule GB-Eire 1981 1995 - Mar lastSun 1:00u 1:00 BST
Rule GB-Eire 1981 1989 - Oct Sun>=23 1:00u 0 GMT
# Summer Time Order, 1989 (S.I. 1989/985)
# Summer Time Order, 1992 (S.I. 1992/1729)
# Summer Time Order 1994 (S.I. 1994/2798)
Rule GB-Eire 1990 1995 - Oct Sun>=22 1:00u 0 GMT
# Summer Time Order 1997 (S.I. 1997/2982)
# See EU for rules starting in 1996.
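# A note on reading the ON column above: `Sun>=16' means the first Sunday
# on or after the 16th, and `lastSun' the last Sunday of the month.  A
# minimal Python sketch of that lookup (illustrative only, with invented
# names; it is not zic's own code):
#
#	import datetime
#
#	def on_or_after(year, month, day, weekday):    # weekday: Mon=0 .. Sun=6
#	    d = datetime.date(year, month, day)
#	    return d + datetime.timedelta((weekday - d.weekday()) % 7)
#
#	def last_weekday(year, month, weekday):
#	    nxt = datetime.date(year + month // 12, month % 12 + 1, 1)
#	    end = nxt - datetime.timedelta(1)          # last day of the month
#	    return end - datetime.timedelta((end.weekday() - weekday) % 7)
#
#	# "Apr Sun>=16" under the Summer Time Act, 1922:
#	print(on_or_after(1923, 4, 16, 6))   # 1923-04-22
#	# "Mar lastSun" under the 1961-1963 orders:
#	print(last_weekday(1961, 3, 6))      # 1961-03-26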
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/London -0:01:15 - LMT 1847 Dec 1 0:00s
0:00 GB-Eire %s 1968 Oct 27
1:00 - BST 1971 Oct 31 2:00u
0:00 GB-Eire %s 1996
0:00 EU GMT/BST
Link Europe/London Europe/Jersey
Link Europe/London Europe/Guernsey
Link Europe/London Europe/Isle_of_Man
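# A note on the FORMAT column above: `%s' stands for the LETTER/S value of
# the rule in force, so `GB-Eire' rules with FORMAT `%s' yield BST in
# summer and GMT in winter, while a pair like `GMT/BST' names standard
# time before the slash and summer time after it.  A rough Python sketch
# of the substitution (invented name; zic's own handling covers more cases):
#
#	def abbreviation(fmt, letter, save):
#	    if "/" in fmt:                       # e.g. "GMT/BST"
#	        std, dst = fmt.split("/")
#	        return dst if save else std
#	    return fmt.replace("%s", letter)     # e.g. "%s" with LETTER/S = BST
#
#	print(abbreviation("%s", "BST", 1))      # BST
#	print(abbreviation("GMT/BST", "-", 0))   # GMT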
Zone Europe/Dublin -0:25:00 - LMT 1880 Aug 2
-0:25:21 - DMT 1916 May 21 2:00
-0:25:21 1:00 IST 1916 Oct 1 2:00s
0:00 GB-Eire %s 1921 Dec 6 # independence
0:00 GB-Eire GMT/IST 1940 Feb 25 2:00
0:00 1:00 IST 1946 Oct 6 2:00
0:00 - GMT 1947 Mar 16 2:00
0:00 1:00 IST 1947 Nov 2 2:00
0:00 - GMT 1948 Apr 18 2:00
0:00 GB-Eire GMT/IST 1968 Oct 27
1:00 - IST 1971 Oct 31 2:00u
0:00 GB-Eire GMT/IST 1996
0:00 EU GMT/IST
###############################################################################
# Europe
# EU rules are for the European Union, previously known as the EC, EEC,
# Common Market, etc.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule EU 1977 1980 - Apr Sun>=1 1:00u 1:00 S
Rule EU 1977 only - Sep lastSun 1:00u 0 -
Rule EU 1978 only - Oct 1 1:00u 0 -
Rule EU 1979 1995 - Sep lastSun 1:00u 0 -
Rule EU 1981 max - Mar lastSun 1:00u 1:00 S
Rule EU 1996 max - Oct lastSun 1:00u 0 -
# The most recent directive covers the years starting in 2002. See:
# <a href="http://eur-lex.europa.eu/LexUriServ/LexUriServ.do?uri=CELEX:32000L0084:EN:NOT">
# Directive 2000/84/EC of the European Parliament and of the Council
# of 19 January 2001 on summer-time arrangements.
# </a>
# W-Eur differs from EU only in that W-Eur uses standard time.
Rule W-Eur 1977 1980 - Apr Sun>=1 1:00s 1:00 S
Rule W-Eur 1977 only - Sep lastSun 1:00s 0 -
Rule W-Eur 1978 only - Oct 1 1:00s 0 -
Rule W-Eur 1979 1995 - Sep lastSun 1:00s 0 -
Rule W-Eur 1981 max - Mar lastSun 1:00s 1:00 S
Rule W-Eur 1996 max - Oct lastSun 1:00s 0 -
# Older C-Eur rules are for convenience in the tables.
# From 1977 on, C-Eur differs from EU only in that C-Eur uses standard time.
Rule C-Eur 1916 only - Apr 30 23:00 1:00 S
Rule C-Eur 1916 only - Oct 1 1:00 0 -
Rule C-Eur 1917 1918 - Apr Mon>=15 2:00s 1:00 S
Rule C-Eur 1917 1918 - Sep Mon>=15 2:00s 0 -
Rule C-Eur 1940 only - Apr 1 2:00s 1:00 S
Rule C-Eur 1942 only - Nov 2 2:00s 0 -
Rule C-Eur 1943 only - Mar 29 2:00s 1:00 S
Rule C-Eur 1943 only - Oct 4 2:00s 0 -
Rule C-Eur 1944 1945 - Apr Mon>=1 2:00s 1:00 S
# Whitman gives 1944 Oct 7; go with Shanks & Pottenger.
Rule C-Eur 1944 only - Oct 2 2:00s 0 -
# From Jesper Norgaard Welen (2008-07-13):
#
# I found what is probably a typo of 2:00 which should perhaps be 2:00s
# in the C-Eur rule from tz database version 2008d (this part was
# corrected in version 2008d). The circumstantial evidence is simply the
# tz database itself, as seen below:
#
# Zone Europe/Paris 0:09:21 - LMT 1891 Mar 15 0:01
# 0:00 France WE%sT 1945 Sep 16 3:00
#
# Zone Europe/Monaco 0:29:32 - LMT 1891 Mar 15
# 0:00 France WE%sT 1945 Sep 16 3:00
#
# Zone Europe/Belgrade 1:22:00 - LMT 1884
# 1:00 1:00 CEST 1945 Sep 16 2:00s
#
# Rule France 1945 only - Sep 16 3:00 0 -
# Rule Belgium 1945 only - Sep 16 2:00s 0 -
# Rule Neth 1945 only - Sep 16 2:00s 0 -
#
# The rule line to be changed is:
#
# Rule C-Eur 1945 only - Sep 16 2:00 0 -
#
# It seems that Paris, Monaco, Rule France, Rule Belgium all agree on
# 2:00 standard time, i.e. 3:00 local time. However there are no
# countries that use C-Eur rules in September 1945, so the only items
# affected are apparently these fictitious zones that translate the acronyms
# CET and MET:
#
# Zone CET 1:00 C-Eur CE%sT
# Zone MET 1:00 C-Eur ME%sT
#
# If this is right then the corrected version would look like:
#
# Rule C-Eur 1945 only - Sep 16 2:00s 0 -
#
# A small step for mankind though 8-)
Rule C-Eur 1945 only - Sep 16 2:00s 0 -
Rule C-Eur 1977 1980 - Apr Sun>=1 2:00s 1:00 S
Rule C-Eur 1977 only - Sep lastSun 2:00s 0 -
Rule C-Eur 1978 only - Oct 1 2:00s 0 -
Rule C-Eur 1979 1995 - Sep lastSun 2:00s 0 -
Rule C-Eur 1981 max - Mar lastSun 2:00s 1:00 S
Rule C-Eur 1996 max - Oct lastSun 2:00s 0 -
# E-Eur differs from EU only in that E-Eur switches at midnight local time.
Rule E-Eur 1977 1980 - Apr Sun>=1 0:00 1:00 S
Rule E-Eur 1977 only - Sep lastSun 0:00 0 -
Rule E-Eur 1978 only - Oct 1 0:00 0 -
Rule E-Eur 1979 1995 - Sep lastSun 0:00 0 -
Rule E-Eur 1981 max - Mar lastSun 0:00 1:00 S
Rule E-Eur 1996 max - Oct lastSun 0:00 0 -
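# The u/s suffixes in the AT column are what distinguish the rule sets
# above: `u' means the time is UTC, `s' local standard time, and no suffix
# local wall-clock time.  A hedged Python sketch of the conversion to UTC
# (invented names, whole hours only; zic handles the general case):
#
#	def transition_utc(at, suffix, stdoff, save):
#	    if suffix == "u":                # already UTC
#	        return at
#	    if suffix == "s":                # local standard time
#	        return at - stdoff
#	    return at - stdoff - save        # wall-clock time
#
#	# The EU spring switch at 1:00u and the C-Eur switch at 2:00s are the
#	# same instant in a CET zone (standard offset 1:00):
#	assert transition_utc(1, "u", 1, 0) == transition_utc(2, "s", 1, 0) == 1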
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Russia 1917 only - Jul 1 23:00 1:00 MST # Moscow Summer Time
Rule Russia 1917 only - Dec 28 0:00 0 MMT # Moscow Mean Time
Rule Russia 1918 only - May 31 22:00 2:00 MDST # Moscow Double Summer Time
Rule Russia 1918 only - Sep 16 1:00 1:00 MST
Rule Russia 1919 only - May 31 23:00 2:00 MDST
Rule Russia 1919 only - Jul 1 2:00 1:00 S
Rule Russia 1919 only - Aug 16 0:00 0 -
Rule Russia 1921 only - Feb 14 23:00 1:00 S
Rule Russia 1921 only - Mar 20 23:00 2:00 M # Midsummer
Rule Russia 1921 only - Sep 1 0:00 1:00 S
Rule Russia 1921 only - Oct 1 0:00 0 -
# Act No.925 of the Council of Ministers of the USSR (1980-10-24):
Rule Russia 1981 1984 - Apr 1 0:00 1:00 S
Rule Russia 1981 1983 - Oct 1 0:00 0 -
# Act No.967 of the Council of Ministers of the USSR (1984-09-13), repeated in
# Act No.227 of the Council of Ministers of the USSR (1989-03-14):
Rule Russia 1984 1991 - Sep lastSun 2:00s 0 -
Rule Russia 1985 1991 - Mar lastSun 2:00s 1:00 S
#
Rule Russia 1992 only - Mar lastSat 23:00 1:00 S
Rule Russia 1992 only - Sep lastSat 23:00 0 -
Rule Russia 1993 2010 - Mar lastSun 2:00s 1:00 S
Rule Russia 1993 1995 - Sep lastSun 2:00s 0 -
Rule Russia 1996 2010 - Oct lastSun 2:00s 0 -
# From Alexander Krivenyshev (2011-06-14):
# According to Kremlin press service, Russian President Dmitry Medvedev
# signed a federal law "On calculation of time" on June 9, 2011.
# According to the law Russia is abolishing daylight saving time.
#
# Medvedev signed a law "On the Calculation of Time" (in russian):
# <a href="http://bmockbe.ru/events/?ID=7583">
# http://bmockbe.ru/events/?ID=7583
# </a>
#
# Medvedev signed a law on the calculation of the time (in Russian):
# <a href="http://www.regnum.ru/news/polit/1413906.html">
# http://www.regnum.ru/news/polit/1413906.html
# </a>
# From Arthur David Olson (2011-06-15):
# Take "abolishing daylight saving time" to mean that time is now considered
# to be standard.
# These are for backward compatibility with older versions.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone WET 0:00 EU WE%sT
Zone CET 1:00 C-Eur CE%sT
Zone MET 1:00 C-Eur ME%sT
Zone EET 2:00 EU EE%sT
# Previous editions of this database used abbreviations like MET DST
# for Central European Summer Time, but this didn't agree with common usage.
# From Markus Kuhn (1996-07-12):
# The official German names ... are
#
# Mitteleuropaeische Zeit (MEZ) = UTC+01:00
# Mitteleuropaeische Sommerzeit (MESZ) = UTC+02:00
#
# as defined in the German Time Act (Gesetz ueber die Zeitbestimmung (ZeitG),
# 1978-07-25, Bundesgesetzblatt, Jahrgang 1978, Teil I, S. 1110-1111)....
# I wrote ... to the German Federal Physical-Technical Institution
#
# Physikalisch-Technische Bundesanstalt (PTB)
# Laboratorium 4.41 "Zeiteinheit"
# Postfach 3345
# D-38023 Braunschweig
# phone: +49 531 592-0
#
# ... I received today an answer letter from Dr. Peter Hetzel, head of the PTB
# department for time and frequency transmission. He explained that the
# PTB translates MEZ and MESZ into English as
#
# Central European Time (CET) = UTC+01:00
# Central European Summer Time (CEST) = UTC+02:00
# Albania
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Albania 1940 only - Jun 16 0:00 1:00 S
Rule Albania 1942 only - Nov 2 3:00 0 -
Rule Albania 1943 only - Mar 29 2:00 1:00 S
Rule Albania 1943 only - Apr 10 3:00 0 -
Rule Albania 1974 only - May 4 0:00 1:00 S
Rule Albania 1974 only - Oct 2 0:00 0 -
Rule Albania 1975 only - May 1 0:00 1:00 S
Rule Albania 1975 only - Oct 2 0:00 0 -
Rule Albania 1976 only - May 2 0:00 1:00 S
Rule Albania 1976 only - Oct 3 0:00 0 -
Rule Albania 1977 only - May 8 0:00 1:00 S
Rule Albania 1977 only - Oct 2 0:00 0 -
Rule Albania 1978 only - May 6 0:00 1:00 S
Rule Albania 1978 only - Oct 1 0:00 0 -
Rule Albania 1979 only - May 5 0:00 1:00 S
Rule Albania 1979 only - Sep 30 0:00 0 -
Rule Albania 1980 only - May 3 0:00 1:00 S
Rule Albania 1980 only - Oct 4 0:00 0 -
Rule Albania 1981 only - Apr 26 0:00 1:00 S
Rule Albania 1981 only - Sep 27 0:00 0 -
Rule Albania 1982 only - May 2 0:00 1:00 S
Rule Albania 1982 only - Oct 3 0:00 0 -
Rule Albania 1983 only - Apr 18 0:00 1:00 S
Rule Albania 1983 only - Oct 1 0:00 0 -
Rule Albania 1984 only - Apr 1 0:00 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Tirane 1:19:20 - LMT 1914
1:00 - CET 1940 Jun 16
1:00 Albania CE%sT 1984 Jul
1:00 EU CE%sT
# Andorra
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Andorra 0:06:04 - LMT 1901
0:00 - WET 1946 Sep 30
1:00 - CET 1985 Mar 31 2:00
1:00 EU CE%sT
# Austria
# From Paul Eggert (2006-03-22): Shanks & Pottenger give 1918-06-16 and
# 1945-11-18, but the Austrian Federal Office of Metrology and
# Surveying (BEV) gives 1918-09-16 and for Vienna gives the "alleged"
# date of 1945-04-12 with no time. For the 1980-04-06 transition
# Shanks & Pottenger give 02:00, the BEV 00:00. Go with the BEV,
# and guess 02:00 for 1945-04-12.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Austria 1920 only - Apr 5 2:00s 1:00 S
Rule Austria 1920 only - Sep 13 2:00s 0 -
Rule Austria 1946 only - Apr 14 2:00s 1:00 S
Rule Austria 1946 1948 - Oct Sun>=1 2:00s 0 -
Rule Austria 1947 only - Apr 6 2:00s 1:00 S
Rule Austria 1948 only - Apr 18 2:00s 1:00 S
Rule Austria 1980 only - Apr 6 0:00 1:00 S
Rule Austria 1980 only - Sep 28 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Vienna 1:05:20 - LMT 1893 Apr
1:00 C-Eur CE%sT 1920
1:00 Austria CE%sT 1940 Apr 1 2:00s
1:00 C-Eur CE%sT 1945 Apr 2 2:00s
1:00 1:00 CEST 1945 Apr 12 2:00s
1:00 - CET 1946
1:00 Austria CE%sT 1981
1:00 EU CE%sT
# Belarus
# From Yauhen Kharuzhy (2011-09-16):
# By the latest Belarus government act the Europe/Minsk timezone was changed
# to GMT+3 without DST (it was GMT+2 with DST).
#
# Sources (Russian language):
# 1.
# <a href="http://www.belta.by/ru/all_news/society/V-Belarusi-otmenjaetsja-perexod-na-sezonnoe-vremja_i_572952.html">
# http://www.belta.by/ru/all_news/society/V-Belarusi-otmenjaetsja-perexod-na-sezonnoe-vremja_i_572952.html
# </a>
# 2.
# <a href="http://naviny.by/rubrics/society/2011/09/16/ic_articles_116_175144/">
# http://naviny.by/rubrics/society/2011/09/16/ic_articles_116_175144/
# </a>
# 3.
# <a href="http://news.tut.by/society/250578.html">
# http://news.tut.by/society/250578.html
# </a>
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Minsk 1:50:16 - LMT 1880
1:50 - MMT 1924 May 2 # Minsk Mean Time
2:00 - EET 1930 Jun 21
3:00 - MSK 1941 Jun 28
1:00 C-Eur CE%sT 1944 Jul 3
3:00 Russia MSK/MSD 1990
3:00 - MSK 1991 Mar 31 2:00s
2:00 1:00 EEST 1991 Sep 29 2:00s
2:00 - EET 1992 Mar 29 0:00s
2:00 1:00 EEST 1992 Sep 27 0:00s
2:00 Russia EE%sT 2011 Mar 27 2:00s
3:00 - FET # Further-eastern European Time
# Belgium
#
# From Paul Eggert (1997-07-02):
# Entries from 1918 through 1991 are taken from:
# Annuaire de L'Observatoire Royal de Belgique,
# Avenue Circulaire, 3, B-1180 BRUXELLES, CLVIIe annee, 1991
# (Imprimerie HAYEZ, s.p.r.l., Rue Fin, 4, 1080 BRUXELLES, MCMXC),
# pp 8-9.
# LMT before 1892 was 0:17:30, according to the official journal of Belgium:
# Moniteur Belge, Samedi 30 Avril 1892, N.121.
# Thanks to Pascal Delmoitie for these references.
# The 1918 rules are listed for completeness; they apply to unoccupied Belgium.
# Assume Brussels switched to WET in 1918 when the armistice took effect.
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Belgium 1918 only - Mar 9 0:00s 1:00 S
Rule Belgium 1918 1919 - Oct Sat>=1 23:00s 0 -
Rule Belgium 1919 only - Mar 1 23:00s 1:00 S
Rule Belgium 1920 only - Feb 14 23:00s 1:00 S
Rule Belgium 1920 only - Oct 23 23:00s 0 -
Rule Belgium 1921 only - Mar 14 23:00s 1:00 S
Rule Belgium 1921 only - Oct 25 23:00s 0 -
Rule Belgium 1922 only - Mar 25 23:00s 1:00 S
Rule Belgium 1922 1927 - Oct Sat>=1 23:00s 0 -
Rule Belgium 1923 only - Apr 21 23:00s 1:00 S
Rule Belgium 1924 only - Mar 29 23:00s 1:00 S
Rule Belgium 1925 only - Apr 4 23:00s 1:00 S
# DSH writes that a royal decree of 1926-02-22 specified the Sun following 3rd
# Sat in Apr (except if it's Easter, in which case it's one Sunday earlier),
# to Sun following 1st Sat in Oct, and that a royal decree of 1928-09-15
# changed the transition times to 02:00 GMT. (A sketch of this decree's
# rule, Easter exception included, follows this rule table.)
Rule Belgium 1926 only - Apr 17 23:00s 1:00 S
Rule Belgium 1927 only - Apr 9 23:00s 1:00 S
Rule Belgium 1928 only - Apr 14 23:00s 1:00 S
Rule Belgium 1928 1938 - Oct Sun>=2 2:00s 0 -
Rule Belgium 1929 only - Apr 21 2:00s 1:00 S
Rule Belgium 1930 only - Apr 13 2:00s 1:00 S
Rule Belgium 1931 only - Apr 19 2:00s 1:00 S
Rule Belgium 1932 only - Apr 3 2:00s 1:00 S
Rule Belgium 1933 only - Mar 26 2:00s 1:00 S
Rule Belgium 1934 only - Apr 8 2:00s 1:00 S
Rule Belgium 1935 only - Mar 31 2:00s 1:00 S
Rule Belgium 1936 only - Apr 19 2:00s 1:00 S
Rule Belgium 1937 only - Apr 4 2:00s 1:00 S
Rule Belgium 1938 only - Mar 27 2:00s 1:00 S
Rule Belgium 1939 only - Apr 16 2:00s 1:00 S
Rule Belgium 1939 only - Nov 19 2:00s 0 -
Rule Belgium 1940 only - Feb 25 2:00s 1:00 S
Rule Belgium 1944 only - Sep 17 2:00s 0 -
Rule Belgium 1945 only - Apr 2 2:00s 1:00 S
Rule Belgium 1945 only - Sep 16 2:00s 0 -
Rule Belgium 1946 only - May 19 2:00s 1:00 S
Rule Belgium 1946 only - Oct 7 2:00s 0 -
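# As promised above, a sketch of the 1926 decree's rule in Python,
# illustrative only and with invented names; the table above simply lists
# the resulting dates.  The computus is the standard anonymous Gregorian
# Easter algorithm:
#
#	import datetime
#
#	def easter(year):
#	    a = year % 19
#	    b, c = divmod(year, 100)
#	    d, e = divmod(b, 4)
#	    g = (b - (b + 8) // 25 + 1) // 3
#	    h = (19 * a + b - d - g + 15) % 30
#	    i, k = divmod(c, 4)
#	    l = (32 + 2 * e + 2 * i - h - k) % 7
#	    m = (a + 11 * h + 22 * l) // 451
#	    month, day = divmod(h + l - 7 * m + 114, 31)
#	    return datetime.date(year, month, day + 1)
#
#	def belgium_dst_start(year):
#	    # Sunday after the 3rd Saturday in April, one week earlier if
#	    # that Sunday is Easter.
#	    apr1 = datetime.date(year, 4, 1)
#	    sat3 = apr1 + datetime.timedelta((5 - apr1.weekday()) % 7 + 14)
#	    sun = sat3 + datetime.timedelta(1)
#	    return sun - datetime.timedelta(7) if sun == easter(year) else sun
#
#	print(belgium_dst_start(1930))   # 1930-04-13, as in the table above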
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Brussels 0:17:30 - LMT 1880
0:17:30 - BMT 1892 May 1 12:00 # Brussels MT
0:00 - WET 1914 Nov 8
1:00 - CET 1916 May 1 0:00
1:00 C-Eur CE%sT 1918 Nov 11 11:00u
0:00 Belgium WE%sT 1940 May 20 2:00s
1:00 C-Eur CE%sT 1944 Sep 3
1:00 Belgium CE%sT 1977
1:00 EU CE%sT
# Bosnia and Herzegovina
# see Serbia
# Bulgaria
#
# From Plamen Simenov via Steffen Thorsen (1999-09-09):
# A document of Government of Bulgaria (No.94/1997) says:
# EET --> EETDST is in 03:00 Local time in last Sunday of March ...
# EETDST --> EET is in 04:00 Local time in last Sunday of October
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Bulg 1979 only - Mar 31 23:00 1:00 S
Rule Bulg 1979 only - Oct 1 1:00 0 -
Rule Bulg 1980 1982 - Apr Sat>=1 23:00 1:00 S
Rule Bulg 1980 only - Sep 29 1:00 0 -
Rule Bulg 1981 only - Sep 27 2:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Sofia 1:33:16 - LMT 1880
1:56:56 - IMT 1894 Nov 30 # Istanbul MT?
2:00 - EET 1942 Nov 2 3:00
1:00 C-Eur CE%sT 1945
1:00 - CET 1945 Apr 2 3:00
2:00 - EET 1979 Mar 31 23:00
2:00 Bulg EE%sT 1982 Sep 26 2:00
2:00 C-Eur EE%sT 1991
2:00 E-Eur EE%sT 1997
2:00 EU EE%sT
# Croatia
# see Serbia
# Cyprus
# Please see the `asia' file for Asia/Nicosia.
# Czech Republic
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Czech 1945 only - Apr 8 2:00s 1:00 S
Rule Czech 1945 only - Nov 18 2:00s 0 -
Rule Czech 1946 only - May 6 2:00s 1:00 S
Rule Czech 1946 1949 - Oct Sun>=1 2:00s 0 -
Rule Czech 1947 only - Apr 20 2:00s 1:00 S
Rule Czech 1948 only - Apr 18 2:00s 1:00 S
Rule Czech 1949 only - Apr 9 2:00s 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Prague 0:57:44 - LMT 1850
0:57:44 - PMT 1891 Oct # Prague Mean Time
1:00 C-Eur CE%sT 1944 Sep 17 2:00s
1:00 Czech CE%sT 1979
1:00 EU CE%sT
# Denmark, Faroe Islands, and Greenland
# From Jesper Norgaard Welen (2005-04-26):
# http://www.hum.aau.dk/~poe/tid/tine/DanskTid.htm says that the law
# [introducing standard time] was in effect from 1894-01-01....
# The page http://www.retsinfo.dk/_GETDOCI_/ACCN/A18930008330-REGL
# confirms this, and states that the law was put forth 1893-03-29.
#
# The EU treaty with effect from 1973:
# http://www.retsinfo.dk/_GETDOCI_/ACCN/A19722110030-REGL
#
# This provoked a new law from 1974 to make summer time changes possible
# in subsequent decrees, with the law
# http://www.retsinfo.dk/_GETDOCI_/ACCN/A19740022330-REGL
#
# It seems however that no decree was set forward until 1980. I have
# not found any decree, but in another related law, the resulting DST
# changes are stated explicitly to be from 1980-04-06 at 02:00 to
# 1980-09-28 at 02:00. If this is true, this differs slightly from
# the EU rule in that DST runs to 02:00, not 03:00. We don't know
# when Denmark began using the EU rule correctly, but we have only
# confirmation of the 1980 times, so I presume it was correct in 1981:
# The law is about the management of the extra hour, concerning
# working hours reported and effect on obligatory-rest rules (which
# was suspended on that night):
# http://www.retsinfo.dk/_GETDOCI_/ACCN/C19801120554-REGL
# From Jesper Norgaard Welen (2005-06-11):
# The Herning Folkeblad (1980-09-26) reported that the night between
# Saturday and Sunday the clock is set back from three to two.
# From Paul Eggert (2005-06-11):
# Hence the "02:00" of the 1980 law refers to standard time, not
# wall-clock time, and so the EU rules were in effect in 1980.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Denmark 1916 only - May 14 23:00 1:00 S
Rule Denmark 1916 only - Sep 30 23:00 0 -
Rule Denmark 1940 only - May 15 0:00 1:00 S
Rule Denmark 1945 only - Apr 2 2:00s 1:00 S
Rule Denmark 1945 only - Aug 15 2:00s 0 -
Rule Denmark 1946 only - May 1 2:00s 1:00 S
Rule Denmark 1946 only - Sep 1 2:00s 0 -
Rule Denmark 1947 only - May 4 2:00s 1:00 S
Rule Denmark 1947 only - Aug 10 2:00s 0 -
Rule Denmark 1948 only - May 9 2:00s 1:00 S
Rule Denmark 1948 only - Aug 8 2:00s 0 -
#
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Copenhagen 0:50:20 - LMT 1890
0:50:20 - CMT 1894 Jan 1 # Copenhagen MT
1:00 Denmark CE%sT 1942 Nov 2 2:00s
1:00 C-Eur CE%sT 1945 Apr 2 2:00
1:00 Denmark CE%sT 1980
1:00 EU CE%sT
Zone Atlantic/Faroe -0:27:04 - LMT 1908 Jan 11 # Torshavn
0:00 - WET 1981
0:00 EU WE%sT
#
# From Paul Eggert (2004-10-31):
# During World War II, Germany maintained secret manned weather stations in
# East Greenland and Franz Josef Land, but we don't know their time zones.
# My source for this is Wilhelm Dege's book mentioned under Svalbard.
#
# From Paul Eggert (2006-03-22):
# Greenland joined the EU as part of Denmark, obtained home rule on 1979-05-01,
# and left the EU on 1985-02-01. It therefore should have been using EU
# rules at least through 1984. Shanks & Pottenger say Scoresbysund and Godthab
# used C-Eur rules after 1980, but IATA SSIM (1991/1996) says they use EU
# rules since at least 1991. Assume EU rules since 1980.
# From Gwillim Law (2001-06-06), citing
# <http://www.statkart.no/efs/efshefter/2001/efs5-2001.pdf> (2001-03-15),
# and with translations corrected by Steffen Thorsen:
#
# Greenland has four local times, and the relation to UTC
# is according to the following time line:
#
# The military zone near Thule UTC-4
# Standard Greenland time UTC-3
# Scoresbysund UTC-1
# Danmarkshavn UTC
#
# In the military area near Thule and in Danmarkshavn DST will not be
# introduced.
# From Rives McDow (2001-11-01):
#
# I correspond regularly with the Dansk Polarcenter, and wrote them at
# the time to clarify the situation in Thule. Unfortunately, I have
# not heard back from them regarding my recent letter. [But I have
# info from earlier correspondence.]
#
# According to the center, a very small local time zone around Thule
# Air Base keeps the time according to UTC-4, implementing daylight
# savings using North America rules, changing the time at 02:00 local time....
#
# The east coast of Greenland north of the community of Scoresbysund
# uses UTC in the same way as in Iceland, year round, with no dst.
# There are just a few stations on this coast, including the
# Danmarkshavn ICAO weather station mentioned in your September 29th
# email. The other stations are two sledge patrol stations in
# Mestersvig and Daneborg, the air force base at Station Nord, and the
# DPC research station at Zackenberg.
#
# Scoresbysund and two small villages nearby keep time UTC-1 and use
# the same daylight savings time period as in West Greenland (Godthab).
#
# The rest of Greenland, including Godthab (this area, although it
# includes central Greenland, is known as west Greenland), keeps time
# UTC-3, with daylight savings methods according to European rules.
#
# It is common procedure to use UTC 0 in the wilderness of East and
# North Greenland, because it is mainly Icelandic aircraft operators
# maintaining traffic in these areas. However, the official status of
# this area is that it sticks with Godthab time. This area might be
# considered a dual time zone in some respects because of this.
# From Rives McDow (2001-11-19):
# I heard back from someone stationed at Thule; the time change took place
# there at 2:00 AM.
# From Paul Eggert (2006-03-22):
# From 1997 on the CIA map shows Danmarkshavn on GMT;
# the 1995 map shows it like Godthab.
# For lack of better info, assume they were like Godthab before 1996.
# statkart.no says Thule does not observe DST, but this is clearly an error,
# so go with Shanks & Pottenger for Thule transitions until this year.
# For 2007 on assume Thule will stay in sync with US DST rules.
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Thule 1991 1992 - Mar lastSun 2:00 1:00 D
Rule Thule 1991 1992 - Sep lastSun 2:00 0 S
Rule Thule 1993 2006 - Apr Sun>=1 2:00 1:00 D
Rule Thule 1993 2006 - Oct lastSun 2:00 0 S
Rule Thule 2007 max - Mar Sun>=8 2:00 1:00 D
Rule Thule 2007 max - Nov Sun>=1 2:00 0 S
#
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Danmarkshavn -1:14:40 - LMT 1916 Jul 28
-3:00 - WGT 1980 Apr 6 2:00
-3:00 EU WG%sT 1996
0:00 - GMT
Zone America/Scoresbysund -1:27:52 - LMT 1916 Jul 28 # Ittoqqortoormiit
-2:00 - CGT 1980 Apr 6 2:00
-2:00 C-Eur CG%sT 1981 Mar 29
-1:00 EU EG%sT
Zone America/Godthab -3:26:56 - LMT 1916 Jul 28 # Nuuk
-3:00 - WGT 1980 Apr 6 2:00
-3:00 EU WG%sT
Zone America/Thule -4:35:08 - LMT 1916 Jul 28 # Pituffik air base
-4:00 Thule A%sT
# Estonia
# From Peter Ilieve (1994-10-15):
# A relative in Tallinn confirms the accuracy of the data for 1989 onwards
# [through 1994] and gives the legal authority for it,
# a regulation of the Government of Estonia, No. 111 of 1989....
#
# From Peter Ilieve (1996-10-28):
# [IATA SSIM (1992/1996) claims that the Baltic republics switch at 01:00s,
# but a relative confirms that Estonia still switches at 02:00s, writing:]
# ``I do not [know] exactly but there are some little different
# (confusing) rules for International Air and Railway Transport Schedules
# conversion in Sunday connected with end of summer time in Estonia....
# A discussion is running about the summer time efficiency and effect on
# human physiology. It seems that Estonia maybe will not change to
# summer time next spring.''
# From Peter Ilieve (1998-11-04), heavily edited:
# <a href="http://trip.rk.ee/cgi-bin/thw?${BASE}=akt&${OOHTML}=rtd&TA=1998&TO=1&AN=1390">
# The 1998-09-22 Estonian time law
# </a>
# refers to the Eighth Directive and cites the association agreement between
# the EU and Estonia, ratified by the Estonian law (RT II 1995, 22--27, 120).
#
# I also asked [my relative] whether they use any standard abbreviation
# for their standard and summer times. He says no, they use "suveaeg"
# (summer time) and "talveaeg" (winter time).
# From <a href="http://www.baltictimes.com/">The Baltic Times</a> (1999-09-09)
# via Steffen Thorsen:
# This year will mark the last time Estonia shifts to summer time,
# a council of the ruling coalition announced Sept. 6....
# But what this could mean for Estonia's chances of joining the European
# Union are still unclear. In 1994, the EU declared summer time compulsory
# for all member states until 2001. Brussels has yet to decide what to do
# after that.
# From Mart Oruaas (2000-01-29):
# Regulation no. 301 (1999-10-12) obsoletes previous regulation
# no. 206 (1998-09-22) and thus sticks Estonia to +02:00 GMT all year
# round. The regulation is effective 1999-11-01.
# From Toomas Soome (2002-02-21):
# The Estonian government has once again changed its timezone policy.
# Now we are using the EU rules again.
#
# From Urmet Jaanes (2002-03-28):
# The legislative reference is Government decree No. 84 on 2002-02-21.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Tallinn 1:39:00 - LMT 1880
1:39:00 - TMT 1918 Feb # Tallinn Mean Time
1:00 C-Eur CE%sT 1919 Jul
1:39:00 - TMT 1921 May
2:00 - EET 1940 Aug 6
3:00 - MSK 1941 Sep 15
1:00 C-Eur CE%sT 1944 Sep 22
3:00 Russia MSK/MSD 1989 Mar 26 2:00s
2:00 1:00 EEST 1989 Sep 24 2:00s
2:00 C-Eur EE%sT 1998 Sep 22
2:00 EU EE%sT 1999 Nov 1
2:00 - EET 2002 Feb 21
2:00 EU EE%sT
# Finland
# From Hannu Strang (1994-09-25 06:03:37 UTC):
# Well, here in Helsinki we're just changing from summer time to regular one,
# and it's supposed to change at 4am...
# From Janne Snabb (2010-07-15):
#
# I noticed that the Finland data is not accurate for years 1981 and 1982.
# During these first two trial years the DST adjustment was made one hour
# earlier than in forthcoming years. Starting 1983 the adjustment was made
# according to the central European standards.
#
# This is documented in Heikki Oja: Aikakirja 2007, published by The Almanac
# Office of University of Helsinki, ISBN 952-10-3221-9, available online (in
# Finnish) at
#
# <a href="http://almanakka.helsinki.fi/aikakirja/Aikakirja2007kokonaan.pdf">
# http://almanakka.helsinki.fi/aikakirja/Aikakirja2007kokonaan.pdf
# </a>
#
# Page 105 (56 in PDF version) has a handy table of all past daylight savings
# transitions. It is easy enough to interpret without Finnish skills.
#
# This is also confirmed by Finnish Broadcasting Company's archive at:
#
# <a href="http://www.yle.fi/elavaarkisto/?s=s&g=1&ag=5&t=&a=3401">
# http://www.yle.fi/elavaarkisto/?s=s&g=1&ag=5&t=&a=3401
# </a>
#
# The news clip from 1981 says that "the time between 2 and 3 o'clock does not
# exist tonight."
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Finland 1942 only - Apr 3 0:00 1:00 S
Rule Finland 1942 only - Oct 3 0:00 0 -
Rule Finland 1981 1982 - Mar lastSun 2:00 1:00 S
Rule Finland 1981 1982 - Sep lastSun 3:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Helsinki 1:39:52 - LMT 1878 May 31
1:39:52 - HMT 1921 May # Helsinki Mean Time
2:00 Finland EE%sT 1983
2:00 EU EE%sT
# Aaland Is
Link Europe/Helsinki Europe/Mariehamn
# France
# From Ciro Discepolo (2000-12-20):
#
# Henri Le Corre, Regimes Horaires pour le monde entier, Editions
# Traditionnelles - Paris 2 books, 1993
#
# Gabriel, Traite de l'heure dans le monde, Guy Tredaniel editeur,
# Paris, 1991
#
# Francoise Gauquelin, Problemes de l'heure resolus en astrologie,
# Guy tredaniel, Paris 1987
#
# Shanks & Pottenger seem to use `24:00' ambiguously; resolve it with Whitman.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule France 1916 only - Jun 14 23:00s 1:00 S
Rule France 1916 1919 - Oct Sun>=1 23:00s 0 -
Rule France 1917 only - Mar 24 23:00s 1:00 S
Rule France 1918 only - Mar 9 23:00s 1:00 S
Rule France 1919 only - Mar 1 23:00s 1:00 S
Rule France 1920 only - Feb 14 23:00s 1:00 S
Rule France 1920 only - Oct 23 23:00s 0 -
Rule France 1921 only - Mar 14 23:00s 1:00 S
Rule France 1921 only - Oct 25 23:00s 0 -
Rule France 1922 only - Mar 25 23:00s 1:00 S
# DSH writes that a law of 1923-05-24 specified 3rd Sat in Apr at 23:00 to 1st
# Sat in Oct at 24:00; and that in 1930, because of Easter, the transitions
# were Apr 12 and Oct 5. Go with Shanks & Pottenger.
Rule France 1922 1938 - Oct Sat>=1 23:00s 0 -
Rule France 1923 only - May 26 23:00s 1:00 S
Rule France 1924 only - Mar 29 23:00s 1:00 S
Rule France 1925 only - Apr 4 23:00s 1:00 S
Rule France 1926 only - Apr 17 23:00s 1:00 S
Rule France 1927 only - Apr 9 23:00s 1:00 S
Rule France 1928 only - Apr 14 23:00s 1:00 S
Rule France 1929 only - Apr 20 23:00s 1:00 S
Rule France 1930 only - Apr 12 23:00s 1:00 S
Rule France 1931 only - Apr 18 23:00s 1:00 S
Rule France 1932 only - Apr 2 23:00s 1:00 S
Rule France 1933 only - Mar 25 23:00s 1:00 S
Rule France 1934 only - Apr 7 23:00s 1:00 S
Rule France 1935 only - Mar 30 23:00s 1:00 S
Rule France 1936 only - Apr 18 23:00s 1:00 S
Rule France 1937 only - Apr 3 23:00s 1:00 S
Rule France 1938 only - Mar 26 23:00s 1:00 S
Rule France 1939 only - Apr 15 23:00s 1:00 S
Rule France 1939 only - Nov 18 23:00s 0 -
Rule France 1940 only - Feb 25 2:00 1:00 S
# The French rules for 1941-1944 were not used in Paris, but Shanks & Pottenger
# write that they were used in Monaco and in many French locations.
# Le Corre writes that the upper limit of the free zone was Arneguy, Orthez,
# Mont-de-Marsan, Bazas, Langon, Lamotte-Montravel, Marouil, La
# Rochefoucault, Champagne-Mouton, La Roche-Posay, La Haye-Descartes,
# Loches, Montrichard, Vierzon, Bourges, Moulins, Digoin,
# Paray-le-Monial, Montceau-les-Mines, Chalons-sur-Saone, Arbois,
# Dole, Morez, St-Claude, and Collonges (Haute-Savoie).
Rule France 1941 only - May 5 0:00 2:00 M # Midsummer
# Shanks & Pottenger say this transition occurred at Oct 6 1:00,
# but go with Denis Excoffier (1997-12-12),
# who quotes the Ephemerides Astronomiques for 1998 from Bureau des Longitudes
# as saying 5/10/41 22hUT.
Rule France 1941 only - Oct 6 0:00 1:00 S
Rule France 1942 only - Mar 9 0:00 2:00 M
Rule France 1942 only - Nov 2 3:00 1:00 S
Rule France 1943 only - Mar 29 2:00 2:00 M
Rule France 1943 only - Oct 4 3:00 1:00 S
Rule France 1944 only - Apr 3 2:00 2:00 M
Rule France 1944 only - Oct 8 1:00 1:00 S
Rule France 1945 only - Apr 2 2:00 2:00 M
Rule France 1945 only - Sep 16 3:00 0 -
# Shanks & Pottenger give Mar 28 2:00 and Sep 26 3:00;
# go with Excoffier's 28/3/76 0hUT and 25/9/76 23hUT.
Rule France 1976 only - Mar 28 1:00 1:00 S
Rule France 1976 only - Sep 26 1:00 0 -
# Shanks & Pottenger give 0:09:20 for Paris Mean Time, and Whitman 0:09:05,
# but Howse quotes the actual French legislation as saying 0:09:21.
# Go with Howse. Howse writes that the time in France was officially based
# on PMT-0:09:21 until 1978-08-09, when the time base finally switched to UTC.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Paris 0:09:21 - LMT 1891 Mar 15 0:01
0:09:21 - PMT 1911 Mar 11 0:01 # Paris MT
# Shanks & Pottenger give 1940 Jun 14 0:00; go with Excoffier and Le Corre.
0:00 France WE%sT 1940 Jun 14 23:00
# Le Corre says Paris stuck with occupied-France time after the liberation;
# go with Shanks & Pottenger.
1:00 C-Eur CE%sT 1944 Aug 25
0:00 France WE%sT 1945 Sep 16 3:00
1:00 France CE%sT 1977
1:00 EU CE%sT
# Germany
# From Markus Kuhn (1998-09-29):
# The German time zone web site by the Physikalisch-Technische
# Bundesanstalt contains DST information back to 1916.
# [See tz-link.htm for the URL.]
# From Joerg Schilling (2002-10-23):
# In 1945, Berlin was switched to Moscow Summer time (GMT+4) by
# <a href="http://www.dhm.de/lemo/html/biografien/BersarinNikolai/">
# General [Nikolai] Bersarin</a>.
# From Paul Eggert (2003-03-08):
# <a href="http://www.parlament-berlin.de/pds-fraktion.nsf/727459127c8b66ee8525662300459099/defc77cb784f180ac1256c2b0030274b/$FILE/bersarint.pdf">
# http://www.parlament-berlin.de/pds-fraktion.nsf/727459127c8b66ee8525662300459099/defc77cb784f180ac1256c2b0030274b/$FILE/bersarint.pdf
# </a>
# says that Bersarin issued an order to use Moscow time on May 20.
# However, Moscow did not observe daylight saving in 1945, so
# this was equivalent to CEMT (GMT+3), not GMT+4.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Germany 1946 only - Apr 14 2:00s 1:00 S
Rule Germany 1946 only - Oct 7 2:00s 0 -
Rule Germany 1947 1949 - Oct Sun>=1 2:00s 0 -
# http://www.ptb.de/de/org/4/44/441/salt.htm says the following transition
# occurred at 3:00 MEZ, not the 2:00 MEZ given in Shanks & Pottenger.
# Go with the PTB.
Rule Germany 1947 only - Apr 6 3:00s 1:00 S
Rule Germany 1947 only - May 11 2:00s 2:00 M
Rule Germany 1947 only - Jun 29 3:00 1:00 S
Rule Germany 1948 only - Apr 18 2:00s 1:00 S
Rule Germany 1949 only - Apr 10 2:00s 1:00 S
Rule SovietZone 1945 only - May 24 2:00 2:00 M # Midsummer
Rule SovietZone 1945 only - Sep 24 3:00 1:00 S
Rule SovietZone 1945 only - Nov 18 2:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Berlin 0:53:28 - LMT 1893 Apr
1:00 C-Eur CE%sT 1945 May 24 2:00
1:00 SovietZone CE%sT 1946
1:00 Germany CE%sT 1980
1:00 EU CE%sT
# Georgia
# Please see the "asia" file for Asia/Tbilisi.
# Herodotus (Histories, IV.45) says Georgia north of the Phasis (now Rioni)
# is in Europe. Our reference location Tbilisi is in the Asian part.
# Gibraltar
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Gibraltar -0:21:24 - LMT 1880 Aug 2 0:00s
0:00 GB-Eire %s 1957 Apr 14 2:00
1:00 - CET 1982
1:00 EU CE%sT
# Greece
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
# Whitman gives 1932 Jul 5 - Nov 1; go with Shanks & Pottenger.
Rule Greece 1932 only - Jul 7 0:00 1:00 S
Rule Greece 1932 only - Sep 1 0:00 0 -
# Whitman gives 1941 Apr 25 - ?; go with Shanks & Pottenger.
Rule Greece 1941 only - Apr 7 0:00 1:00 S
# Whitman gives 1942 Feb 2 - ?; go with Shanks & Pottenger.
Rule Greece 1942 only - Nov 2 3:00 0 -
Rule Greece 1943 only - Mar 30 0:00 1:00 S
Rule Greece 1943 only - Oct 4 0:00 0 -
# Whitman gives 1944 Oct 3 - Oct 31; go with Shanks & Pottenger.
Rule Greece 1952 only - Jul 1 0:00 1:00 S
Rule Greece 1952 only - Nov 2 0:00 0 -
Rule Greece 1975 only - Apr 12 0:00s 1:00 S
Rule Greece 1975 only - Nov 26 0:00s 0 -
Rule Greece 1976 only - Apr 11 2:00s 1:00 S
Rule Greece 1976 only - Oct 10 2:00s 0 -
Rule Greece 1977 1978 - Apr Sun>=1 2:00s 1:00 S
Rule Greece 1977 only - Sep 26 2:00s 0 -
Rule Greece 1978 only - Sep 24 4:00 0 -
Rule Greece 1979 only - Apr 1 9:00 1:00 S
Rule Greece 1979 only - Sep 29 2:00 0 -
Rule Greece 1980 only - Apr 1 0:00 1:00 S
Rule Greece 1980 only - Sep 28 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Athens 1:34:52 - LMT 1895 Sep 14
1:34:52 - AMT 1916 Jul 28 0:01 # Athens MT
2:00 Greece EE%sT 1941 Apr 30
1:00 Greece CE%sT 1944 Apr 4
2:00 Greece EE%sT 1981
# Shanks & Pottenger say it switched to C-Eur in 1981;
# go with EU instead, since Greece joined it on Jan 1.
2:00 EU EE%sT
# Hungary
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Hungary 1918 only - Apr 1 3:00 1:00 S
Rule Hungary 1918 only - Sep 29 3:00 0 -
Rule Hungary 1919 only - Apr 15 3:00 1:00 S
Rule Hungary 1919 only - Sep 15 3:00 0 -
Rule Hungary 1920 only - Apr 5 3:00 1:00 S
Rule Hungary 1920 only - Sep 30 3:00 0 -
Rule Hungary 1945 only - May 1 23:00 1:00 S
Rule Hungary 1945 only - Nov 3 0:00 0 -
Rule Hungary 1946 only - Mar 31 2:00s 1:00 S
Rule Hungary 1946 1949 - Oct Sun>=1 2:00s 0 -
Rule Hungary 1947 1949 - Apr Sun>=4 2:00s 1:00 S
Rule Hungary 1950 only - Apr 17 2:00s 1:00 S
Rule Hungary 1950 only - Oct 23 2:00s 0 -
Rule Hungary 1954 1955 - May 23 0:00 1:00 S
Rule Hungary 1954 1955 - Oct 3 0:00 0 -
Rule Hungary 1956 only - Jun Sun>=1 0:00 1:00 S
Rule Hungary 1956 only - Sep lastSun 0:00 0 -
Rule Hungary 1957 only - Jun Sun>=1 1:00 1:00 S
Rule Hungary 1957 only - Sep lastSun 3:00 0 -
Rule Hungary 1980 only - Apr 6 1:00 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Budapest 1:16:20 - LMT 1890 Oct
1:00 C-Eur CE%sT 1918
1:00 Hungary CE%sT 1941 Apr 6 2:00
1:00 C-Eur CE%sT 1945
1:00 Hungary CE%sT 1980 Sep 28 2:00s
1:00 EU CE%sT
# Iceland
#
# From Adam David (1993-11-06):
# The name of the timezone in Iceland for system / mail / news purposes is GMT.
#
# (1993-12-05):
# This material is paraphrased from the 1988 edition of the University of
# Iceland Almanak.
#
# From January 1st, 1908 the whole of Iceland was standardised at 1 hour
# behind GMT. Previously, local mean solar time was used in different parts
# of Iceland; the almanak had been based on Reykjavik mean solar time, which
# was 1 hour and 28 minutes behind GMT.
#
# "first day of winter" referred to [below] means the first day of the 26 weeks
# of winter, according to the old Icelandic calendar that dates back to the
# time the Norsemen first settled Iceland. The first day of winter is always
# a Saturday, but is not dependent on the Julian or Gregorian calendars.
#
# (1993-12-10):
# I have a reference from the Oxford Icelandic-English dictionary for the
# beginning of winter, which ties it to the ecclesiastical calendar (and thus
# to the Julian/Gregorian calendar) over the period in question.
# the winter begins on the Saturday next before St. Luke's day
# (old style), or on St. Luke's day, if a Saturday.
# St. Luke's day ought to be traceable from ecclesiastical sources. "old style"
# might be a reference to the Julian calendar as opposed to Gregorian, or it
# might mean something else (???).
#
# From Paul Eggert (2006-03-22):
# The Iceland Almanak, Shanks & Pottenger, and Whitman disagree on many points.
# We go with the Almanak, except for one claim from Shanks & Pottenger, namely
# that Reykjavik was 21W57 from 1837 to 1908, local mean time before that.
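#
# In the modern calendar the first day of winter works out to the Saturday
# falling on Oct 21-27, which makes the "first Sunday in winter" the Sunday
# satisfying Oct Sun>=22, as in the 1943-1948 rules below.  A Python sketch
# under that assumption (illustrative only; the old-style St. Luke's day
# derivation quoted above is ignored here):
#
#	import datetime
#
#	def first_sunday_in_winter(year):
#	    # First Sunday on or after Oct 22, i.e. the day after the
#	    # first-day-of-winter Saturday (assumed to fall on Oct 21-27).
#	    d = datetime.date(year, 10, 22)
#	    return d + datetime.timedelta((6 - d.weekday()) % 7)
#
#	print(first_sunday_in_winter(1946))   # 1946-10-27, matching Sun>=22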
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Iceland 1917 1918 - Feb 19 23:00 1:00 S
Rule Iceland 1917 only - Oct 21 1:00 0 -
Rule Iceland 1918 only - Nov 16 1:00 0 -
Rule Iceland 1939 only - Apr 29 23:00 1:00 S
Rule Iceland 1939 only - Nov 29 2:00 0 -
Rule Iceland 1940 only - Feb 25 2:00 1:00 S
Rule Iceland 1940 only - Nov 3 2:00 0 -
Rule Iceland 1941 only - Mar 2 1:00s 1:00 S
Rule Iceland 1941 only - Nov 2 1:00s 0 -
Rule Iceland 1942 only - Mar 8 1:00s 1:00 S
Rule Iceland 1942 only - Oct 25 1:00s 0 -
# 1943-1946 - first Sunday in March until first Sunday in winter
Rule Iceland 1943 1946 - Mar Sun>=1 1:00s 1:00 S
Rule Iceland 1943 1948 - Oct Sun>=22 1:00s 0 -
# 1947-1967 - first Sunday in April until first Sunday in winter
Rule Iceland 1947 1967 - Apr Sun>=1 1:00s 1:00 S
# 1949 Oct transition delayed by 1 week
Rule Iceland 1949 only - Oct 30 1:00s 0 -
Rule Iceland 1950 1966 - Oct Sun>=22 1:00s 0 -
Rule Iceland 1967 only - Oct 29 1:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Atlantic/Reykjavik -1:27:24 - LMT 1837
-1:27:48 - RMT 1908 # Reykjavik Mean Time?
-1:00 Iceland IS%sT 1968 Apr 7 1:00s
0:00 - GMT
# Italy
#
# From Paul Eggert (2001-03-06):
# Sicily and Sardinia each had their own time zones from 1866 to 1893,
# called Palermo Time (+00:53:28) and Cagliari Time (+00:36:32).
# During World War II, German-controlled Italy used German time.
# But these events all occurred before the 1970 cutoff,
# so record only the time in Rome.
#
# From Paul Eggert (2006-03-22):
# For Italian DST we have three sources: Shanks & Pottenger, Whitman, and
# F. Pollastri
# <a href="http://toi.iriti.cnr.it/uk/ienitlt.html">
# Day-light Saving Time in Italy (2006-02-03)
# </a>
# (`FP' below), taken from an Italian National Electrotechnical Institute
# publication. When the three sources disagree, guess who's right, as follows:
#
# year FP Shanks&P. (S) Whitman (W) Go with:
# 1916 06-03 06-03 24:00 06-03 00:00 FP & W
# 09-30 09-30 24:00 09-30 01:00 FP; guess 24:00s
# 1917 04-01 03-31 24:00 03-31 00:00 FP & S
# 09-30 09-29 24:00 09-30 01:00 FP & W
# 1918 03-09 03-09 24:00 03-09 00:00 FP & S
# 10-06 10-05 24:00 10-06 01:00 FP & W
# 1919 03-01 03-01 24:00 03-01 00:00 FP & S
# 10-04 10-04 24:00 10-04 01:00 FP; guess 24:00s
# 1920 03-20 03-20 24:00 03-20 00:00 FP & S
# 09-18 09-18 24:00 10-01 01:00 FP; guess 24:00s
# 1944 04-02 04-03 02:00 S (see C-Eur)
# 09-16 10-02 03:00 FP; guess 24:00s
# 1945 09-14 09-16 24:00 FP; guess 24:00s
# 1970 05-21 05-31 00:00 S
# 09-20 09-27 00:00 S
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Italy 1916 only - Jun 3 0:00s 1:00 S
Rule Italy 1916 only - Oct 1 0:00s 0 -
Rule Italy 1917 only - Apr 1 0:00s 1:00 S
Rule Italy 1917 only - Sep 30 0:00s 0 -
Rule Italy 1918 only - Mar 10 0:00s 1:00 S
Rule Italy 1918 1919 - Oct Sun>=1 0:00s 0 -
Rule Italy 1919 only - Mar 2 0:00s 1:00 S
Rule Italy 1920 only - Mar 21 0:00s 1:00 S
Rule Italy 1920 only - Sep 19 0:00s 0 -
Rule Italy 1940 only - Jun 15 0:00s 1:00 S
Rule Italy 1944 only - Sep 17 0:00s 0 -
Rule Italy 1945 only - Apr 2 2:00 1:00 S
Rule Italy 1945 only - Sep 15 0:00s 0 -
Rule Italy 1946 only - Mar 17 2:00s 1:00 S
Rule Italy 1946 only - Oct 6 2:00s 0 -
Rule Italy 1947 only - Mar 16 0:00s 1:00 S
Rule Italy 1947 only - Oct 5 0:00s 0 -
Rule Italy 1948 only - Feb 29 2:00s 1:00 S
Rule Italy 1948 only - Oct 3 2:00s 0 -
Rule Italy 1966 1968 - May Sun>=22 0:00 1:00 S
Rule Italy 1966 1969 - Sep Sun>=22 0:00 0 -
Rule Italy 1969 only - Jun 1 0:00 1:00 S
Rule Italy 1970 only - May 31 0:00 1:00 S
Rule Italy 1970 only - Sep lastSun 0:00 0 -
Rule Italy 1971 1972 - May Sun>=22 0:00 1:00 S
Rule Italy 1971 only - Sep lastSun 1:00 0 -
Rule Italy 1972 only - Oct 1 0:00 0 -
Rule Italy 1973 only - Jun 3 0:00 1:00 S
Rule Italy 1973 1974 - Sep lastSun 0:00 0 -
Rule Italy 1974 only - May 26 0:00 1:00 S
Rule Italy 1975 only - Jun 1 0:00s 1:00 S
Rule Italy 1975 1977 - Sep lastSun 0:00s 0 -
Rule Italy 1976 only - May 30 0:00s 1:00 S
Rule Italy 1977 1979 - May Sun>=22 0:00s 1:00 S
Rule Italy 1978 only - Oct 1 0:00s 0 -
Rule Italy 1979 only - Sep 30 0:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Rome 0:49:56 - LMT 1866 Sep 22
0:49:56 - RMT 1893 Nov 1 0:00s # Rome Mean
1:00 Italy CE%sT 1942 Nov 2 2:00s
1:00 C-Eur CE%sT 1944 Jul
1:00 Italy CE%sT 1980
1:00 EU CE%sT
Link Europe/Rome Europe/Vatican
Link Europe/Rome Europe/San_Marino
# Latvia
# From Liene Kanepe (1998-09-17):
# I asked the Scientific Secretary of the Institute of Astronomy
# of the University of Latvia, Dr. paed. Ilgonis Vilks, about this matter. I also
# searched for the correct data in juridical acts and found some juridical documents about
# changes in the counting of time in Latvia from 1981....
#
# Act No.35 of the Council of Ministers of Latvian SSR of 1981-01-22 ...
# according to the Act No.925 of the Council of Ministers of USSR of 1980-10-24
# ...: all year round the time of 2nd time zone + 1 hour, in addition turning
# the hands of the clock 1 hour forward on 1 April at 00:00 (GMT 31 March 21:00)
# and 1 hour backward on the 1 October at 00:00 (GMT 30 September 20:00).
#
# Act No.592 of the Council of Ministers of Latvian SSR of 1984-09-24 ...
# according to the Act No.967 of the Council of Ministers of USSR of 1984-09-13
# ...: all year round the time of 2nd time zone + 1 hour, in addition turning
# the hands of the clock 1 hour forward on the last Sunday of March at 02:00
# (GMT 23:00 on the previous day) and 1 hour backward on the last Sunday of
# September at 03:00 (GMT 23:00 on the previous day).
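# The GMT equivalents quoted above pin down the offsets in force: +03:00 in
# winter (the 2nd zone + 1 hour) and +04:00 in summer. A minimal consistency
# check (illustration only, stdlib datetime):
#
#   from datetime import datetime, timedelta, timezone
#
#   winter, summer = (timezone(timedelta(hours=h)) for h in (3, 4))
#   utc = timezone.utc
#   # "1 April at 00:00 (GMT 31 March 21:00)":
#   assert (datetime(1981, 4, 1, tzinfo=winter).astimezone(utc)
#           == datetime(1981, 3, 31, 21, 0, tzinfo=utc))
#   # "1 October at 00:00 (GMT 30 September 20:00)", clocks still on DST:
#   assert (datetime(1981, 10, 1, tzinfo=summer).astimezone(utc)
#           == datetime(1981, 9, 30, 20, 0, tzinfo=utc))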
#
# Act No.81 of the Council of Ministers of Latvian SSR of 1989-03-22 ...
# according to the Act No.227 of the Council of Ministers of USSR of 1989-03-14
# ...: since the last Sunday of March 1989 in Lithuanian SSR, Latvian SSR,
# Estonian SSR and Kaliningrad region of Russian Federation all year round the
# time of 2nd time zone (Moscow time minus one hour). On the territory of Latvia
# transition to summer time is performed on the last Sunday of March at 02:00
# (GMT 00:00), turning the hands of the clock 1 hour forward. The end of
# daylight saving time is performed on the last Sunday of September at 03:00
# (GMT 00:00), turning the hands of the clock 1 hour backward. Exception is
# 1989-03-26, when we must not turn the hands of the clock....
#
# The Regulations of the Cabinet of Ministers of the Republic of Latvia of
# 1997-01-21 on transition to Summer time ... established the same order of
# daylight savings time settings as in the States of the European Union.
# From Andrei Ivanov (2000-03-06):
# This year Latvia will not switch to Daylight Savings Time (as specified in
# <a href="http://www.lv-laiks.lv/wwwraksti/2000/071072/vd4.htm">
# The Regulations of the Cabinet of Ministers of the Rep. of Latvia of
# 29-Feb-2000 (#79)</a>, in Latvian for subscribers only).
# <a href="http://www.rferl.org/newsline/2001/01/3-CEE/cee-030101.html">
# From RFE/RL Newsline (2001-01-03), noted after a heads-up by Rives McDow:
# </a>
# The Latvian government on 2 January decided that the country will
# institute daylight-saving time this spring, LETA reported.
# Last February the three Baltic states decided not to turn back their
# clocks one hour in the spring....
# Minister of Economy Aigars Kalvitis noted that Latvia had too few
# daylight hours and thus decided to comply with a draft European
# Commission directive that provides for instituting daylight-saving
# time in EU countries between 2002 and 2006. The Latvian government
# urged Lithuania and Estonia to adopt a similar time policy, but it
# appears that they will not do so....
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Latvia 1989 1996 - Mar lastSun 2:00s 1:00 S
Rule Latvia 1989 1996 - Sep lastSun 2:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Riga 1:36:24 - LMT 1880
1:36:24 - RMT 1918 Apr 15 2:00 #Riga Mean Time
1:36:24 1:00 LST 1918 Sep 16 3:00 #Latvian Summer
1:36:24 - RMT 1919 Apr 1 2:00
1:36:24 1:00 LST 1919 May 22 3:00
1:36:24 - RMT 1926 May 11
2:00 - EET 1940 Aug 5
3:00 - MSK 1941 Jul
1:00 C-Eur CE%sT 1944 Oct 13
3:00 Russia MSK/MSD 1989 Mar lastSun 2:00s
2:00 1:00 EEST 1989 Sep lastSun 2:00s
2:00 Latvia EE%sT 1997 Jan 21
2:00 EU EE%sT 2000 Feb 29
2:00 - EET 2001 Jan 2
2:00 EU EE%sT
# Liechtenstein
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Vaduz 0:38:04 - LMT 1894 Jun
1:00 - CET 1981
1:00 EU CE%sT
# Lithuania
# From Paul Eggert (1996-11-22):
# IATA SSIM (1992/1996) says Lithuania uses W-Eur rules, but since it is
# known to be wrong about Estonia and Latvia, assume it's wrong here too.
# From Marius Gedminas (1998-08-07):
# I would like to inform that in this year Lithuanian time zone
# (Europe/Vilnius) was changed.
# From <a href="http://www.elta.lt/">ELTA</a> No. 972 (2582) (1999-09-29),
# via Steffen Thorsen:
# Lithuania has shifted back to the second time zone (GMT plus two hours)
# to be valid here starting from October 31,
# as decided by the national government on Wednesday....
# The Lithuanian government also announced plans to consider a
# motion to give up shifting to summer time in spring, as it was
# already done by Estonia.
# From the <a href="http://www.tourism.lt/informa/ff.htm">
# Fact File, Lithuanian State Department of Tourism
# </a> (2000-03-27): Local time is GMT+2 hours ..., no daylight saving.
# From a user via Klaus Marten (2003-02-07):
# As a candidate for membership of the European Union, Lithuania will
# observe Summer Time in 2003, changing its clocks at the times laid
# down in EU Directive 2000/84 of 19.I.01 (i.e. at the same times as its
# neighbour Latvia). The text of the Lithuanian government Order of
# 7.XI.02 to this effect can be found at
# http://www.lrvk.lt/nut/11/n1749.htm
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Vilnius 1:41:16 - LMT 1880
1:24:00 - WMT 1917 # Warsaw Mean Time
1:35:36 - KMT 1919 Oct 10 # Kaunas Mean Time
1:00 - CET 1920 Jul 12
2:00 - EET 1920 Oct 9
1:00 - CET 1940 Aug 3
3:00 - MSK 1941 Jun 24
1:00 C-Eur CE%sT 1944 Aug
3:00 Russia MSK/MSD 1991 Mar 31 2:00s
2:00 1:00 EEST 1991 Sep 29 2:00s
2:00 C-Eur EE%sT 1998
2:00 - EET 1998 Mar 29 1:00u
1:00 EU CE%sT 1999 Oct 31 1:00u
2:00 - EET 2003 Jan 1
2:00 EU EE%sT
# Luxembourg
# Whitman disagrees with most of these dates in minor ways;
# go with Shanks & Pottenger.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Lux 1916 only - May 14 23:00 1:00 S
Rule Lux 1916 only - Oct 1 1:00 0 -
Rule Lux 1917 only - Apr 28 23:00 1:00 S
Rule Lux 1917 only - Sep 17 1:00 0 -
Rule Lux 1918 only - Apr Mon>=15 2:00s 1:00 S
Rule Lux 1918 only - Sep Mon>=15 2:00s 0 -
Rule Lux 1919 only - Mar 1 23:00 1:00 S
Rule Lux 1919 only - Oct 5 3:00 0 -
Rule Lux 1920 only - Feb 14 23:00 1:00 S
Rule Lux 1920 only - Oct 24 2:00 0 -
Rule Lux 1921 only - Mar 14 23:00 1:00 S
Rule Lux 1921 only - Oct 26 2:00 0 -
Rule Lux 1922 only - Mar 25 23:00 1:00 S
Rule Lux 1922 only - Oct Sun>=2 1:00 0 -
Rule Lux 1923 only - Apr 21 23:00 1:00 S
Rule Lux 1923 only - Oct Sun>=2 2:00 0 -
Rule Lux 1924 only - Mar 29 23:00 1:00 S
Rule Lux 1924 1928 - Oct Sun>=2 1:00 0 -
Rule Lux 1925 only - Apr 5 23:00 1:00 S
Rule Lux 1926 only - Apr 17 23:00 1:00 S
Rule Lux 1927 only - Apr 9 23:00 1:00 S
Rule Lux 1928 only - Apr 14 23:00 1:00 S
Rule Lux 1929 only - Apr 20 23:00 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Luxembourg 0:24:36 - LMT 1904 Jun
1:00 Lux CE%sT 1918 Nov 25
0:00 Lux WE%sT 1929 Oct 6 2:00s
0:00 Belgium WE%sT 1940 May 14 3:00
1:00 C-Eur WE%sT 1944 Sep 18 3:00
1:00 Belgium CE%sT 1977
1:00 EU CE%sT
# Macedonia
# see Serbia
# Malta
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Malta 1973 only - Mar 31 0:00s 1:00 S
Rule Malta 1973 only - Sep 29 0:00s 0 -
Rule Malta 1974 only - Apr 21 0:00s 1:00 S
Rule Malta 1974 only - Sep 16 0:00s 0 -
Rule Malta 1975 1979 - Apr Sun>=15 2:00 1:00 S
Rule Malta 1975 1980 - Sep Sun>=15 2:00 0 -
Rule Malta 1980 only - Mar 31 2:00 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Malta 0:58:04 - LMT 1893 Nov 2 0:00s # Valletta
1:00 Italy CE%sT 1942 Nov 2 2:00s
1:00 C-Eur CE%sT 1945 Apr 2 2:00s
1:00 Italy CE%sT 1973 Mar 31
1:00 Malta CE%sT 1981
1:00 EU CE%sT
# Moldova
# From Paul Eggert (2006-03-22):
# A previous version of this database followed Shanks & Pottenger, who write
# that Tiraspol switched to Moscow time on 1992-01-19 at 02:00.
# However, this is most likely an error, as Moldova declared independence
# on 1991-08-27 (the 1992-01-19 date is that of a Russian decree).
# In early 1992 there was large-scale interethnic violence in the area
# and it's possible that some Russophones continued to observe Moscow time.
# But [two people] separately reported via
# Jesper Norgaard that as of 2001-01-24 Tiraspol was like Chisinau.
# The Tiraspol entry has therefore been removed for now.
#
# From Alexander Krivenyshev (2011-10-17):
# Pridnestrovian Moldavian Republic (PMR, also known as
# "Pridnestrovie") has abolished seasonal clock change (no transition
# to the Winter Time).
#
# News (in Russian):
# <a href="http://www.kyivpost.ua/russia/news/pridnestrove-otkazalos-ot-perehoda-na-zimnee-vremya-30954.html">
# http://www.kyivpost.ua/russia/news/pridnestrove-otkazalos-ot-perehoda-na-zimnee-vremya-30954.html
# </a>
#
# <a href="http://www.allmoldova.com/moldova-news/1249064116.html">
# http://www.allmoldova.com/moldova-news/1249064116.html
# </a>
#
# The substance of this change (reinstatement of the Tiraspol entry)
# is from a patch from Petr Machata (2011-10-17)
#
# From Tim Parenti (2011-10-19)
# In addition, being situated at +4651+2938 would give Tiraspol
# a pre-1880 LMT offset of 1:58:32.
#
# (which agrees with the earlier entry that had been removed)
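# That LMT figure is just the longitude converted to time, at one degree =
# four minutes. A quick check (illustration only; the helper name is ad hoc):
#
#   def lmt_offset(deg, minutes):
#       seconds = round((deg + minutes / 60) * 240)
#       return seconds // 3600, seconds % 3600 // 60, seconds % 60
#
#   assert lmt_offset(29, 38) == (1, 58, 32)   # +2938 east -> 1:58:32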
#
# From Alexander Krivenyshev (2011-10-26)
# No need to divide Moldova into two time zones at this point.
# As of today, Transnistria (Pridnestrovie) - Tiraspol reversed its own
# decision to abolish DST this winter.
# Following Moldova and neighboring Ukraine, Transnistria (Pridnestrovie) -
# Tiraspol will go back to winter time on October 30, 2011.
# News from Moldova (in Russian):
# <a href="http://ru.publika.md/link_317061.html">
# http://ru.publika.md/link_317061.html
# </a>
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Chisinau 1:55:20 - LMT 1880
1:55 - CMT 1918 Feb 15 # Chisinau MT
1:44:24 - BMT 1931 Jul 24 # Bucharest MT
2:00 Romania EE%sT 1940 Aug 15
2:00 1:00 EEST 1941 Jul 17
1:00 C-Eur CE%sT 1944 Aug 24
3:00 Russia MSK/MSD 1990
3:00 - MSK 1990 May 6
2:00 - EET 1991
2:00 Russia EE%sT 1992
2:00 E-Eur EE%sT 1997
# See Romania commentary for the guessed 1997 transition to EU rules.
2:00 EU EE%sT
# Monaco
# Shanks & Pottenger give 0:09:20 for Paris Mean Time; go with Howse's
# more precise 0:09:21.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Monaco 0:29:32 - LMT 1891 Mar 15
0:09:21 - PMT 1911 Mar 11 # Paris Mean Time
0:00 France WE%sT 1945 Sep 16 3:00
1:00 France CE%sT 1977
1:00 EU CE%sT
# Montenegro
# see Serbia
# Netherlands
# Howse writes that the Netherlands' railways used GMT between 1892 and 1940,
# but for other purposes the Netherlands used Amsterdam mean time.
# However, Robert H. van Gent writes (2001-04-01):
# Howse's statement is only correct up to 1909. From 1909-05-01 (00:00:00
# Amsterdam mean time) onwards, the whole of the Netherlands (including
# the Dutch railways) was required by law to observe Amsterdam mean time
# (19 minutes 32.13 seconds ahead of GMT). This had already been the
# common practice (except for the railways) for many decades but it was
# not until 1909 when the Dutch government finally defined this by law.
# On 1937-07-01 this was changed to 20 minutes (exactly) ahead of GMT and
# was generally known as Dutch Time ("Nederlandse Tijd").
#
# (2001-04-08):
# 1892-05-01 was the date when the Dutch railways were by law required to
# observe GMT while the remainder of the Netherlands adhered to the common
# practice of following Amsterdam mean time.
#
# (2001-04-09):
# In 1835 the authorities of the province of North Holland requested the
# municipal authorities of the towns and cities in the province to observe
# Amsterdam mean time but I do not know in how many cases this request was
# actually followed.
#
# From 1852 onwards the Dutch telegraph offices were by law required to
# observe Amsterdam mean time. As the time signals from the observatory of
# Leiden were also distributed by the telegraph system, I assume that most
# places linked up with the telegraph (and railway) system automatically
# adopted Amsterdam mean time.
#
# Although the early Dutch railway companies initially observed a variety
# of times, most of them had adopted Amsterdam mean time by 1858 but it
# was not until 1866 when they were all required by law to observe
# Amsterdam mean time.
# The data before 1945 are taken from
# <http://www.phys.uu.nl/~vgent/wettijd/wettijd.htm>.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Neth 1916 only - May 1 0:00 1:00 NST # Netherlands Summer Time
Rule Neth 1916 only - Oct 1 0:00 0 AMT # Amsterdam Mean Time
Rule Neth 1917 only - Apr 16 2:00s 1:00 NST
Rule Neth 1917 only - Sep 17 2:00s 0 AMT
Rule Neth 1918 1921 - Apr Mon>=1 2:00s 1:00 NST
Rule Neth 1918 1921 - Sep lastMon 2:00s 0 AMT
Rule Neth 1922 only - Mar lastSun 2:00s 1:00 NST
Rule Neth 1922 1936 - Oct Sun>=2 2:00s 0 AMT
Rule Neth 1923 only - Jun Fri>=1 2:00s 1:00 NST
Rule Neth 1924 only - Mar lastSun 2:00s 1:00 NST
Rule Neth 1925 only - Jun Fri>=1 2:00s 1:00 NST
# From 1926 through 1939 DST began 05-15, except that it was delayed by a week
# in years when 05-15 fell in the Pentecost weekend.
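# Whit (Pentecost) Sunday falls 49 days after Easter, so the delayed years
# can be computed. A sketch (illustration only) using the anonymous
# Gregorian Easter algorithm, which flags exactly 1932 and 1937:
#
#   from datetime import date, timedelta
#
#   def easter(y):
#       a, b, c = y % 19, y // 100, y % 100
#       d, e = b // 4, b % 4
#       g = (b - (b + 8) // 25 + 1) // 3
#       h = (19 * a + b - d - g + 15) % 30
#       i, k = c // 4, c % 4
#       l = (32 + 2 * e + 2 * i - h - k) % 7
#       m = (a + 11 * h + 22 * l) // 451
#       n = h + l - 7 * m + 114
#       return date(y, n // 31, n % 31 + 1)
#
#   def pentecost_weekend_hits_may_15(y):
#       whit_sun = easter(y) + timedelta(days=49)
#       return whit_sun - timedelta(days=1) <= date(y, 5, 15) <= whit_sun
#
#   assert [y for y in range(1926, 1940)
#           if pentecost_weekend_hits_may_15(y)] == [1932, 1937]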
Rule Neth 1926 1931 - May 15 2:00s 1:00 NST
Rule Neth 1932 only - May 22 2:00s 1:00 NST
Rule Neth 1933 1936 - May 15 2:00s 1:00 NST
Rule Neth 1937 only - May 22 2:00s 1:00 NST
Rule Neth 1937 only - Jul 1 0:00 1:00 S
Rule Neth 1937 1939 - Oct Sun>=2 2:00s 0 -
Rule Neth 1938 1939 - May 15 2:00s 1:00 S
Rule Neth 1945 only - Apr 2 2:00s 1:00 S
Rule Neth 1945 only - Sep 16 2:00s 0 -
#
# Amsterdam Mean Time was +00:19:32.13 exactly, but the .13 is omitted
# below because the current format requires GMTOFF to be an integer.
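# I.e., 19 min 32.13 s = 1172.13 seconds east of Greenwich, with the
# fractional second dropped (a quick check, illustration only):
#
#   assert int(19 * 60 + 32.13) == 1172        # -> the 0:19:32 used below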
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Amsterdam 0:19:32 - LMT 1835
0:19:32 Neth %s 1937 Jul 1
0:20 Neth NE%sT 1940 May 16 0:00 # Dutch Time
1:00 C-Eur CE%sT 1945 Apr 2 2:00
1:00 Neth CE%sT 1977
1:00 EU CE%sT
# Norway
# http://met.no/met/met_lex/q_u/sommertid.html (2004-01) agrees with Shanks &
# Pottenger.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Norway 1916 only - May 22 1:00 1:00 S
Rule Norway 1916 only - Sep 30 0:00 0 -
Rule Norway 1945 only - Apr 2 2:00s 1:00 S
Rule Norway 1945 only - Oct 1 2:00s 0 -
Rule Norway 1959 1964 - Mar Sun>=15 2:00s 1:00 S
Rule Norway 1959 1965 - Sep Sun>=15 2:00s 0 -
Rule Norway 1965 only - Apr 25 2:00s 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Oslo 0:43:00 - LMT 1895 Jan 1
1:00 Norway CE%sT 1940 Aug 10 23:00
1:00 C-Eur CE%sT 1945 Apr 2 2:00
1:00 Norway CE%sT 1980
1:00 EU CE%sT
# Svalbard & Jan Mayen
# From Steffen Thorsen (2001-05-01):
# Although I could not find it explicitly, it seems that Jan Mayen and
# Svalbard have been using the same time as Norway at least since the
# time they were declared as parts of Norway. Svalbard was declared
# as a part of Norway by law of 1925-07-17 no 11, section 4 and Jan
# Mayen by law of 1930-02-27 no 2, section 2. (From
# http://www.lovdata.no/all/nl-19250717-011.html and
# http://www.lovdata.no/all/nl-19300227-002.html). The law/regulation
# for normal/standard time in Norway is from 1894-06-29 no 1 (came
# into operation on 1895-01-01) and Svalbard/Jan Mayen seem to be a
# part of this law since 1925/1930. (From
# http://www.lovdata.no/all/nl-18940629-001.html ) I have not been
# able to find if Jan Mayen used a different time zone (e.g. -0100)
# before 1930. Jan Mayen has only been "inhabited" since 1921 by
# Norwegian meteorologists and maybe used the same time as Norway ever
# since 1921. Svalbard (Arctic/Longyearbyen) has been inhabited since
# before 1895, and therefore probably changed the local time somewhere
# between 1895 and 1925 (inclusive).
# From Paul Eggert (2001-05-01):
#
# Actually, Jan Mayen was never occupied by Germany during World War II,
# so it must have diverged from Oslo time during the war, as Oslo was
# keeping Berlin time.
#
# <http://home.no.net/janmayen/history.htm> says that the meteorologists
# burned down their station in 1940 and left the island, but returned in
# 1941 with a small Norwegian garrison and continued operations despite
# frequent air attacks from Germans. In 1943 the Americans established a
# radiolocating station on the island, called "Atlantic City". Possibly
# the UTC offset changed during the war, but I think it unlikely that
# Jan Mayen used German daylight-saving rules.
#
# Svalbard is more complicated, as it was raided in August 1941 by an
# Allied party that evacuated the civilian population to England (says
# <http://www.bartleby.com/65/sv/Svalbard.html>). The Svalbard FAQ
# <http://www.svalbard.com/SvalbardFAQ.html> says that the Germans were
# expelled on 1942-05-14. However, small parties of Germans did return,
# and according to Wilhelm Dege's book "War North of 80" (1954)
# <http://www.ucalgary.ca/UofC/departments/UP/1-55238/1-55238-110-2.html>
# the German armed forces at the Svalbard weather station code-named
# Haudegen did not surrender to the Allies until September 1945.
#
# All these events predate our cutoff date of 1970. Unless we can
# come up with more definitive info about the timekeeping during the
# war years it's probably best just to do the following for now:
Link Europe/Oslo Arctic/Longyearbyen
# Poland
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Poland 1918 1919 - Sep 16 2:00s 0 -
Rule Poland 1919 only - Apr 15 2:00s 1:00 S
Rule Poland 1944 only - Apr 3 2:00s 1:00 S
# Whitman gives 1944 Nov 30; go with Shanks & Pottenger.
Rule Poland 1944 only - Oct 4 2:00 0 -
# For 1944-1948 Whitman gives the previous day; go with Shanks & Pottenger.
Rule Poland 1945 only - Apr 29 0:00 1:00 S
Rule Poland 1945 only - Nov 1 0:00 0 -
# For 1946 on the source is Kazimierz Borkowski,
# Torun Center for Astronomy, Dept. of Radio Astronomy, Nicolaus Copernicus U.,
# <http://www.astro.uni.torun.pl/~kb/Artykuly/U-PA/Czas2.htm#tth_tAb1>
# Thanks to Przemyslaw Augustyniak (2005-05-28) for this reference.
# He also gives these further references:
# Mon Pol nr 13, poz 162 (1995) <http://www.abc.com.pl/serwis/mp/1995/0162.htm>
# Druk nr 2180 (2003) <http://www.senat.gov.pl/k5/dok/sejm/053/2180.pdf>
Rule Poland 1946 only - Apr 14 0:00s 1:00 S
Rule Poland 1946 only - Oct 7 2:00s 0 -
Rule Poland 1947 only - May 4 2:00s 1:00 S
Rule Poland 1947 1949 - Oct Sun>=1 2:00s 0 -
Rule Poland 1948 only - Apr 18 2:00s 1:00 S
Rule Poland 1949 only - Apr 10 2:00s 1:00 S
Rule Poland 1957 only - Jun 2 1:00s 1:00 S
Rule Poland 1957 1958 - Sep lastSun 1:00s 0 -
Rule Poland 1958 only - Mar 30 1:00s 1:00 S
Rule Poland 1959 only - May 31 1:00s 1:00 S
Rule Poland 1959 1961 - Oct Sun>=1 1:00s 0 -
Rule Poland 1960 only - Apr 3 1:00s 1:00 S
Rule Poland 1961 1964 - May lastSun 1:00s 1:00 S
Rule Poland 1962 1964 - Sep lastSun 1:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Warsaw 1:24:00 - LMT 1880
1:24:00 - WMT 1915 Aug 5 # Warsaw Mean Time
1:00 C-Eur CE%sT 1918 Sep 16 3:00
2:00 Poland EE%sT 1922 Jun
1:00 Poland CE%sT 1940 Jun 23 2:00
1:00 C-Eur CE%sT 1944 Oct
1:00 Poland CE%sT 1977
1:00 W-Eur CE%sT 1988
1:00 EU CE%sT
# Portugal
#
# From Rui Pedro Salgueiro (1992-11-12):
# Portugal has recently (September 27) changed timezone
# (from WET to MET or CET) to harmonize with EEC.
#
# Martin Bruckmann (1996-02-29) reports via Peter Ilieve
# that Portugal is reverting to 0:00 by not moving its clocks this spring.
# The new Prime Minister was fed up with getting up in the dark in the winter.
#
# From Paul Eggert (1996-11-12):
# IATA SSIM (1991-09) reports several 1991-09 and 1992-09 transitions
# at 02:00u, not 01:00u. Assume that these are typos.
# IATA SSIM (1991/1992) reports that the Azores were at -1:00.
# IATA SSIM (1993-02) says +0:00; later issues (through 1996-09) say -1:00.
# Guess that the Azores changed to EU rules in 1992 (since that's when Portugal
# harmonized with the EU), and that they stayed +0:00 that winter.
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
# DSH writes that despite Decree 1,469 (1915), the change to the clocks was not
# done every year, depending on what Spain did, because of railroad schedules.
# Go with Shanks & Pottenger.
Rule Port 1916 only - Jun 17 23:00 1:00 S
# Whitman gives 1916 Oct 31; go with Shanks & Pottenger.
Rule Port 1916 only - Nov 1 1:00 0 -
Rule Port 1917 only - Feb 28 23:00s 1:00 S
Rule Port 1917 1921 - Oct 14 23:00s 0 -
Rule Port 1918 only - Mar 1 23:00s 1:00 S
Rule Port 1919 only - Feb 28 23:00s 1:00 S
Rule Port 1920 only - Feb 29 23:00s 1:00 S
Rule Port 1921 only - Feb 28 23:00s 1:00 S
Rule Port 1924 only - Apr 16 23:00s 1:00 S
Rule Port 1924 only - Oct 14 23:00s 0 -
Rule Port 1926 only - Apr 17 23:00s 1:00 S
Rule Port 1926 1929 - Oct Sat>=1 23:00s 0 -
Rule Port 1927 only - Apr 9 23:00s 1:00 S
Rule Port 1928 only - Apr 14 23:00s 1:00 S
Rule Port 1929 only - Apr 20 23:00s 1:00 S
Rule Port 1931 only - Apr 18 23:00s 1:00 S
# Whitman gives 1931 Oct 8; go with Shanks & Pottenger.
Rule Port 1931 1932 - Oct Sat>=1 23:00s 0 -
Rule Port 1932 only - Apr 2 23:00s 1:00 S
Rule Port 1934 only - Apr 7 23:00s 1:00 S
# Whitman gives 1934 Oct 5; go with Shanks & Pottenger.
Rule Port 1934 1938 - Oct Sat>=1 23:00s 0 -
# Shanks & Pottenger give 1935 Apr 30; go with Whitman.
Rule Port 1935 only - Mar 30 23:00s 1:00 S
Rule Port 1936 only - Apr 18 23:00s 1:00 S
# Whitman gives 1937 Apr 2; go with Shanks & Pottenger.
Rule Port 1937 only - Apr 3 23:00s 1:00 S
Rule Port 1938 only - Mar 26 23:00s 1:00 S
Rule Port 1939 only - Apr 15 23:00s 1:00 S
# Whitman gives 1939 Oct 7; go with Shanks & Pottenger.
Rule Port 1939 only - Nov 18 23:00s 0 -
Rule Port 1940 only - Feb 24 23:00s 1:00 S
# Shanks & Pottenger give 1940 Oct 7; go with Whitman.
Rule Port 1940 1941 - Oct 5 23:00s 0 -
Rule Port 1941 only - Apr 5 23:00s 1:00 S
Rule Port 1942 1945 - Mar Sat>=8 23:00s 1:00 S
Rule Port 1942 only - Apr 25 22:00s 2:00 M # Midsummer
Rule Port 1942 only - Aug 15 22:00s 1:00 S
Rule Port 1942 1945 - Oct Sat>=24 23:00s 0 -
Rule Port 1943 only - Apr 17 22:00s 2:00 M
Rule Port 1943 1945 - Aug Sat>=25 22:00s 1:00 S
Rule Port 1944 1945 - Apr Sat>=21 22:00s 2:00 M
Rule Port 1946 only - Apr Sat>=1 23:00s 1:00 S
Rule Port 1946 only - Oct Sat>=1 23:00s 0 -
Rule Port 1947 1949 - Apr Sun>=1 2:00s 1:00 S
Rule Port 1947 1949 - Oct Sun>=1 2:00s 0 -
# Shanks & Pottenger say DST was observed in 1950; go with Whitman.
# Whitman gives Oct lastSun for 1952 on; go with Shanks & Pottenger.
Rule Port 1951 1965 - Apr Sun>=1 2:00s 1:00 S
Rule Port 1951 1965 - Oct Sun>=1 2:00s 0 -
Rule Port 1977 only - Mar 27 0:00s 1:00 S
Rule Port 1977 only - Sep 25 0:00s 0 -
Rule Port 1978 1979 - Apr Sun>=1 0:00s 1:00 S
Rule Port 1978 only - Oct 1 0:00s 0 -
Rule Port 1979 1982 - Sep lastSun 1:00s 0 -
Rule Port 1980 only - Mar lastSun 0:00s 1:00 S
Rule Port 1981 1982 - Mar lastSun 1:00s 1:00 S
Rule Port 1983 only - Mar lastSun 2:00s 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
# Shanks & Pottenger say the transition from LMT to WET occurred 1911-05-24;
# Willett says 1912-01-01. Go with Willett.
Zone Europe/Lisbon -0:36:32 - LMT 1884
-0:36:32 - LMT 1912 Jan 1 # Lisbon Mean Time
0:00 Port WE%sT 1966 Apr 3 2:00
1:00 - CET 1976 Sep 26 1:00
0:00 Port WE%sT 1983 Sep 25 1:00s
0:00 W-Eur WE%sT 1992 Sep 27 1:00s
1:00 EU CE%sT 1996 Mar 31 1:00u
0:00 EU WE%sT
Zone Atlantic/Azores -1:42:40 - LMT 1884 # Ponta Delgada
-1:54:32 - HMT 1911 May 24 # Horta Mean Time
-2:00 Port AZO%sT 1966 Apr 3 2:00 # Azores Time
-1:00 Port AZO%sT 1983 Sep 25 1:00s
-1:00 W-Eur AZO%sT 1992 Sep 27 1:00s
0:00 EU WE%sT 1993 Mar 28 1:00u
-1:00 EU AZO%sT
Zone Atlantic/Madeira -1:07:36 - LMT 1884 # Funchal
-1:07:36 - FMT 1911 May 24 # Funchal Mean Time
-1:00 Port MAD%sT 1966 Apr 3 2:00 # Madeira Time
0:00 Port WE%sT 1983 Sep 25 1:00s
0:00 EU WE%sT
# Romania
#
# From Paul Eggert (1999-10-07):
# <a href="http://www.nineoclock.ro/POL/1778pol.html">
# Nine O'clock</a> (1998-10-23) reports that the switch occurred at
# 04:00 local time in fall 1998. For lack of better info,
# assume that Romania and Moldova switched to EU rules in 1997,
# the same year as Bulgaria.
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Romania 1932 only - May 21 0:00s 1:00 S
Rule Romania 1932 1939 - Oct Sun>=1 0:00s 0 -
Rule Romania 1933 1939 - Apr Sun>=2 0:00s 1:00 S
Rule Romania 1979 only - May 27 0:00 1:00 S
Rule Romania 1979 only - Sep lastSun 0:00 0 -
Rule Romania 1980 only - Apr 5 23:00 1:00 S
Rule Romania 1980 only - Sep lastSun 1:00 0 -
Rule Romania 1991 1993 - Mar lastSun 0:00s 1:00 S
Rule Romania 1991 1993 - Sep lastSun 0:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Bucharest 1:44:24 - LMT 1891 Oct
1:44:24 - BMT 1931 Jul 24 # Bucharest MT
2:00 Romania EE%sT 1981 Mar 29 2:00s
2:00 C-Eur EE%sT 1991
2:00 Romania EE%sT 1994
2:00 E-Eur EE%sT 1997
2:00 EU EE%sT
# Russia
# From Paul Eggert (2006-03-22):
# Except for Moscow after 1919-07-01, I invented the time zone abbreviations.
# Moscow time zone abbreviations after 1919-07-01, and Moscow rules after 1991,
# are from Andrey A. Chernov. The rest is from Shanks & Pottenger,
# except we follow Chernov's report that 1992 DST transitions were Sat
# 23:00, not Sun 02:00s.
#
# From Stanislaw A. Kuzikowski (1994-06-29):
# But now it is some months since Novosibirsk is 3 hours ahead of Moscow!
# I do not know why they have decided to make this change;
# as far as I remember it was done exactly during winter->summer switching
# so we (Novosibirsk) simply did not switch.
#
# From Andrey A. Chernov (1996-10-04):
# `MSK' and `MSD' were born and used initially on Moscow computers with
# UNIX-like OSes by several developer groups (e.g. Demos group, Kiae group)....
# The next step was the UUCP network, the Relcom predecessor
# (used mainly for mail), and MSK/MSD was actively used there.
#
# From Chris Carrier (1996-10-30):
# According to a friend of mine who rode the Trans-Siberian Railroad from
# Moscow to Irkutsk in 1995, public air and rail transport in Russia ...
# still follows Moscow time, no matter where in Russia it is located.
#
# For Grozny, Chechnya, we have the following story from
# John Daniszewski, "Scavengers in the Rubble", Los Angeles Times (2001-02-07):
# News--often false--is spread by word of mouth. A rumor that it was
# time to move the clocks back put this whole city out of sync with
# the rest of Russia for two weeks--even soldiers stationed here began
# enforcing curfew at the wrong time.
#
# From Gwillim Law (2001-06-05):
# There's considerable evidence that Sakhalin Island used to be in
# UTC+11, and has changed to UTC+10, in this decade. I start with the
# SSIM, which listed Yuzhno-Sakhalinsk in zone RU10 along with Magadan
# until February 1997, and then in RU9 with Khabarovsk and Vladivostok
# since September 1997.... Although the Kuril Islands are
# administratively part of Sakhalin oblast', they appear to have
# remained on UTC+11 along with Magadan.
#
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
#
# Kaliningradskaya oblast'.
Zone Europe/Kaliningrad 1:22:00 - LMT 1893 Apr
1:00 C-Eur CE%sT 1945
2:00 Poland CE%sT 1946
3:00 Russia MSK/MSD 1991 Mar 31 2:00s
2:00 Russia EE%sT 2011 Mar 27 2:00s
3:00 - FET # Further-eastern European Time
#
# From Oscar van Vlijmen (2001-08-25): [This region consists of]
# Respublika Adygeya, Arkhangel'skaya oblast',
# Belgorodskaya oblast', Bryanskaya oblast', Vladimirskaya oblast',
# Vologodskaya oblast', Voronezhskaya oblast',
# Respublika Dagestan, Ivanovskaya oblast', Respublika Ingushetiya,
# Kabardino-Balkarskaya Respublika, Respublika Kalmykiya,
# Kaluzhskaya oblast', Respublika Karachaevo-Cherkessiya,
# Respublika Kareliya, Respublika Komi,
# Kostromskaya oblast', Krasnodarskij kraj, Kurskaya oblast',
# Leningradskaya oblast', Lipetskaya oblast', Respublika Marij El,
# Respublika Mordoviya, Moskva, Moskovskaya oblast',
# Murmanskaya oblast', Nenetskij avtonomnyj okrug,
# Nizhegorodskaya oblast', Novgorodskaya oblast', Orlovskaya oblast',
# Penzenskaya oblast', Pskovskaya oblast', Rostovskaya oblast',
# Ryazanskaya oblast', Sankt-Peterburg,
# Respublika Severnaya Osetiya, Smolenskaya oblast',
# Stavropol'skij kraj, Tambovskaya oblast', Respublika Tatarstan,
# Tverskaya oblast', Tul'skaya oblast', Ul'yanovskaya oblast',
# Chechenskaya Respublika, Chuvashskaya Respublika,
# Yaroslavskaya oblast'
Zone Europe/Moscow 2:30:20 - LMT 1880
2:30 - MMT 1916 Jul 3 # Moscow Mean Time
2:30:48 Russia %s 1919 Jul 1 2:00
3:00 Russia MSK/MSD 1922 Oct
2:00 - EET 1930 Jun 21
3:00 Russia MSK/MSD 1991 Mar 31 2:00s
2:00 Russia EE%sT 1992 Jan 19 2:00s
3:00 Russia MSK/MSD 2011 Mar 27 2:00s
4:00 - MSK
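# A Zone block is read top-down: each line applies until the instant in its
# UNTIL column, when the next line takes over. A much simplified sketch of
# that lookup for the block above (illustration only; it ignores the DST
# rules and the s/u suffixes on UNTIL):
#
#   from datetime import datetime
#
#   MOSCOW = [  # (standard offset in hours, applies until)
#       (3, datetime(1991, 3, 31)), (2, datetime(1992, 1, 19)),
#       (3, datetime(2011, 3, 27)), (4, datetime.max),
#   ]
#
#   def std_offset(when):
#       for hours, until in MOSCOW:
#           if when < until:
#               return hours
#
#   assert std_offset(datetime(2012, 6, 1)) == 4   # permanent MSK after 2011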
#
# Astrakhanskaya oblast', Kirovskaya oblast', Saratovskaya oblast',
# Volgogradskaya oblast'. Shanks & Pottenger say Kirov is still at +0400
# but Wikipedia (2006-05-09) says +0300. Perhaps it switched after the
# others? But we have no data.
Zone Europe/Volgograd 2:57:40 - LMT 1920 Jan 3
3:00 - TSAT 1925 Apr 6 # Tsaritsyn Time
3:00 - STAT 1930 Jun 21 # Stalingrad Time
4:00 - STAT 1961 Nov 11
4:00 Russia VOL%sT 1989 Mar 26 2:00s # Volgograd T
3:00 Russia VOL%sT 1991 Mar 31 2:00s
4:00 - VOLT 1992 Mar 29 2:00s
3:00 Russia VOL%sT 2011 Mar 27 2:00s
4:00 - VOLT
#
# From Oscar van Vlijmen (2001-08-25): [This region consists of]
# Samarskaya oblast', Udmurtskaya respublika
Zone Europe/Samara 3:20:36 - LMT 1919 Jul 1 2:00
3:00 - SAMT 1930 Jun 21
4:00 - SAMT 1935 Jan 27
4:00 Russia KUY%sT 1989 Mar 26 2:00s # Kuybyshev
3:00 Russia KUY%sT 1991 Mar 31 2:00s
2:00 Russia KUY%sT 1991 Sep 29 2:00s
3:00 - KUYT 1991 Oct 20 3:00
4:00 Russia SAM%sT 2010 Mar 28 2:00s # Samara Time
3:00 Russia SAM%sT 2011 Mar 27 2:00s
4:00 - SAMT
#
# From Oscar van Vlijmen (2001-08-25): [This region consists of]
# Respublika Bashkortostan, Komi-Permyatskij avtonomnyj okrug,
# Kurganskaya oblast', Orenburgskaya oblast', Permskaya oblast',
# Sverdlovskaya oblast', Tyumenskaya oblast',
# Khanty-Mansijskij avtonomnyj okrug, Chelyabinskaya oblast',
# Yamalo-Nenetskij avtonomnyj okrug.
Zone Asia/Yekaterinburg 4:02:24 - LMT 1919 Jul 15 4:00
4:00 - SVET 1930 Jun 21 # Sverdlovsk Time
5:00 Russia SVE%sT 1991 Mar 31 2:00s
4:00 Russia SVE%sT 1992 Jan 19 2:00s
5:00 Russia YEK%sT 2011 Mar 27 2:00s
6:00 - YEKT # Yekaterinburg Time
#
# From Oscar van Vlijmen (2001-08-25): [This region consists of]
# Respublika Altaj, Altajskij kraj, Omskaya oblast'.
Zone Asia/Omsk 4:53:36 - LMT 1919 Nov 14
			5:00	-	OMST	1930 Jun 21 # Omsk Time
6:00 Russia OMS%sT 1991 Mar 31 2:00s
5:00 Russia OMS%sT 1992 Jan 19 2:00s
6:00 Russia OMS%sT 2011 Mar 27 2:00s
7:00 - OMST
#
# From Paul Eggert (2006-08-19): I'm guessing about Tomsk here; it's
# not clear when it switched from +7 to +6.
# Novosibirskaya oblast', Tomskaya oblast'.
Zone Asia/Novosibirsk 5:31:40 - LMT 1919 Dec 14 6:00
6:00 - NOVT 1930 Jun 21 # Novosibirsk Time
7:00 Russia NOV%sT 1991 Mar 31 2:00s
6:00 Russia NOV%sT 1992 Jan 19 2:00s
7:00 Russia NOV%sT 1993 May 23 # say Shanks & P.
6:00 Russia NOV%sT 2011 Mar 27 2:00s
7:00 - NOVT
# From Alexander Krivenyshev (2009-10-13):
# Kemerovo oblast' (Kemerovo region) in Russia will change current time zone on
# March 28, 2010:
# from current Russia Zone 6 - Krasnoyarsk Time Zone (KRA) UTC +0700
# to Russia Zone 5 - Novosibirsk Time Zone (NOV) UTC +0600
#
# This is according to Government of Russia decree # 740, on September
# 14, 2009 "Application in the territory of the Kemerovo region the Fifth
# time zone." ("Russia Zone 5" or old "USSR Zone 5" is GMT +0600)
#
# Russian Government web site (Russian language)
# <a href="http://www.government.ru/content/governmentactivity/rfgovernmentdecisions/archiv">
# http://www.government.ru/content/governmentactivity/rfgovernmentdecisions/archive/2009/09/14/991633.htm
# </a>
# or Russian-English translation by WorldTimeZone.com with reference
# map to local region and new Russia Time Zone map after March 28, 2010
# <a href="http://www.worldtimezone.com/dst_news/dst_news_russia03.html">
# http://www.worldtimezone.com/dst_news/dst_news_russia03.html
# </a>
#
# Thus, when Russia switches to DST on the night of March 28, 2010, the
# Kemerovo region (Kemerovo oblast') will not change the clock.
#
# As a result, Kemerovo oblast' will be in the same time zone as
# Novosibirsk, Omsk, Tomsk, Barnaul and Altai Republic.
Zone Asia/Novokuznetsk 5:48:48 - NMT 1920 Jan 6
6:00 - KRAT 1930 Jun 21 # Krasnoyarsk Time
7:00 Russia KRA%sT 1991 Mar 31 2:00s
6:00 Russia KRA%sT 1992 Jan 19 2:00s
7:00 Russia KRA%sT 2010 Mar 28 2:00s
6:00 Russia NOV%sT 2011 Mar 27 2:00s
7:00 - NOVT # Novosibirsk/Novokuznetsk Time
#
# From Oscar van Vlijmen (2001-08-25): [This region consists of]
# Krasnoyarskij kraj,
# Tajmyrskij (Dolgano-Nenetskij) avtonomnyj okrug,
# Respublika Tuva, Respublika Khakasiya, Evenkijskij avtonomnyj okrug.
Zone Asia/Krasnoyarsk 6:11:20 - LMT 1920 Jan 6
6:00 - KRAT 1930 Jun 21 # Krasnoyarsk Time
7:00 Russia KRA%sT 1991 Mar 31 2:00s
6:00 Russia KRA%sT 1992 Jan 19 2:00s
7:00 Russia KRA%sT 2011 Mar 27 2:00s
8:00 - KRAT
#
# From Oscar van Vlijmen (2001-08-25): [This region consists of]
# Respublika Buryatiya, Irkutskaya oblast',
# Ust'-Ordynskij Buryatskij avtonomnyj okrug.
Zone Asia/Irkutsk 6:57:20 - LMT 1880
6:57:20 - IMT 1920 Jan 25 # Irkutsk Mean Time
7:00 - IRKT 1930 Jun 21 # Irkutsk Time
8:00 Russia IRK%sT 1991 Mar 31 2:00s
7:00 Russia IRK%sT 1992 Jan 19 2:00s
8:00 Russia IRK%sT 2011 Mar 27 2:00s
9:00 - IRKT
#
# From Oscar van Vlijmen (2003-10-18): [This region consists of]
# Aginskij Buryatskij avtonomnyj okrug, Amurskaya oblast',
# [parts of] Respublika Sakha (Yakutiya), Chitinskaya oblast'.
# From Oscar van Vlijmen (2009-11-29):
# ...some regions of [Russia] were merged with others since 2005...
# Some names were changed, no big deal, except for one instance: a new name.
# YAK/YAKST: UTC+9 Zabajkal'skij kraj.
# From Oscar van Vlijmen (2009-11-29):
# The Sakha districts are: Aldanskij, Amginskij, Anabarskij,
# Verkhnevilyujskij, Vilyujskij, Gornyj,
# Zhiganskij, Kobyajskij, Lenskij, Megino-Kangalasskij, Mirninskij,
# Namskij, Nyurbinskij, Olenyokskij, Olyokminskij,
# Suntarskij, Tattinskij, Ust'-Aldanskij, Khangalasskij,
# Churapchinskij, Eveno-Bytantajskij Natsional'nij.
Zone Asia/Yakutsk 8:38:40 - LMT 1919 Dec 15
8:00 - YAKT 1930 Jun 21 # Yakutsk Time
9:00 Russia YAK%sT 1991 Mar 31 2:00s
8:00 Russia YAK%sT 1992 Jan 19 2:00s
9:00 Russia YAK%sT 2011 Mar 27 2:00s
10:00 - YAKT
#
# From Oscar van Vlijmen (2003-10-18): [This region consists of]
# Evrejskaya avtonomnaya oblast', Khabarovskij kraj, Primorskij kraj,
# [parts of] Respublika Sakha (Yakutiya).
# From Oscar van Vlijmen (2009-11-29):
# The Sakha districts are: Bulunskij, Verkhoyanskij, Tomponskij, Ust'-Majskij,
# Ust'-Yanskij.
Zone Asia/Vladivostok 8:47:44 - LMT 1922 Nov 15
9:00 - VLAT 1930 Jun 21 # Vladivostok Time
10:00 Russia VLA%sT 1991 Mar 31 2:00s
			 9:00	Russia	VLA%sT	1992 Jan 19 2:00s
10:00 Russia VLA%sT 2011 Mar 27 2:00s
11:00 - VLAT
#
# Sakhalinskaya oblast'.
# The Zone name should be Yuzhno-Sakhalinsk, but that's too long.
Zone Asia/Sakhalin 9:30:48 - LMT 1905 Aug 23
9:00 - CJT 1938
9:00 - JST 1945 Aug 25
11:00 Russia SAK%sT 1991 Mar 31 2:00s # Sakhalin T.
10:00 Russia SAK%sT 1992 Jan 19 2:00s
11:00 Russia SAK%sT 1997 Mar lastSun 2:00s
10:00 Russia SAK%sT 2011 Mar 27 2:00s
11:00 - SAKT
#
# From Oscar van Vlijmen (2003-10-18): [This region consists of]
# Magadanskaya oblast', Respublika Sakha (Yakutiya).
# Probably also: Kuril Islands.
# From Oscar van Vlijmen (2009-11-29):
# The Sakha districts are: Abyjskij, Allaikhovskij, Verkhnekolymskij, Momskij,
# Nizhnekolymskij, Ojmyakonskij, Srednekolymskij.
Zone Asia/Magadan 10:03:12 - LMT 1924 May 2
10:00 - MAGT 1930 Jun 21 # Magadan Time
11:00 Russia MAG%sT 1991 Mar 31 2:00s
10:00 Russia MAG%sT 1992 Jan 19 2:00s
11:00 Russia MAG%sT 2011 Mar 27 2:00s
12:00 - MAGT
#
# From Oscar van Vlijmen (2001-08-25): [This region consists of]
# Kamchatskaya oblast', Koryakskij avtonomnyj okrug.
#
# The Zone name should be Asia/Petropavlovsk-Kamchatski, but that's too long.
Zone Asia/Kamchatka 10:34:36 - LMT 1922 Nov 10
11:00 - PETT 1930 Jun 21 # P-K Time
12:00 Russia PET%sT 1991 Mar 31 2:00s
11:00 Russia PET%sT 1992 Jan 19 2:00s
12:00 Russia PET%sT 2010 Mar 28 2:00s
11:00 Russia PET%sT 2011 Mar 27 2:00s
12:00 - PETT
#
# Chukotskij avtonomnyj okrug
Zone Asia/Anadyr 11:49:56 - LMT 1924 May 2
12:00 - ANAT 1930 Jun 21 # Anadyr Time
13:00 Russia ANA%sT 1982 Apr 1 0:00s
12:00 Russia ANA%sT 1991 Mar 31 2:00s
11:00 Russia ANA%sT 1992 Jan 19 2:00s
12:00 Russia ANA%sT 2010 Mar 28 2:00s
11:00 Russia ANA%sT 2011 Mar 27 2:00s
12:00 - ANAT
# Serbia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Belgrade 1:22:00 - LMT 1884
1:00 - CET 1941 Apr 18 23:00
1:00 C-Eur CE%sT 1945
1:00 - CET 1945 May 8 2:00s
1:00 1:00 CEST 1945 Sep 16 2:00s
# Metod Kozelj reports that the legal date of
# transition to EU rules was 1982-11-27, for all of Yugoslavia at the time.
# Shanks & Pottenger don't give as much detail, so go with Kozelj.
1:00 - CET 1982 Nov 27
1:00 EU CE%sT
Link Europe/Belgrade Europe/Ljubljana # Slovenia
Link Europe/Belgrade Europe/Podgorica # Montenegro
Link Europe/Belgrade Europe/Sarajevo # Bosnia and Herzegovina
Link Europe/Belgrade Europe/Skopje # Macedonia
Link Europe/Belgrade Europe/Zagreb # Croatia
# Slovakia
Link Europe/Prague Europe/Bratislava
# Slovenia
# see Serbia
# Spain
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
# For 1917-1919 Whitman gives Apr Sat>=1 - Oct Sat>=1;
# go with Shanks & Pottenger.
Rule Spain 1917 only - May 5 23:00s 1:00 S
Rule Spain 1917 1919 - Oct 6 23:00s 0 -
Rule Spain 1918 only - Apr 15 23:00s 1:00 S
Rule Spain 1919 only - Apr 5 23:00s 1:00 S
# Whitman gives 1921 Feb 28 - Oct 14; go with Shanks & Pottenger.
Rule Spain 1924 only - Apr 16 23:00s 1:00 S
# Whitman gives 1924 Oct 14; go with Shanks & Pottenger.
Rule Spain 1924 only - Oct 4 23:00s 0 -
Rule Spain 1926 only - Apr 17 23:00s 1:00 S
# Whitman says no DST in 1929; go with Shanks & Pottenger.
Rule Spain 1926 1929 - Oct Sat>=1 23:00s 0 -
Rule Spain 1927 only - Apr 9 23:00s 1:00 S
Rule Spain 1928 only - Apr 14 23:00s 1:00 S
Rule Spain 1929 only - Apr 20 23:00s 1:00 S
# Whitman gives 1937 Jun 16, 1938 Apr 16, 1940 Apr 13;
# go with Shanks & Pottenger.
Rule Spain 1937 only - May 22 23:00s 1:00 S
Rule Spain 1937 1939 - Oct Sat>=1 23:00s 0 -
Rule Spain 1938 only - Mar 22 23:00s 1:00 S
Rule Spain 1939 only - Apr 15 23:00s 1:00 S
Rule Spain 1940 only - Mar 16 23:00s 1:00 S
# Whitman says no DST 1942-1945; go with Shanks & Pottenger.
Rule Spain 1942 only - May 2 22:00s 2:00 M # Midsummer
Rule Spain 1942 only - Sep 1 22:00s 1:00 S
Rule Spain 1943 1946 - Apr Sat>=13 22:00s 2:00 M
Rule Spain 1943 only - Oct 3 22:00s 1:00 S
Rule Spain 1944 only - Oct 10 22:00s 1:00 S
Rule Spain 1945 only - Sep 30 1:00 1:00 S
Rule Spain 1946 only - Sep 30 0:00 0 -
Rule Spain 1949 only - Apr 30 23:00 1:00 S
Rule Spain 1949 only - Sep 30 1:00 0 -
Rule Spain 1974 1975 - Apr Sat>=13 23:00 1:00 S
Rule Spain 1974 1975 - Oct Sun>=1 1:00 0 -
Rule Spain 1976 only - Mar 27 23:00 1:00 S
Rule Spain 1976 1977 - Sep lastSun 1:00 0 -
Rule Spain 1977 1978 - Apr 2 23:00 1:00 S
Rule Spain 1978 only - Oct 1 1:00 0 -
# The following rules are copied from Morocco from 1967 through 1978.
Rule SpainAfrica 1967 only - Jun 3 12:00 1:00 S
Rule SpainAfrica 1967 only - Oct 1 0:00 0 -
Rule SpainAfrica 1974 only - Jun 24 0:00 1:00 S
Rule SpainAfrica 1974 only - Sep 1 0:00 0 -
Rule SpainAfrica 1976 1977 - May 1 0:00 1:00 S
Rule SpainAfrica 1976 only - Aug 1 0:00 0 -
Rule SpainAfrica 1977 only - Sep 28 0:00 0 -
Rule SpainAfrica 1978 only - Jun 1 0:00 1:00 S
Rule SpainAfrica 1978 only - Aug 4 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Madrid -0:14:44 - LMT 1901 Jan 1 0:00s
0:00 Spain WE%sT 1946 Sep 30
1:00 Spain CE%sT 1979
1:00 EU CE%sT
Zone Africa/Ceuta -0:21:16 - LMT 1901
0:00 - WET 1918 May 6 23:00
0:00 1:00 WEST 1918 Oct 7 23:00
0:00 - WET 1924
0:00 Spain WE%sT 1929
0:00 SpainAfrica WE%sT 1984 Mar 16
1:00 - CET 1986
1:00 EU CE%sT
Zone Atlantic/Canary -1:01:36 - LMT 1922 Mar # Las Palmas de Gran C.
-1:00 - CANT 1946 Sep 30 1:00 # Canaries Time
0:00 - WET 1980 Apr 6 0:00s
0:00 1:00 WEST 1980 Sep 28 0:00s
0:00 EU WE%sT
# IATA SSIM (1996-09) says the Canaries switch at 2:00u, not 1:00u.
# Ignore this for now, as the Canaries are part of the EU.
# Sweden
# From Ivan Nilsson (2001-04-13), superseding Shanks & Pottenger:
#
# The law "Svensk forfattningssamling 1878, no 14" about standard time in 1879:
# From the beginning of 1879 (that is 01-01 00:00) the time for all
# places in the country is "the mean solar time for the meridian at
# three degrees, or twelve minutes of time, to the west of the
# meridian of the Observatory of Stockholm". The law is dated 1878-05-31.
#
# The observatory at that time had the meridian 18 degrees 03' 30"
# eastern longitude = 01:12:14 in time. Less 12 minutes gives the
# national standard time as 01:00:14 ahead of GMT....
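# The arithmetic checks out (one degree of longitude = four minutes of time;
# illustration only):
#
#   sec = round((18 + 3 / 60 + 30 / 3600) * 240)   # 18 deg 03' 30" east
#   assert sec == 4334                             # = 01:12:14
#   assert sec - 12 * 60 == 3614                   # = 01:00:14, the SET below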
#
# About the beginning of CET in Sweden. The lawtext ("Svensk
# forfattningssamling 1899, no 44") states, that "from the beginning
# of 1900... ... the same as the mean solar time for the meridian at
# the distance of one hour of time from the meridian of the English
# observatory at Greenwich, or at 12 minutes 14 seconds to the west
# from the meridian of the Observatory of Stockholm". The law is dated
# 1899-06-16. In short: At 1900-01-01 00:00:00 the new standard time
# in Sweden is 01:00:00 ahead of GMT.
#
# 1916: The lawtext ("Svensk forfattningssamling 1916, no 124") states
# that "1916-05-15 is considered to begin one hour earlier". It is
# pretty obvious that at 05-14 23:00 the clocks are set to 05-15 00:00....
# Further the law says, that "1916-09-30 is considered to end one hour later".
#
# The laws regulating [DST] are available on the site of the Swedish
# Parliament beginning with 1985 - the laws regulating 1980/1984 are
# not available on the site (to my knowledge they are only available
# in Swedish): <http://www.riksdagen.se/english/work/sfst.asp> (type
# "sommartid" without the quotes in the field "Fritext" and then click
# the Sok-button).
#
# (2001-05-13):
#
# I have now found a newspaper stating that at 1916-10-01 01:00
# summertime the church-clocks etc were set back one hour to show
# 1916-10-01 00:00 standard time. The article also reports that some
# people thought the switch to standard time would take place already
# at 1916-10-01 00:00 summer time, but they had to wait for another
# hour before the event took place.
#
# Source: The newspaper "Dagens Nyheter", 1916-10-01, page 7 upper left.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Stockholm 1:12:12 - LMT 1879 Jan 1
1:00:14 - SET 1900 Jan 1 # Swedish Time
1:00 - CET 1916 May 14 23:00
1:00 1:00 CEST 1916 Oct 1 01:00
1:00 - CET 1980
1:00 EU CE%sT
# Switzerland
# From Howse:
# By the end of the 18th century clocks and watches became commonplace
# and their performance improved enormously. Communities began to keep
# mean time in preference to apparent time -- Geneva from 1780 ....
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
# From Whitman (who writes ``Midnight?''):
# Rule Swiss 1940 only - Nov 2 0:00 1:00 S
# Rule Swiss 1940 only - Dec 31 0:00 0 -
# From Shanks & Pottenger:
# Rule Swiss 1941 1942 - May Sun>=1 2:00 1:00 S
# Rule Swiss 1941 1942 - Oct Sun>=1 0:00 0 -
# From Alois Treindl (2008-12-17):
# I have researched the DST usage in Switzerland during the 1940s.
#
# As I wrote in an earlier message, I suspected the current tzdata values
# to be wrong. This is now verified.
#
# I have found copies of the original ruling by the Swiss Federal
# government, in 'Eidgen[o]ssische Gesetzessammlung 1941 and 1942' (Swiss
# federal law collection)...
#
# DST began on Monday 5 May 1941, 1:00 am by shifting the clocks to 2:00 am
# DST ended on Monday 6 Oct 1941, 2:00 am by shifting the clocks to 1:00 am.
#
# DST began on Monday, 4 May 1942 at 01:00 am
# DST ended on Monday, 5 Oct 1942 at 02:00 am
#
# There was no DST in 1940, I have checked the law collection carefully.
# It is also indicated by the fact that the 1942 entry in the law
# collection points back to 1941 as a reference, but no reference to any
# other years is made.
#
# Newspaper articles I have read in the archives on 6 May 1941 reported
# about the introduction of DST (Sommerzeit in German) during the previous
# night as an absolute novelty, because this was the first time that such
# a thing had happened in Switzerland.
#
# I have also checked 1916, because one book source (Gabriel, Traite de
# l'heure dans le monde) claims that Switzerland had DST in 1916. This is
# false, no official document could be found. Probably Gabriel got misled
# by references to Germany, which introduced DST in 1916 for the first time.
#
# The tzdata rules for Switzerland must be changed to:
# Rule Swiss 1941 1942 - May Mon>=1 1:00 1:00 S
# Rule Swiss 1941 1942 - Oct Mon>=1 2:00 0 -
#
# The 1940 rules must be deleted.
#
# One further detail for Switzerland, which is probably out of scope for
# most users of tzdata:
# The zone file
# Zone Europe/Zurich 0:34:08 - LMT 1848 Sep 12
# 0:29:44 - BMT 1894 Jun #Bern Mean Time
# 1:00 Swiss CE%sT 1981
# 1:00 EU CE%sT
# describes all of Switzerland correctly, with the exception of
# the Cantone Geneve (Geneva, Genf). Between 1848 and 1894 Geneve did not
# follow Bern Mean Time but kept its own local mean time.
# To represent this, an extra zone would be needed.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Swiss 1941 1942 - May Mon>=1 1:00 1:00 S
Rule Swiss 1941 1942 - Oct Mon>=1 2:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Zurich 0:34:08 - LMT 1848 Sep 12
0:29:44 - BMT 1894 Jun # Bern Mean Time
1:00 Swiss CE%sT 1981
1:00 EU CE%sT
# Turkey
# From Amar Devegowda (2007-01-03):
# The time zone rules for Istanbul, Turkey have not been changed for years now.
# ... The latest rules are available at -
# http://www.timeanddate.com/worldclock/timezone.html?n=107
# From Steffen Thorsen (2007-01-03):
# I have been able to find press records back to 1996 which all say that
# DST started at 01:00 local time and ended at 02:00 local time. I am not sure
# what happened before that. One example for each year from 1996 to 2001:
# http://newspot.byegm.gov.tr/arsiv/1996/21/N4.htm
# http://www.byegm.gov.tr/YAYINLARIMIZ/CHR/ING97/03/97X03X25.TXT
# http://www.byegm.gov.tr/YAYINLARIMIZ/CHR/ING98/03/98X03X02.HTM
# http://www.byegm.gov.tr/YAYINLARIMIZ/CHR/ING99/10/99X10X26.HTM#%2016
# http://www.byegm.gov.tr/YAYINLARIMIZ/CHR/ING2000/03/00X03X06.HTM#%2021
# http://www.byegm.gov.tr/YAYINLARIMIZ/CHR/ING2001/03/23x03x01.HTM#%2027
# From Paul Eggert (2007-01-03):
# Prefer the above source to Shanks & Pottenger for time stamps after 1990.
# From Steffen Thorsen (2007-03-09):
# Starting 2007 though, it seems that they are adopting EU's 1:00 UTC
# start/end time, according to the following page (2007-03-07):
# http://www.ntvmsnbc.com/news/402029.asp
# The official document is located here - it is in Turkish...:
# http://rega.basbakanlik.gov.tr/eskiler/2007/03/20070307-7.htm
# I was able to locate the following seemingly official document
# (on a non-government server though) describing dates between 2002 and 2006:
# http://www.alomaliye.com/bkk_2002_3769.htm
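# The practical effect of adopting the EU's 01:00 UTC instant: in Turkey
# (standard offset +02:00) the spring clocks read 03:00 when they jump to
# 04:00 EEST. A quick check (illustration only):
#
#   from datetime import datetime, timedelta, timezone
#
#   t = datetime(2007, 3, 25, 1, 0, tzinfo=timezone.utc)  # Mar lastSun 1:00u
#   assert t.astimezone(timezone(timedelta(hours=2))).hour == 3   # EET wall
#   assert t.astimezone(timezone(timedelta(hours=3))).hour == 4   # EEST wall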
# From Gökdeniz Karadağ (2011-03-10):
#
# According to the articles linked below, Turkey will change into summer
# time zone (GMT+3) on March 28, 2011 at 3:00 a.m. instead of March 27.
# This change is due to a nationwide exam on the 27th.
#
# <a href="http://www.worldbulletin.net/?aType=haber&ArticleID=70872">
# http://www.worldbulletin.net/?aType=haber&ArticleID=70872
# </a>
# Turkish:
# <a href="http://www.hurriyet.com.tr/ekonomi/17230464.asp?gid=373">
# http://www.hurriyet.com.tr/ekonomi/17230464.asp?gid=373
# </a>
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Turkey 1916 only - May 1 0:00 1:00 S
Rule Turkey 1916 only - Oct 1 0:00 0 -
Rule Turkey 1920 only - Mar 28 0:00 1:00 S
Rule Turkey 1920 only - Oct 25 0:00 0 -
Rule Turkey 1921 only - Apr 3 0:00 1:00 S
Rule Turkey 1921 only - Oct 3 0:00 0 -
Rule Turkey 1922 only - Mar 26 0:00 1:00 S
Rule Turkey 1922 only - Oct 8 0:00 0 -
# Whitman gives 1923 Apr 28 - Sep 16 and no DST in 1924-1925;
# go with Shanks & Pottenger.
Rule Turkey 1924 only - May 13 0:00 1:00 S
Rule Turkey 1924 1925 - Oct 1 0:00 0 -
Rule Turkey 1925 only - May 1 0:00 1:00 S
Rule Turkey 1940 only - Jun 30 0:00 1:00 S
Rule Turkey 1940 only - Oct 5 0:00 0 -
Rule Turkey 1940 only - Dec 1 0:00 1:00 S
Rule Turkey 1941 only - Sep 21 0:00 0 -
Rule Turkey 1942 only - Apr 1 0:00 1:00 S
# Whitman omits the next two transitions and gives 1945 Oct 1;
# go with Shanks & Pottenger.
Rule Turkey 1942 only - Nov 1 0:00 0 -
Rule Turkey 1945 only - Apr 2 0:00 1:00 S
Rule Turkey 1945 only - Oct 8 0:00 0 -
Rule Turkey 1946 only - Jun 1 0:00 1:00 S
Rule Turkey 1946 only - Oct 1 0:00 0 -
Rule Turkey 1947 1948 - Apr Sun>=16 0:00 1:00 S
Rule Turkey 1947 1950 - Oct Sun>=2 0:00 0 -
Rule Turkey 1949 only - Apr 10 0:00 1:00 S
Rule Turkey 1950 only - Apr 19 0:00 1:00 S
Rule Turkey 1951 only - Apr 22 0:00 1:00 S
Rule Turkey 1951 only - Oct 8 0:00 0 -
Rule Turkey 1962 only - Jul 15 0:00 1:00 S
Rule Turkey 1962 only - Oct 8 0:00 0 -
Rule Turkey 1964 only - May 15 0:00 1:00 S
Rule Turkey 1964 only - Oct 1 0:00 0 -
Rule Turkey 1970 1972 - May Sun>=2 0:00 1:00 S
Rule Turkey 1970 1972 - Oct Sun>=2 0:00 0 -
Rule Turkey 1973 only - Jun 3 1:00 1:00 S
Rule Turkey 1973 only - Nov 4 3:00 0 -
Rule Turkey 1974 only - Mar 31 2:00 1:00 S
Rule Turkey 1974 only - Nov 3 5:00 0 -
Rule Turkey 1975 only - Mar 30 0:00 1:00 S
Rule Turkey 1975 1976 - Oct lastSun 0:00 0 -
Rule Turkey 1976 only - Jun 1 0:00 1:00 S
Rule Turkey 1977 1978 - Apr Sun>=1 0:00 1:00 S
Rule Turkey 1977 only - Oct 16 0:00 0 -
Rule Turkey 1979 1980 - Apr Sun>=1 3:00 1:00 S
Rule Turkey 1979 1982 - Oct Mon>=11 0:00 0 -
Rule Turkey 1981 1982 - Mar lastSun 3:00 1:00 S
Rule Turkey 1983 only - Jul 31 0:00 1:00 S
Rule Turkey 1983 only - Oct 2 0:00 0 -
Rule Turkey 1985 only - Apr 20 0:00 1:00 S
Rule Turkey 1985 only - Sep 28 0:00 0 -
Rule Turkey 1986 1990 - Mar lastSun 2:00s 1:00 S
Rule Turkey 1986 1990 - Sep lastSun 2:00s 0 -
Rule Turkey 1991 2006 - Mar lastSun 1:00s 1:00 S
Rule Turkey 1991 1995 - Sep lastSun 1:00s 0 -
Rule Turkey 1996 2006 - Oct lastSun 1:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Europe/Istanbul 1:55:52 - LMT 1880
1:56:56 - IMT 1910 Oct # Istanbul Mean Time?
2:00 Turkey EE%sT 1978 Oct 15
3:00 Turkey TR%sT 1985 Apr 20 # Turkey Time
2:00 Turkey EE%sT 2007
2:00 EU EE%sT 2011 Mar 27 1:00u
2:00 - EET 2011 Mar 28 1:00u
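# (The one-line EET entry above suppresses the EU rule's 2011-03-27 spring
# transition for a day, implementing the exam-related delay described in
# the comments above.)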
2:00 EU EE%sT
Link Europe/Istanbul Asia/Istanbul # Istanbul is in both continents.
# Ukraine
#
# From Igor Karpov, who works for the Ukrainian Ministry of Justice,
# via Garrett Wollman (2003-01-27):
# BTW, I've found the official document on this matter. It's government
# regulations number 509, May 13, 1996. In my poor translation it says:
# "Time in Ukraine is set to second timezone (Kiev time). Each last Sunday
# of March at 3am the time is changing to 4am and each last Sunday of
# October the time at 4am is changing to 3am"
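# Those quoted local times are exactly the EU rule set the zone below uses,
# seen at Kiev's +02:00 standard offset (EU transitions are at 01:00 UTC).
# A quick check (illustration only):
#
#   from datetime import datetime, timedelta, timezone
#
#   # The last Sunday of March 1996 was Mar 31; 03:00 at +02:00 == 01:00 UTC.
#   spring = datetime(1996, 3, 31, 3, tzinfo=timezone(timedelta(hours=2)))
#   assert spring.astimezone(timezone.utc).hour == 1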
# From Alexander Krivenyshev (2011-09-20):
# On September 20, 2011 the deputies of the Verkhovna Rada agreed to
# abolish the transfer clock to winter time.
#
# Bill number 8330 of MP from the Party of Regions Oleg Nadoshi got
# approval from 266 deputies.
#
# Ukraine abolishes transfer back to the winter time (in Russian)
# <a href="http://news.mail.ru/politics/6861560/">
# http://news.mail.ru/politics/6861560/
# </a>
#
# The Ukrainians will no longer change the clock (in Russian)
# <a href="http://www.segodnya.ua/news/14290482.html">
# http://www.segodnya.ua/news/14290482.html
# </a>
#
# Deputies cancelled the winter time (in Russian)
# <a href="http://www.pravda.com.ua/rus/news/2011/09/20/6600616/">
# http://www.pravda.com.ua/rus/news/2011/09/20/6600616/
# </a>
#
# From Philip Pizzey (2011-10-18):
# Today my Ukrainian colleagues have informed me that the
# Ukrainian parliament have decided that they will go to winter
# time this year after all.
#
# From Udo Schwedt (2011-10-18):
# As far as I understand, the recent change to the Ukrainian time zone
# (Europe/Kiev) to introduce permanent daylight saving time (similar
# to Russia) was reverted today:
#
# <a href="http://portal.rada.gov.ua/rada/control/en/publish/article/info_left?art_id=287324&cat_id=105995">
# http://portal.rada.gov.ua/rada/control/en/publish/article/info_left?art_id=287324&cat_id=105995
# </a>
#
# Also reported by Alexander Bokovoy (2011-10-18) who also noted:
# The law documents themselves are at
#
# <a href="http://w1.c1.rada.gov.ua/pls/zweb_n/webproc4_1?id=&pf3511=41484">
# http://w1.c1.rada.gov.ua/pls/zweb_n/webproc4_1?id=&pf3511=41484
# </a>
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
# Most of Ukraine since 1970 has been like Kiev.
# "Kyiv" is the transliteration of the Ukrainian name, but
# "Kiev" is more common in English.
Zone Europe/Kiev 2:02:04 - LMT 1880
2:02:04 - KMT 1924 May 2 # Kiev Mean Time
2:00 - EET 1930 Jun 21
3:00 - MSK 1941 Sep 20
1:00 C-Eur CE%sT 1943 Nov 6
3:00 Russia MSK/MSD 1990
3:00 - MSK 1990 Jul 1 2:00
2:00 - EET 1992
2:00 E-Eur EE%sT 1995
2:00 EU EE%sT
# Ruthenia used CET 1990/1991.
# "Uzhhorod" is the transliteration of the Ukrainian name, but
# "Uzhgorod" is more common in English.
Zone Europe/Uzhgorod 1:29:12 - LMT 1890 Oct
1:00 - CET 1940
1:00 C-Eur CE%sT 1944 Oct
1:00 1:00 CEST 1944 Oct 26
1:00 - CET 1945 Jun 29
3:00 Russia MSK/MSD 1990
3:00 - MSK 1990 Jul 1 2:00
1:00 - CET 1991 Mar 31 3:00
2:00 - EET 1992
2:00 E-Eur EE%sT 1995
2:00 EU EE%sT
# Zaporozh'ye and eastern Lugansk oblasts observed DST 1990/1991.
# "Zaporizhia" is the transliteration of the Ukrainian name, but
# "Zaporozh'ye" is more common in English. Use the common English
# spelling, except omit the apostrophe as it is not allowed in
# portable Posix file names.
Zone Europe/Zaporozhye 2:20:40 - LMT 1880
2:20 - CUT 1924 May 2 # Central Ukraine T
2:00 - EET 1930 Jun 21
3:00 - MSK 1941 Aug 25
1:00 C-Eur CE%sT 1943 Oct 25
3:00 Russia MSK/MSD 1991 Mar 31 2:00
2:00 E-Eur EE%sT 1995
2:00 EU EE%sT
# Central Crimea used Moscow time 1994/1997.
Zone Europe/Simferopol 2:16:24 - LMT 1880
2:16 - SMT 1924 May 2 # Simferopol Mean T
2:00 - EET 1930 Jun 21
3:00 - MSK 1941 Nov
1:00 C-Eur CE%sT 1944 Apr 13
3:00 Russia MSK/MSD 1990
3:00 - MSK 1990 Jul 1 2:00
2:00 - EET 1992
# From Paul Eggert (2006-03-22):
# The _Economist_ (1994-05-28, p 45) reports that central Crimea switched
# from Kiev to Moscow time sometime after the January 1994 elections.
# Shanks (1999) says ``date of change uncertain'', but implies that it happened
# sometime between the 1994 DST switches. Shanks & Pottenger simply say
# 1994-09-25 03:00, but that can't be right. For now, guess it
# changed in May.
2:00 E-Eur EE%sT 1994 May
# From IATA SSIM (1994/1997), which also says that Kerch is still like Kiev.
3:00 E-Eur MSK/MSD 1996 Mar 31 3:00s
3:00 1:00 MSD 1996 Oct 27 3:00s
# IATA SSIM (1997-09) says Crimea switched to EET/EEST.
# Assume it happened in March by not changing the clocks.
3:00 Russia MSK/MSD 1997
3:00 - MSK 1997 Mar lastSun 1:00u
2:00 EU EE%sT
###############################################################################
# One source shows that Bulgaria, Cyprus, Finland, and Greece observe DST from
# the last Sunday in March to the last Sunday in September in 1986.
# The source shows Romania changing a day later than everybody else.
#
# According to Bernard Sieloff's source, Poland is in the MET time zone but
# uses the WE DST rules. The Western USSR uses EET+1 and ME DST rules.
# Bernard Sieloff's source claims Romania switches on the same day, but at
# 00:00 standard time (i.e., 01:00 DST). It also claims that Turkey
# switches on the same day, but switches on at 01:00 standard time
# and off at 00:00 standard time (i.e., 01:00 DST)
# ...
# Date: Wed, 28 Jan 87 16:56:27 -0100
# From: Tom Hofmann
# ...
#
# ...the European time rules are...standardized since 1981, when
# most European coun[tr]ies started DST. Before that year, only
# a few countries (UK, France, Italy) had DST, each according
# to own national rules. In 1981, however, DST started on
# 'Apr firstSun', and not on 'Mar lastSun' as in the following
# years...
# But also since 1981 there are some more national exceptions
# than listed in 'europe': Switzerland, for example, joined DST
# one year later, Denmark ended DST on 'Oct 1' instead of 'Sep
# lastSun' in 1981---I don't know how they handle now.
#
# Finally, DST is always from 'Apr 1' to 'Oct 1' in the
# Soviet Union (as far as I know).
#
# Tom Hofmann, Scientific Computer Center, CIBA-GEIGY AG,
# 4002 Basle, Switzerland
# ...
# ...
# Date: Wed, 4 Feb 87 22:35:22 +0100
# From: Dik T. Winter
# ...
#
# The information from Tom Hofmann is (as far as I know) not entirely correct.
# After a request from chongo at amdahl I tried to retrieve all information
# about DST in Europe. I was able to find all from about 1969.
#
# ...standardization on DST in Europe started in about 1977 with switches on
# first Sunday in April and last Sunday in September...
# In 1981 the UK joined Europe insofar as
# the starting day for both shifted to last Sunday in March. And from 1982
# the whole of Europe used DST, with switch dates April 1 and October 1 in
# the Sov[i]et Union. In 1985 the SU reverted to standard Europe[a]n switch
# dates...
#
# It should also be remembered that time-zones are not constants; e.g.
# Portugal switched in 1976 from MET (or CET) to WET with DST...
# Note also that though there were rules for switch dates not
# all countries abided by these dates, and many individual deviations
# occurred, though not since 1982 I believe. Another note: it is always
# assumed that DST is 1 hour ahead of normal time, this need not be the
# case; at least in the Netherlands there have been times when DST was 2 hours
# in advance of normal time.
#
# ...
# dik t. winter, cwi, amsterdam, nederland
# ...
# From Bob Devine (1988-01-28):
# ...
# Greece: Last Sunday in April to last Sunday in September (iffy on dates).
# Since 1978. Change at midnight.
# ...
# Monaco: has same DST as France.
# ...
|
274056675/springboot-openai-chatgpt | 7,103 | mng_web/src/views/tool/datasource.vue | <template>
<basic-container>
<avue-crud :option="option"
:table-loading="loading"
:data="data"
:page="page"
:permission="permissionList"
:before-open="beforeOpen"
v-model="form"
ref="crud"
@row-update="rowUpdate"
@row-save="rowSave"
@row-del="rowDel"
@search-change="searchChange"
@search-reset="searchReset"
@selection-change="selectionChange"
@current-change="currentChange"
@size-change="sizeChange"
@on-load="onLoad">
<template slot="menuLeft">
<el-button type="danger"
size="small"
icon="el-icon-delete"
plain
v-if="permission.datasource_delete"
@click="handleDelete">删 除
</el-button>
</template>
</avue-crud>
</basic-container>
</template>
<script>
import {getList, getDetail, add, update, remove} from "@/api/tool/datasource";
import {mapGetters} from "vuex";
export default {
data() {
return {
form: {},
query: {},
loading: true,
page: {
pageSize: 10,
currentPage: 1,
total: 0
},
selectionList: [],
option: {
height: 'auto',
calcHeight: 210,
searchShow: true,
searchMenuSpan: 6,
tip: false,
border: true,
index: true,
viewBtn: true,
selection: true,
column: [
{
label: "名称",
prop: "name",
width: 120,
rules: [{
required: true,
message: "请输入数据源名称",
trigger: "blur"
}]
},
{
label: "驱动类",
prop: "driverClass",
type: 'select',
dicData: [
{
label: 'com.mysql.cj.jdbc.Driver',
value: 'com.mysql.cj.jdbc.Driver',
}, {
label: 'org.postgresql.Driver',
value: 'org.postgresql.Driver',
}, {
label: 'oracle.jdbc.OracleDriver',
value: 'oracle.jdbc.OracleDriver',
}
],
width: 200,
rules: [{
required: true,
message: "请输入驱动类",
trigger: "blur"
}]
},
{
label: "用户名",
prop: "username",
width: 120,
rules: [{
required: true,
message: "请输入用户名",
trigger: "blur"
}]
},
{
label: "密码",
prop: "password",
hide: true,
rules: [{
required: true,
message: "请输入密码",
trigger: "blur"
}]
},
{
label: "连接地址",
prop: "url",
span: 24,
rules: [{
required: true,
message: "请输入连接地址",
trigger: "blur"
}]
},
{
label: "备注",
prop: "remark",
span: 24,
minRows: 3,
hide: true,
type: "textarea"
},
]
},
data: []
};
},
computed: {
...mapGetters(["permission"]),
permissionList() {
return {
addBtn: this.vaildData(this.permission.datasource_add, false),
viewBtn: this.vaildData(this.permission.datasource_view, false),
delBtn: this.vaildData(this.permission.datasource_delete, false),
editBtn: this.vaildData(this.permission.datasource_edit, false)
};
},
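    // Comma-separated id string of the currently selected rows, consumed by
    // the batch remove API below.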
ids() {
let ids = [];
this.selectionList.forEach(ele => {
ids.push(ele.id);
});
return ids.join(",");
}
},
methods: {
rowSave(row, done, loading) {
add(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowUpdate(row, index, done, loading) {
update(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowDel(row) {
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(row.id);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
});
},
handleDelete() {
if (this.selectionList.length === 0) {
this.$message.warning("请选择至少一条数据");
return;
}
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(this.ids);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
this.$refs.crud.toggleSelection();
});
},
beforeOpen(done, type) {
if (["edit", "view"].includes(type)) {
getDetail(this.form.id).then(res => {
this.form = res.data.data;
});
}
done();
},
searchReset() {
this.query = {};
this.onLoad(this.page);
},
searchChange(params, done) {
this.query = params;
this.page.currentPage = 1;
this.onLoad(this.page, params);
done();
},
selectionChange(list) {
this.selectionList = list;
},
selectionClear() {
this.selectionList = [];
this.$refs.crud.toggleSelection();
},
currentChange(currentPage) {
this.page.currentPage = currentPage;
},
sizeChange(pageSize) {
this.page.pageSize = pageSize;
},
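    // Fetch one page of records, merging any ad-hoc params with the saved
    // search query, then reset the row selection.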
onLoad(page, params = {}) {
this.loading = true;
getList(page.currentPage, page.pageSize, Object.assign(params, this.query)).then(res => {
const data = res.data.data;
this.page.total = data.total;
this.data = data.records;
this.loading = false;
this.selectionClear();
});
}
}
};
</script>
<style>
</style>
|
233zzh/TitanDataOperationSystem | 2,955 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/flot/examples/axes-time-zones/tz/etcetera | # <pre>
# This file is in the public domain, so clarified as of
# 2009-05-17 by Arthur David Olson.
# These entries are mostly present for historical reasons, so that
# people in areas not otherwise covered by the tz files could "zic -l"
# to a time zone that was right for their area. These days, the
# tz files cover almost all the inhabited world, and the only practical
# need now for the entries that are not on UTC are for ships at sea
# that cannot use POSIX TZ settings.
Zone Etc/GMT 0 - GMT
Zone Etc/UTC 0 - UTC
Zone Etc/UCT 0 - UCT
# The following link uses older naming conventions,
# but it belongs here, not in the file `backward',
# as functions like gmtime load the "GMT" file to handle leap seconds properly.
# We want this to work even on installations that omit the other older names.
Link Etc/GMT GMT
Link Etc/UTC Etc/Universal
Link Etc/UTC Etc/Zulu
Link Etc/GMT Etc/Greenwich
Link Etc/GMT Etc/GMT-0
Link Etc/GMT Etc/GMT+0
Link Etc/GMT Etc/GMT0
# We use POSIX-style signs in the Zone names and the output abbreviations,
# even though this is the opposite of what many people expect.
# POSIX has positive signs west of Greenwich, but many people expect
# positive signs east of Greenwich. For example, TZ='Etc/GMT+4' uses
# the abbreviation "GMT+4" and corresponds to 4 hours behind UTC
# (i.e. west of Greenwich) even though many people would expect it to
# mean 4 hours ahead of UTC (i.e. east of Greenwich).
#
# In draft 5 of POSIX 1003.1-200x, the angle bracket notation allows for
# TZ='<GMT-4>+4'; if you want time zone abbreviations conforming to
# ISO 8601 you can use TZ='<-0400>+4'. Thus the commonly-expected
# offset is kept within the angle bracket (and is used for display)
# while the POSIX sign is kept outside the angle bracket (and is used
# for calculation).
#
# Do not use a TZ setting like TZ='GMT+4', which is four hours behind
# GMT but uses the completely misleading abbreviation "GMT".
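#
# A quick way to see the reversed sign from code is via Python's zoneinfo
# module.  A minimal sketch (assuming Python 3.9+ and an installed tzdata;
# the date chosen is arbitrary):
#
#   from datetime import datetime, timezone
#   from zoneinfo import ZoneInfo
#
#   noon_utc = datetime(2024, 1, 1, 12, 0, tzinfo=timezone.utc)
#   print(noon_utc.astimezone(ZoneInfo("Etc/GMT+4")).isoformat())
#   # 2024-01-01T08:00:00-04:00, i.e. "GMT+4" is 4 hours *behind* UTC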
# Earlier incarnations of this package were not POSIX-compliant,
# and had lines such as
# Zone GMT-12 -12 - GMT-1200
# We did not want things to change quietly if someone accustomed to the old
# way does a
# zic -l GMT-12
# so we moved the names into the Etc subdirectory.
Zone Etc/GMT-14 14 - GMT-14 # 14 hours ahead of GMT
Zone Etc/GMT-13 13 - GMT-13
Zone Etc/GMT-12 12 - GMT-12
Zone Etc/GMT-11 11 - GMT-11
Zone Etc/GMT-10 10 - GMT-10
Zone Etc/GMT-9 9 - GMT-9
Zone Etc/GMT-8 8 - GMT-8
Zone Etc/GMT-7 7 - GMT-7
Zone Etc/GMT-6 6 - GMT-6
Zone Etc/GMT-5 5 - GMT-5
Zone Etc/GMT-4 4 - GMT-4
Zone Etc/GMT-3 3 - GMT-3
Zone Etc/GMT-2 2 - GMT-2
Zone Etc/GMT-1 1 - GMT-1
Zone Etc/GMT+1 -1 - GMT+1
Zone Etc/GMT+2 -2 - GMT+2
Zone Etc/GMT+3 -3 - GMT+3
Zone Etc/GMT+4 -4 - GMT+4
Zone Etc/GMT+5 -5 - GMT+5
Zone Etc/GMT+6 -6 - GMT+6
Zone Etc/GMT+7 -7 - GMT+7
Zone Etc/GMT+8 -8 - GMT+8
Zone Etc/GMT+9 -9 - GMT+9
Zone Etc/GMT+10 -10 - GMT+10
Zone Etc/GMT+11 -11 - GMT+11
Zone Etc/GMT+12 -12 - GMT+12
|
274056675/springboot-openai-chatgpt | 10,018 | mng_web/src/views/tool/code.vue | <template>
<basic-container>
<avue-crud :option="option"
:table-loading="loading"
:data="data"
ref="crud"
v-model="form"
:permission="permissionList"
:page="page"
@row-del="rowDel"
@row-update="rowUpdate"
@row-save="rowSave"
:before-open="beforeOpen"
@search-change="searchChange"
@search-reset="searchReset"
@selection-change="selectionChange"
@current-change="currentChange"
@size-change="sizeChange"
@on-load="onLoad">
<template slot="menuLeft">
<el-button type="danger"
size="small"
icon="el-icon-delete"
v-if="permission.code_delete"
plain
@click="handleDelete">删 除
</el-button>
<el-button type="primary"
size="small"
plain
icon="el-icon-refresh"
@click="handleBuild">代码生成
</el-button>
</template>
<template slot-scope="scope" slot="menu">
<el-button type="text"
size="small"
icon="el-icon-document-copy"
v-if="permission.code_edit"
@click.stop="handleCopy(scope.row)">复制
</el-button>
</template>
</avue-crud>
</basic-container>
</template>
<script>
import {getList, getCode, build, remove, add, update, copy} from "@/api/tool/code";
import {mapGetters} from "vuex";
export default {
data() {
return {
form: {},
selectionList: [],
loading: true,
query: {},
page: {
pageSize: 10,
currentPage: 1,
total: 0
},
option: {
height: 'auto',
calcHeight: 210,
searchShow: true,
searchMenuSpan: 6,
tip: false,
border: true,
index: true,
selection: true,
labelWidth: 120,
menuWidth: 250,
viewBtn: true,
column: [
{
label: "数据源",
prop: "datasourceId",
search: true,
span: 24,
type: "select",
dicUrl: "/api/blade-develop/datasource/select",
props: {
label: "name",
value: "id"
},
rules: [{
required: true,
message: "请选择数据源",
trigger: "blur"
}]
},
{
label: "模块名",
prop: "codeName",
search: true,
rules: [{
required: true,
message: "请输入模块名",
trigger: "blur"
}]
},
{
label: "服务名",
prop: "serviceName",
search: true,
rules: [{
required: true,
message: "请输入服务名",
trigger: "blur"
}]
},
{
label: "表名",
prop: "tableName",
rules: [{
required: true,
message: "请输入表名",
trigger: "blur"
}]
},
{
label: "表前缀",
prop: "tablePrefix",
hide: true,
rules: [{
required: true,
message: "请输入表前缀",
trigger: "blur"
}]
},
{
label: "主键名",
prop: "pkName",
hide: true,
rules: [{
required: true,
message: "请输入主键名",
trigger: "blur"
}]
},
{
label: "包名",
prop: "packageName",
overHidden: true,
rules: [{
required: true,
message: "请输入包名",
trigger: "blur"
}]
},
{
label: "基础业务",
prop: "baseMode",
type: 'radio',
dicUrl: "/api/blade-system/dict/dictionary?code=yes_no",
props: {
label: "dictValue",
value: "dictKey"
},
hide: true,
rules: [{
required: true,
message: "请选择基础业务",
trigger: "blur"
}]
},
{
label: "包装器",
prop: "wrapMode",
type: 'radio',
dicUrl: "/api/blade-system/dict/dictionary?code=yes_no",
props: {
label: "dictValue",
value: "dictKey"
},
hide: true,
rules: [{
required: true,
message: "请选择包装器",
trigger: "blur"
}]
},
{
label: "后端生成路径",
prop: "apiPath",
span: 24,
hide: true,
rules: [{
required: true,
message: "请输入后端生成路径",
trigger: "blur"
}]
},
{
label: "前端生成路径",
prop: "webPath",
span: 24,
hide: true,
rules: [{
required: true,
message: "请输入前端生成路径",
trigger: "blur"
}]
}
]
},
data: []
};
},
computed: {
...mapGetters(["permission"]),
permissionList() {
return {
addBtn: this.vaildData(this.permission.code_add, false),
viewBtn: this.vaildData(this.permission.code_view, false),
delBtn: this.vaildData(this.permission.code_delete, false),
editBtn: this.vaildData(this.permission.code_edit, false)
};
},
ids() {
let ids = [];
this.selectionList.forEach(ele => {
ids.push(ele.id);
});
return ids.join(",");
}
},
methods: {
rowSave(row, done, loading) {
add(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowUpdate(row, index, done, loading) {
update(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowDel(row) {
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(row.id);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
});
},
searchReset() {
this.query = {};
this.onLoad(this.page);
},
searchChange(params, done) {
this.query = params;
this.page.currentPage = 1;
this.onLoad(this.page, params);
done();
},
selectionChange(list) {
this.selectionList = list;
},
selectionClear() {
this.selectionList = [];
this.$refs.crud.toggleSelection();
},
handleDelete() {
if (this.selectionList.length === 0) {
this.$message.warning("请选择至少一条数据");
return;
}
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(this.ids);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
this.$refs.crud.toggleSelection();
});
},
handleBuild() {
if (this.selectionList.length === 0) {
this.$message.warning("请选择至少一条数据");
return;
}
this.$confirm("是否生成选中模块的代码?", {
title: "代码生成确认",
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return build(this.ids);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
this.$refs.crud.toggleSelection();
});
},
handleCopy(row) {
copy(row.id).then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "复制成功!"
});
});
},
beforeOpen(done, type) {
if (["edit", "view"].includes(type)) {
getCode(this.form.id).then(res => {
this.form = res.data.data;
});
}
done();
},
currentChange(currentPage) {
this.page.currentPage = currentPage;
},
sizeChange(pageSize) {
this.page.pageSize = pageSize;
},
onLoad(page, params = {}) {
this.loading = true;
getList(page.currentPage, page.pageSize, Object.assign(params, this.query)).then(res => {
const data = res.data.data;
this.page.total = data.total;
this.data = data.records;
this.loading = false;
this.selectionClear();
});
}
}
};
</script>
|
274056675/springboot-openai-chatgpt | 7,833 | mng_web/src/views/system/dept.vue | <template>
<basic-container>
<avue-crud :option="option"
:table-loading="loading"
:data="data"
ref="crud"
v-model="form"
:permission="permissionList"
:before-open="beforeOpen"
@row-del="rowDel"
@row-update="rowUpdate"
@row-save="rowSave"
@search-change="searchChange"
@search-reset="searchReset"
@selection-change="selectionChange"
@current-change="currentChange"
@size-change="sizeChange"
@on-load="onLoad">
<template slot="menuLeft">
<el-button type="danger"
size="small"
icon="el-icon-delete"
v-if="permission.dept_delete"
plain
@click="handleDelete">删 除
</el-button>
</template>
<template slot-scope="scope" slot="menu">
<el-button
type="text"
icon="el-icon-circle-plus-outline"
size="small"
@click.stop="handleAdd(scope.row,scope.index)"
v-if="userInfo.authority.includes('admin')"
>新增子项
</el-button>
</template>
</avue-crud>
</basic-container>
</template>
<script>
import {add, getDept, getDeptTree, getList, remove, update} from "@/api/system/dept";
import {mapGetters} from "vuex";
import website from '@/config/website';
export default {
data() {
return {
form: {},
selectionList: [],
loading: true,
query: {},
page: {
pageSize: 10,
currentPage: 1,
total: 0
},
option: {
searchShow: true,
searchMenuSpan: 6,
tip: false,
tree: true,
border: true,
index: true,
selection: true,
viewBtn: true,
menuWidth: 300,
column: [
{
label: "部门名称",
prop: "deptName",
search: true,
rules: [{
required: true,
message: "请输入部门名称",
trigger: "blur"
}]
},
{
label: "所属租户",
prop: "tenantId",
type: "tree",
dicUrl: "/api/blade-system/tenant/select",
addDisplay: false,
editDisplay: false,
viewDisplay: website.tenantMode,
span: 24,
props: {
label: "tenantName",
value: "tenantId"
},
hide: !website.tenantMode,
search: website.tenantMode,
rules: [{
required: true,
message: "请输入所属租户",
trigger: "click"
}]
},
{
label: "部门全称",
prop: "fullName",
search: true,
rules: [{
required: true,
message: "请输入部门全称",
trigger: "blur"
}]
},
{
label: "上级部门",
prop: "parentId",
dicData: [],
type: "tree",
hide: true,
props: {
label: "title"
},
rules: [{
required: false,
message: "请选择上级部门",
trigger: "click"
}]
},
{
label: "排序",
prop: "sort",
type: "number",
rules: [{
required: true,
message: "请输入排序",
trigger: "blur"
}]
},
{
label: "备注",
prop: "remark",
span: 24,
hide: true,
rules: [{
required: false,
message: "请输入备注",
trigger: "blur"
}]
}
]
},
data: []
};
},
computed: {
...mapGetters(["userInfo", "permission"]),
permissionList() {
return {
addBtn: this.vaildData(this.permission.dept_add, false),
viewBtn: this.vaildData(this.permission.dept_view, false),
delBtn: this.vaildData(this.permission.dept_delete, false),
editBtn: this.vaildData(this.permission.dept_edit, false)
};
},
ids() {
let ids = [];
this.selectionList.forEach(ele => {
ids.push(ele.id);
});
return ids.join(",");
}
},
methods: {
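    // Pre-fill and lock the parent id from the clicked row, then open the
    // "add" dialog so the new record is created as a child of that row.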
handleAdd(row) {
this.$refs.crud.value.parentId = row.id;
      this.$refs.crud.option.column.forEach(item => {
if (item.prop === "parentId") {
item.value = row.id;
item.addDisabled = true;
}
});
this.$refs.crud.rowAdd();
},
rowSave(row, done, loading) {
add(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowUpdate(row, index, done, loading) {
update(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowDel(row) {
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(row.id);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
});
},
handleDelete() {
if (this.selectionList.length === 0) {
this.$message.warning("请选择至少一条数据");
return;
}
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(this.ids);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
this.$refs.crud.toggleSelection();
});
},
searchReset() {
this.query = {};
this.onLoad(this.page);
},
searchChange(params, done) {
this.query = params;
this.page.currentPage = 1;
this.onLoad(this.page, params);
done();
},
selectionChange(list) {
this.selectionList = list;
},
beforeOpen(done, type) {
if (["edit", "view"].includes(type)) {
getDept(this.form.id).then(res => {
this.form = res.data.data;
});
}
done();
},
currentChange(currentPage){
this.page.currentPage = currentPage;
},
sizeChange(pageSize){
this.page.pageSize = pageSize;
},
onLoad(page, params = {}) {
this.loading = true;
getList(page.currentPage, page.pageSize, Object.assign(params, this.query)).then(res => {
this.data = res.data.data;
this.loading = false;
getDeptTree().then(res => {
const column = this.findObject(this.option.column, "parentId");
column.dicData = res.data.data;
});
});
}
}
};
</script>
<style>
</style>
|
233zzh/TitanDataOperationSystem | 1,190 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/flot/examples/axes-time-zones/tz/pacificnew | # <pre>
# This file is in the public domain, so clarified as of
# 2009-05-17 by Arthur David Olson.
# From Arthur David Olson (1989-04-05):
# On 1989-04-05, the U. S. House of Representatives passed (238-154) a bill
# establishing "Pacific Presidential Election Time"; it was not acted on
# by the Senate or signed into law by the President.
# You might want to change the "PE" (Presidential Election) below to
# "Q" (Quadrennial) to maintain three-character zone abbreviations.
# If you're really conservative, you might want to change it to "D".
# Avoid "L" (Leap Year), which won't be true in 2100.
# If Presidential Election Time is ever established, replace "XXXX" below
# with the year the law takes effect and uncomment the "##" lines.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
## Rule Twilite XXXX max - Apr Sun>=1 2:00 1:00 D
## Rule Twilite XXXX max uspres Oct lastSun 2:00 1:00 PE
## Rule Twilite XXXX max uspres Nov Sun>=7 2:00 0 S
## Rule Twilite XXXX max nonpres Oct lastSun 2:00 0 S
# Zone NAME GMTOFF RULES/SAVE FORMAT [UNTIL]
## Zone America/Los_Angeles-PET -8:00 US P%sT XXXX
## -8:00 Twilite P%sT
# For now...
Link America/Los_Angeles US/Pacific-New ##
|
27182812/ChatGLM-LLaMA-chinese-insturct | 4,979 | src/transformers/utils/hp_naming.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import re
class TrialShortNamer:
PREFIX = "hp"
DEFAULTS = {}
NAMING_INFO = None
@classmethod
def set_defaults(cls, prefix, defaults):
cls.PREFIX = prefix
cls.DEFAULTS = defaults
cls.build_naming_info()
@staticmethod
def shortname_for_word(info, word):
if len(word) == 0:
return ""
short_word = None
if any(char.isdigit() for char in word):
raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1, len(word) + 1):
prefix = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
short_word = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(integer):
s = ""
while integer != 0:
s = chr(ord("A") + integer % 10) + s
integer //= 10
return s
i = 0
while True:
sword = word + "#" + int_to_alphabetic(i)
if sword in info["reverse_short_word"]:
continue
else:
short_word = sword
break
info["short_word"][word] = short_word
info["reverse_short_word"][short_word] = word
return short_word
@staticmethod
def shortname_for_key(info, param_name):
words = param_name.split("_")
shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fall back
        # to a separated short name
separators = ["", "_"]
for separator in separators:
shortname = separator.join(shortname_parts)
if shortname not in info["reverse_short_param"]:
info["short_param"][param_name] = shortname
info["reverse_short_param"][shortname] = param_name
return shortname
return param_name
@staticmethod
def add_new_param_name(info, param_name):
short_name = TrialShortNamer.shortname_for_key(info, param_name)
info["short_param"][param_name] = short_name
info["reverse_short_param"][short_name] = param_name
@classmethod
def build_naming_info(cls):
if cls.NAMING_INFO is not None:
return
info = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
field_keys = list(cls.DEFAULTS.keys())
for k in field_keys:
cls.add_new_param_name(info, k)
cls.NAMING_INFO = info
@classmethod
def shortname(cls, params):
cls.build_naming_info()
assert cls.PREFIX is not None
name = [copy.copy(cls.PREFIX)]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"You should provide a default value for the param name {k} with value {v}")
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
key = cls.NAMING_INFO["short_param"][k]
if isinstance(v, bool):
v = 1 if v else 0
sep = "" if isinstance(v, (int, float)) else "-"
e = f"{key}{sep}{v}"
name.append(e)
return "_".join(name)
@classmethod
def parse_repr(cls, repr):
repr = repr[len(cls.PREFIX) + 1 :]
if repr == "":
values = []
else:
values = repr.split("_")
parameters = {}
for value in values:
if "-" in value:
p_k, p_v = value.split("-")
else:
p_k = re.sub("[0-9.]", "", value)
p_v = float(re.sub("[^0-9.]", "", value))
key = cls.NAMING_INFO["reverse_short_param"][p_k]
parameters[key] = p_v
for k in cls.DEFAULTS:
if k not in parameters:
parameters[k] = cls.DEFAULTS[k]
return parameters
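# Illustrative round-trip (not part of the module; a minimal sketch assuming
# the defaults shown here):
#
#   TrialShortNamer.set_defaults("hp", {"learning_rate": 0.1, "batch_size": 8})
#   TrialShortNamer.shortname({"learning_rate": 0.3, "batch_size": 8})
#   # -> 'hp_lr0.3'  (batch_size is dropped because it equals its default)
#   TrialShortNamer.parse_repr("hp_lr0.3")
#   # -> {'learning_rate': 0.3, 'batch_size': 8}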
|
27182812/ChatGLM-LLaMA-chinese-insturct | 6,149 | src/transformers/utils/__init__.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_neuroncore_available,
is_torch_onnx_dict_inputs_support_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
torch_version,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
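    # Illustrative: check_min_version("4.26.0") raises ImportError when the
    # installed transformers version is older than 4.26.0.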
if version.parse(__version__) < version.parse(min_version):
if "dev" in min_version:
error_message = (
"This example requires a source install from HuggingFace Transformers (see "
"`https://huggingface.co/transformers/installation.html#installing-from-source`),"
)
else:
error_message = f"This example requires a minimum version of {min_version},"
error_message += f" but the version found is {__version__}.\n"
raise ImportError(
error_message
+ "Check out https://huggingface.co/transformers/examples.html for the examples corresponding to other "
"versions of HuggingFace Transformers."
)
|
274056675/springboot-openai-chatgpt | 7,558 | mng_web/src/views/system/post.vue | <template>
<basic-container>
<avue-crud :option="option"
:table-loading="loading"
:data="data"
:page="page"
:permission="permissionList"
:before-open="beforeOpen"
v-model="form"
ref="crud"
@row-update="rowUpdate"
@row-save="rowSave"
@row-del="rowDel"
@search-change="searchChange"
@search-reset="searchReset"
@selection-change="selectionChange"
@current-change="currentChange"
@size-change="sizeChange"
@refresh-change="refreshChange"
@on-load="onLoad">
<template slot="menuLeft">
<el-button type="danger"
size="small"
icon="el-icon-delete"
plain
v-if="permission.post_delete"
@click="handleDelete">删 除
</el-button>
</template>
<template slot-scope="{row}"
slot="category">
<el-tag>{{row.categoryName}}</el-tag>
</template>
</avue-crud>
</basic-container>
</template>
<script>
import {getList, getDetail, add, update, remove} from "@/api/system/post";
import {mapGetters} from "vuex";
import website from "@/config/website";
export default {
data() {
return {
form: {},
query: {},
loading: true,
page: {
pageSize: 10,
currentPage: 1,
total: 0
},
selectionList: [],
option: {
height: 'auto',
calcHeight: 210,
tip: false,
searchShow: true,
searchMenuSpan: 6,
border: true,
index: true,
viewBtn: true,
selection: true,
column: [
{
label: "所属租户",
prop: "tenantId",
type: "tree",
dicUrl: "/api/blade-system/tenant/select",
addDisplay: false,
editDisplay: false,
viewDisplay: website.tenantMode,
span: 24,
props: {
label: "tenantName",
value: "tenantId"
},
hide: !website.tenantMode,
rules: [{
required: true,
message: "请输入所属租户",
trigger: "click"
}]
},
{
label: "岗位类型",
prop: "category",
type: "select",
dicUrl: "/api/blade-system/dict/dictionary?code=post_category",
props: {
label: "dictValue",
value: "dictKey"
},
dataType: "number",
slot: true,
search: true,
rules: [{
required: true,
message: "请选择岗位类型",
trigger: "blur"
}]
},
{
label: "岗位编号",
prop: "postCode",
search: true,
rules: [{
required: true,
message: "请输入岗位编号",
trigger: "blur"
}]
},
{
label: "岗位名称",
prop: "postName",
search: true,
rules: [{
required: true,
message: "请输入岗位名称",
trigger: "blur"
}]
},
{
label: "岗位排序",
prop: "sort",
type: "number",
rules: [{
required: true,
message: "请输入岗位排序",
trigger: "blur"
}]
},
{
label: "岗位描述",
prop: "remark",
type: "textarea",
span: 24,
minRows: 6,
hide: true,
},
]
},
data: []
};
},
computed: {
...mapGetters(["permission"]),
permissionList() {
return {
addBtn: this.vaildData(this.permission.post_add, false),
viewBtn: this.vaildData(this.permission.post_view, false),
delBtn: this.vaildData(this.permission.post_delete, false),
editBtn: this.vaildData(this.permission.post_edit, false)
};
},
ids() {
let ids = [];
this.selectionList.forEach(ele => {
ids.push(ele.id);
});
return ids.join(",");
}
},
methods: {
rowSave(row, done, loading) {
add(row).then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
done();
}, error => {
window.console.log(error);
loading();
});
},
rowUpdate(row, index, done, loading) {
update(row).then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
done();
}, error => {
window.console.log(error);
loading();
});
},
rowDel(row) {
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(row.id);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
});
},
handleDelete() {
if (this.selectionList.length === 0) {
this.$message.warning("请选择至少一条数据");
return;
}
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(this.ids);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
this.$refs.crud.toggleSelection();
});
},
beforeOpen(done, type) {
if (["edit", "view"].includes(type)) {
getDetail(this.form.id).then(res => {
this.form = res.data.data;
});
}
done();
},
searchReset() {
this.query = {};
this.onLoad(this.page);
},
searchChange(params, done) {
this.query = params;
this.page.currentPage = 1;
this.onLoad(this.page, params);
done();
},
selectionChange(list) {
this.selectionList = list;
},
selectionClear() {
this.selectionList = [];
this.$refs.crud.toggleSelection();
},
currentChange(currentPage) {
this.page.currentPage = currentPage;
},
sizeChange(pageSize) {
this.page.pageSize = pageSize;
},
refreshChange() {
this.onLoad(this.page, this.query);
},
onLoad(page, params = {}) {
this.loading = true;
getList(page.currentPage, page.pageSize, Object.assign(params, this.query)).then(res => {
const data = res.data.data;
this.page.total = data.total;
this.data = data.records;
this.loading = false;
this.selectionClear();
});
}
}
};
</script>
<style>
</style>
|
27182812/ChatGLM-LLaMA-chinese-insturct | 2,272 | src/transformers/utils/model_parallel_utils.py | # coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import ceil
def assert_device_map(device_map, num_blocks):
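    # Illustrative (hypothetical) inputs: with num_blocks=4, a device_map of
    # {0: [0, 1], 1: [2, 3]} passes, while {0: [0, 1], 1: [1, 2]} raises
    # (block 1 is assigned twice and block 3 is missing).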
blocks = list(range(0, num_blocks))
device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]
# Duplicate check
duplicate_blocks = []
for i in device_map_blocks:
if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(i)
# Missing blocks
missing_blocks = [i for i in blocks if i not in device_map_blocks]
extra_blocks = [i for i in device_map_blocks if i not in blocks]
if len(duplicate_blocks) != 0:
raise ValueError(
"Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
" These attention blocks were specified more than once: " + str(duplicate_blocks)
)
if len(missing_blocks) != 0:
raise ValueError(
"There are attention blocks for this model that are not specified in the device_map. Add these attention "
"blocks to a device on the device_map: " + str(missing_blocks)
)
if len(extra_blocks) != 0:
raise ValueError(
"The device_map contains more attention blocks than this model has. Remove these from the device_map:"
+ str(extra_blocks)
)
def get_device_map(n_layers, devices):
"""Returns a dictionary of layers distributed evenly across all devices."""
layers = list(range(n_layers))
n_blocks = int(ceil(n_layers / len(devices)))
layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
return dict(zip(devices, layers_list))
|
274056675/springboot-openai-chatgpt | 6,065 | mng_web/src/views/system/tenant.vue | <template>
<basic-container>
<avue-crud :option="option"
:table-loading="loading"
:data="data"
ref="crud"
v-model="form"
:page="page"
:permission="permissionList"
@row-del="rowDel"
@row-update="rowUpdate"
@row-save="rowSave"
@search-change="searchChange"
@search-reset="searchReset"
@selection-change="selectionChange"
@current-change="currentChange"
@size-change="sizeChange"
@on-load="onLoad">
<template slot="menuLeft">
<el-button type="danger"
size="small"
icon="el-icon-delete"
v-if="permission.tenant_delete"
plain
@click="handleDelete">删 除
</el-button>
</template>
</avue-crud>
</basic-container>
</template>
<script>
import {getList, remove, update, add} from "@/api/system/tenant";
import {mapGetters} from "vuex";
export default {
data() {
return {
form: {},
selectionList: [],
query: {},
loading: true,
page: {
pageSize: 10,
currentPage: 1,
total: 0
},
option: {
height: 'auto',
calcHeight: 210,
searchShow: true,
searchMenuSpan: 6,
tip: false,
border: true,
index: true,
selection: true,
viewBtn: true,
column: [
{
label: "租户ID",
prop: "tenantId",
search: true,
addDisplay: false,
editDisplay: false,
span: 24,
rules: [{
required: true,
message: "请输入租户ID",
trigger: "blur"
}]
},
{
label: "租户名称",
prop: "tenantName",
search: true,
span: 24,
rules: [{
required: true,
message: "请输入参数名称",
trigger: "blur"
}]
},
{
label: "联系人",
prop: "linkman",
search: true,
span: 24,
rules: [{
required: true,
message: "请输入联系人",
trigger: "blur"
}]
},
{
label: "联系电话",
prop: "contactNumber",
span: 24,
},
{
label: "联系地址",
prop: "address",
span: 24,
minRows: 6,
type: "textarea",
},
{
label: "域名地址",
prop: "domain",
span: 24,
}
]
},
data: []
};
},
computed: {
...mapGetters(["permission"]),
permissionList() {
return {
addBtn: this.vaildData(this.permission.tenant_add, false),
viewBtn: this.vaildData(this.permission.tenant_view, false),
delBtn: this.vaildData(this.permission.tenant_delete, false),
editBtn: this.vaildData(this.permission.tenant_edit, false)
};
},
ids() {
let ids = [];
this.selectionList.forEach(ele => {
ids.push(ele.id);
});
return ids.join(",");
}
},
methods: {
rowSave(row, done, loading) {
add(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowUpdate(row, index, done, loading) {
update(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowDel(row) {
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(row.id);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
});
},
searchReset() {
this.query = {};
this.onLoad(this.page);
},
searchChange(params, done) {
this.query = params;
this.page.currentPage = 1;
this.onLoad(this.page, params);
done();
},
selectionChange(list) {
this.selectionList = list;
},
handleDelete() {
if (this.selectionList.length === 0) {
this.$message.warning("请选择至少一条数据");
return;
}
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(this.ids);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
this.$refs.crud.toggleSelection();
});
},
currentChange(currentPage) {
this.page.currentPage = currentPage;
},
sizeChange(pageSize) {
this.page.pageSize = pageSize;
},
onLoad(page, params = {}) {
this.loading = true;
getList(page.currentPage, page.pageSize, Object.assign(params, this.query)).then(res => {
const data = res.data.data;
this.page.total = data.total;
this.data = data.records;
this.loading = false;
});
}
}
};
</script>
<style>
</style>
|
274056675/springboot-openai-chatgpt | 7,487 | mng_web/src/views/system/dict.vue | <template>
<basic-container>
<avue-crud :option="option"
:table-loading="loading"
:data="data"
ref="crud"
v-model="form"
:permission="permissionList"
:before-open="beforeOpen"
@row-del="rowDel"
@row-update="rowUpdate"
@row-save="rowSave"
@search-change="searchChange"
@search-reset="searchReset"
@selection-change="selectionChange"
@current-change="currentChange"
@size-change="sizeChange"
@on-load="onLoad">
<template slot="menuLeft">
<el-button type="danger"
size="small"
icon="el-icon-delete"
v-if="permission.dict_delete"
plain
@click="handleDelete">删 除
</el-button>
</template>
<template slot-scope="scope" slot="menu">
<el-button
type="text"
icon="el-icon-circle-plus-outline"
size="small"
@click.stop="handleAdd(scope.row,scope.index)"
v-if="userInfo.authority.includes('admin')"
>新增子项
</el-button>
</template>
</avue-crud>
</basic-container>
</template>
<script>
import {add, getDict, getDictTree, getList, remove, update} from "@/api/system/dict";
import {mapGetters} from "vuex";
export default {
data() {
return {
form: {},
selectionList: [],
loading: true,
query: {},
page: {
pageSize: 10,
currentPage: 1,
total: 0
},
option: {
searchShow: true,
searchMenuSpan: 6,
tip: false,
tree: true,
border: true,
index: true,
selection: true,
viewBtn: true,
menuWidth: 300,
column: [
{
label: "字典编号",
prop: "code",
search: true,
span: 24,
rules: [{
required: true,
message: "请输入字典编号",
trigger: "blur"
}]
},
{
label: "字典名称",
prop: "dictValue",
search: true,
rules: [{
required: true,
message: "请输入字典名称",
trigger: "blur"
}]
},
{
label: "上级字典",
prop: "parentId",
type: "tree",
dicData: [],
hide: true,
props: {
label: "title"
},
rules: [{
required: false,
message: "请选择上级字典",
trigger: "click"
}]
},
{
label: "字典键值",
prop: "dictKey",
type: "number",
rules: [{
required: true,
message: "请输入字典键值",
trigger: "blur"
}]
},
{
label: "字典排序",
prop: "sort",
type: "number",
rules: [{
required: true,
message: "请输入字典排序",
trigger: "blur"
}]
},
{
label: "字典备注",
prop: "remark",
search: true,
span: 24,
hide: true,
},
]
},
data: []
};
},
computed: {
...mapGetters(["userInfo", "permission"]),
permissionList() {
return {
addBtn: this.vaildData(this.permission.dict_add, false),
viewBtn: this.vaildData(this.permission.dict_view, false),
delBtn: this.vaildData(this.permission.dict_delete, false),
editBtn: this.vaildData(this.permission.dict_edit, false)
};
},
ids() {
let ids = [];
this.selectionList.forEach(ele => {
ids.push(ele.id);
});
return ids.join(",");
}
},
methods: {
handleAdd(row) {
this.$refs.crud.value.code = row.code;
this.$refs.crud.value.parentId = row.id;
      this.$refs.crud.option.column.forEach(item => {
if (item.prop === "code") {
item.value = row.code;
item.addDisabled = true;
}
if (item.prop === "parentId") {
item.value = row.id;
item.addDisabled = true;
}
});
this.$refs.crud.rowAdd();
},
rowSave(row, done, loading) {
add(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowUpdate(row, index, done, loading) {
update(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowDel(row) {
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(row.id);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
});
},
searchReset() {
this.query = {};
this.onLoad(this.page);
},
searchChange(params, done) {
this.query = params;
this.page.currentPage = 1;
this.onLoad(this.page, params);
done();
},
selectionChange(list) {
this.selectionList = list;
},
handleDelete() {
if (this.selectionList.length === 0) {
this.$message.warning("请选择至少一条数据");
return;
}
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(this.ids);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
this.$refs.crud.toggleSelection();
});
},
beforeOpen(done, type) {
if (["edit", "view"].includes(type)) {
getDict(this.form.id).then(res => {
this.form = res.data.data;
});
}
done();
},
currentChange(currentPage){
this.page.currentPage = currentPage;
},
sizeChange(pageSize){
this.page.pageSize = pageSize;
},
onLoad(page, params = {}) {
this.loading = true;
getList(page.currentPage, page.pageSize, Object.assign(params, this.query)).then(res => {
this.data = res.data.data;
this.loading = false;
getDictTree().then(res => {
const column = this.findObject(this.option.column, "parentId");
column.dicData = res.data.data;
});
});
}
}
};
</script>
<style>
</style>
|
233zzh/TitanDataOperationSystem | 69,499 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/flot/examples/axes-time-zones/tz/australasia | # <pre>
# This file is in the public domain, so clarified as of
# 2009-05-17 by Arthur David Olson.
# This file also includes Pacific islands.
# Notes are at the end of this file
###############################################################################
# Australia
# Please see the notes below for the controversy about "EST" versus "AEST" etc.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Aus 1917 only - Jan 1 0:01 1:00 -
Rule Aus 1917 only - Mar 25 2:00 0 -
Rule Aus 1942 only - Jan 1 2:00 1:00 -
Rule Aus 1942 only - Mar 29 2:00 0 -
Rule Aus 1942 only - Sep 27 2:00 1:00 -
Rule Aus 1943 1944 - Mar lastSun 2:00 0 -
Rule Aus 1943 only - Oct 3 2:00 1:00 -
# Go with Whitman and the Australian National Standards Commission, which
# says W Australia didn't use DST in 1943/1944. Ignore Whitman's claim that
# 1944/1945 was just like 1943/1944.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
# Northern Territory
Zone Australia/Darwin 8:43:20 - LMT 1895 Feb
9:00 - CST 1899 May
9:30 Aus CST
# Western Australia
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule AW 1974 only - Oct lastSun 2:00s 1:00 -
Rule AW 1975 only - Mar Sun>=1 2:00s 0 -
Rule AW 1983 only - Oct lastSun 2:00s 1:00 -
Rule AW 1984 only - Mar Sun>=1 2:00s 0 -
Rule AW 1991 only - Nov 17 2:00s 1:00 -
Rule AW 1992 only - Mar Sun>=1 2:00s 0 -
Rule AW 2006 only - Dec 3 2:00s 1:00 -
Rule AW 2007 2009 - Mar lastSun 2:00s 0 -
Rule AW 2007 2008 - Oct lastSun 2:00s 1:00 -
Zone Australia/Perth 7:43:24 - LMT 1895 Dec
8:00 Aus WST 1943 Jul
8:00 AW WST
Zone Australia/Eucla 8:35:28 - LMT 1895 Dec
8:45 Aus CWST 1943 Jul
8:45 AW CWST
# Queensland
#
# From Alex Livingston (1996-11-01):
# I have heard or read more than once that some resort islands off the coast
# of Queensland chose to keep observing daylight-saving time even after
# Queensland ceased to.
#
# From Paul Eggert (1996-11-22):
# IATA SSIM (1993-02/1994-09) say that the Holiday Islands (Hayman, Lindeman,
# Hamilton) observed DST for two years after the rest of Queensland stopped.
# Hamilton is the largest, but there is also a Hamilton in Victoria,
# so use Lindeman.
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule AQ 1971 only - Oct lastSun 2:00s 1:00 -
Rule AQ 1972 only - Feb lastSun 2:00s 0 -
Rule AQ 1989 1991 - Oct lastSun 2:00s 1:00 -
Rule AQ 1990 1992 - Mar Sun>=1 2:00s 0 -
Rule Holiday 1992 1993 - Oct lastSun 2:00s 1:00 -
Rule Holiday 1993 1994 - Mar Sun>=1 2:00s 0 -
Zone Australia/Brisbane 10:12:08 - LMT 1895
10:00 Aus EST 1971
10:00 AQ EST
Zone Australia/Lindeman 9:55:56 - LMT 1895
10:00 Aus EST 1971
10:00 AQ EST 1992 Jul
10:00 Holiday EST
# South Australia
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule AS 1971 1985 - Oct lastSun 2:00s 1:00 -
Rule AS 1986 only - Oct 19 2:00s 1:00 -
Rule AS 1987 2007 - Oct lastSun 2:00s 1:00 -
Rule AS 1972 only - Feb 27 2:00s 0 -
Rule AS 1973 1985 - Mar Sun>=1 2:00s 0 -
Rule AS 1986 1990 - Mar Sun>=15 2:00s 0 -
Rule AS 1991 only - Mar 3 2:00s 0 -
Rule AS 1992 only - Mar 22 2:00s 0 -
Rule AS 1993 only - Mar 7 2:00s 0 -
Rule AS 1994 only - Mar 20 2:00s 0 -
Rule AS 1995 2005 - Mar lastSun 2:00s 0 -
Rule AS 2006 only - Apr 2 2:00s 0 -
Rule AS 2007 only - Mar lastSun 2:00s 0 -
Rule AS 2008 max - Apr Sun>=1 2:00s 0 -
Rule AS 2008 max - Oct Sun>=1 2:00s 1:00 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Australia/Adelaide 9:14:20 - LMT 1895 Feb
9:00 - CST 1899 May
9:30 Aus CST 1971
9:30 AS CST
# Tasmania
#
# From Paul Eggert (2005-08-16):
# <http://www.bom.gov.au/climate/averages/tables/dst_times.shtml>
# says King Island didn't observe DST from WWII until late 1971.
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule AT 1967 only - Oct Sun>=1 2:00s 1:00 -
Rule AT 1968 only - Mar lastSun 2:00s 0 -
Rule AT 1968 1985 - Oct lastSun 2:00s 1:00 -
Rule AT 1969 1971 - Mar Sun>=8 2:00s 0 -
Rule AT 1972 only - Feb lastSun 2:00s 0 -
Rule AT 1973 1981 - Mar Sun>=1 2:00s 0 -
Rule AT 1982 1983 - Mar lastSun 2:00s 0 -
Rule AT 1984 1986 - Mar Sun>=1 2:00s 0 -
Rule AT 1986 only - Oct Sun>=15 2:00s 1:00 -
Rule AT 1987 1990 - Mar Sun>=15 2:00s 0 -
Rule AT 1987 only - Oct Sun>=22 2:00s 1:00 -
Rule AT 1988 1990 - Oct lastSun 2:00s 1:00 -
Rule AT 1991 1999 - Oct Sun>=1 2:00s 1:00 -
Rule AT 1991 2005 - Mar lastSun 2:00s 0 -
Rule AT 2000 only - Aug lastSun 2:00s 1:00 -
Rule AT 2001 max - Oct Sun>=1 2:00s 1:00 -
Rule AT 2006 only - Apr Sun>=1 2:00s 0 -
Rule AT 2007 only - Mar lastSun 2:00s 0 -
Rule AT 2008 max - Apr Sun>=1 2:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Australia/Hobart 9:49:16 - LMT 1895 Sep
10:00 - EST 1916 Oct 1 2:00
10:00 1:00 EST 1917 Feb
10:00 Aus EST 1967
10:00 AT EST
Zone Australia/Currie 9:35:28 - LMT 1895 Sep
10:00 - EST 1916 Oct 1 2:00
10:00 1:00 EST 1917 Feb
10:00 Aus EST 1971 Jul
10:00 AT EST
# Victoria
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule AV 1971 1985 - Oct lastSun 2:00s 1:00 -
Rule AV 1972 only - Feb lastSun 2:00s 0 -
Rule AV 1973 1985 - Mar Sun>=1 2:00s 0 -
Rule AV 1986 1990 - Mar Sun>=15 2:00s 0 -
Rule AV 1986 1987 - Oct Sun>=15 2:00s 1:00 -
Rule AV 1988 1999 - Oct lastSun 2:00s 1:00 -
Rule AV 1991 1994 - Mar Sun>=1 2:00s 0 -
Rule AV 1995 2005 - Mar lastSun 2:00s 0 -
Rule AV 2000 only - Aug lastSun 2:00s 1:00 -
Rule AV 2001 2007 - Oct lastSun 2:00s 1:00 -
Rule AV 2006 only - Apr Sun>=1 2:00s 0 -
Rule AV 2007 only - Mar lastSun 2:00s 0 -
Rule AV 2008 max - Apr Sun>=1 2:00s 0 -
Rule AV 2008 max - Oct Sun>=1 2:00s 1:00 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Australia/Melbourne 9:39:52 - LMT 1895 Feb
10:00 Aus EST 1971
10:00 AV EST
# New South Wales
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule AN 1971 1985 - Oct lastSun 2:00s 1:00 -
Rule AN 1972 only - Feb 27 2:00s 0 -
Rule AN 1973 1981 - Mar Sun>=1 2:00s 0 -
Rule AN 1982 only - Apr Sun>=1 2:00s 0 -
Rule AN 1983 1985 - Mar Sun>=1 2:00s 0 -
Rule AN 1986 1989 - Mar Sun>=15 2:00s 0 -
Rule AN 1986 only - Oct 19 2:00s 1:00 -
Rule AN 1987 1999 - Oct lastSun 2:00s 1:00 -
Rule AN 1990 1995 - Mar Sun>=1 2:00s 0 -
Rule AN 1996 2005 - Mar lastSun 2:00s 0 -
Rule AN 2000 only - Aug lastSun 2:00s 1:00 -
Rule AN 2001 2007 - Oct lastSun 2:00s 1:00 -
Rule AN 2006 only - Apr Sun>=1 2:00s 0 -
Rule AN 2007 only - Mar lastSun 2:00s 0 -
Rule AN 2008 max - Apr Sun>=1 2:00s 0 -
Rule AN 2008 max - Oct Sun>=1 2:00s 1:00 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Australia/Sydney 10:04:52 - LMT 1895 Feb
10:00 Aus EST 1971
10:00 AN EST
Zone Australia/Broken_Hill 9:25:48 - LMT 1895 Feb
10:00 - EST 1896 Aug 23
9:00 - CST 1899 May
9:30 Aus CST 1971
9:30 AN CST 2000
9:30 AS CST
# Lord Howe Island
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule LH 1981 1984 - Oct lastSun 2:00 1:00 -
Rule LH 1982 1985 - Mar Sun>=1 2:00 0 -
Rule LH 1985 only - Oct lastSun 2:00 0:30 -
Rule LH 1986 1989 - Mar Sun>=15 2:00 0 -
Rule LH 1986 only - Oct 19 2:00 0:30 -
Rule LH 1987 1999 - Oct lastSun 2:00 0:30 -
Rule LH 1990 1995 - Mar Sun>=1 2:00 0 -
Rule LH 1996 2005 - Mar lastSun 2:00 0 -
Rule LH 2000 only - Aug lastSun 2:00 0:30 -
Rule LH 2001 2007 - Oct lastSun 2:00 0:30 -
Rule LH 2006 only - Apr Sun>=1 2:00 0 -
Rule LH 2007 only - Mar lastSun 2:00 0 -
Rule LH 2008 max - Apr Sun>=1 2:00 0 -
Rule LH 2008 max - Oct Sun>=1 2:00 0:30 -
Zone Australia/Lord_Howe 10:36:20 - LMT 1895 Feb
10:00 - EST 1981 Mar
10:30 LH LHST
# Australian miscellany
#
# Ashmore Is, Cartier
# no indigenous inhabitants; only seasonal caretakers
# no times are set
#
# Coral Sea Is
# no indigenous inhabitants; only meteorologists
# no times are set
#
# Macquarie
# permanent occupation (scientific station) since 1948;
# sealing and penguin oil station operated 1888/1917
# like Australia/Hobart
# Christmas
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Christmas 7:02:52 - LMT 1895 Feb
7:00 - CXT # Christmas Island Time
# Cook Is
# From Shanks & Pottenger:
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Cook 1978 only - Nov 12 0:00 0:30 HS
Rule Cook 1979 1991 - Mar Sun>=1 0:00 0 -
Rule Cook 1979 1990 - Oct lastSun 0:00 0:30 HS
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Rarotonga -10:39:04 - LMT 1901 # Avarua
-10:30 - CKT 1978 Nov 12 # Cook Is Time
-10:00 Cook CK%sT
# Cocos
# These islands were ruled by the Ross family from about 1830 to 1978.
# We don't know when standard time was introduced; for now, we guess 1900.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Cocos 6:27:40 - LMT 1900
6:30 - CCT # Cocos Islands Time
# Fiji
# From Alexander Krivenyshev (2009-11-10):
# According to Fiji Broadcasting Corporation, Fiji plans to re-introduce DST
# from November 29th 2009 to April 25th 2010.
#
# "Daylight savings to commence this month"
# <a href="http://www.radiofiji.com.fj/fullstory.php?id=23719">
# http://www.radiofiji.com.fj/fullstory.php?id=23719
# </a>
# or
# <a href="http://www.worldtimezone.com/dst_news/dst_news_fiji01.html">
# http://www.worldtimezone.com/dst_news/dst_news_fiji01.html
# </a>
# From Steffen Thorsen (2009-11-10):
# The Fiji Government has posted some more details about the approved
# amendments:
# <a href="http://www.fiji.gov.fj/publish/page_16198.shtml">
# http://www.fiji.gov.fj/publish/page_16198.shtml
# </a>
# From Steffen Thorsen (2010-03-03):
# The Cabinet in Fiji has decided to end DST about a month early, on
# 2010-03-28 at 03:00.
# The plan is to observe DST again, from 2010-10-24 to sometime in March
# 2011 (last Sunday a good guess?).
#
# Official source:
# <a href="http://www.fiji.gov.fj/index.php?option=com_content&view=article&id=1096:3310-cabinet-approves-change-in-daylight-savings-dates&catid=49:cabinet-releases&Itemid=166">
# http://www.fiji.gov.fj/index.php?option=com_content&view=article&id=1096:3310-cabinet-approves-change-in-daylight-savings-dates&catid=49:cabinet-releases&Itemid=166
# </a>
#
# A bit more background info here:
# <a href="http://www.timeanddate.com/news/time/fiji-dst-ends-march-2010.html">
# http://www.timeanddate.com/news/time/fiji-dst-ends-march-2010.html
# </a>
# From Alexander Krivenyshev (2010-10-24):
# According to Radio Fiji and Fiji Times online, Fiji will end DST 3
# weeks earlier than expected - on March 6, 2011, not March 27, 2011...
# Here is confirmation from Government of the Republic of the Fiji Islands,
# Ministry of Information (fiji.gov.fj) web site:
# <a href="http://www.fiji.gov.fj/index.php?option=com_content&view=article&id=2608:daylight-savings&catid=71:press-releases&Itemid=155">
# http://www.fiji.gov.fj/index.php?option=com_content&view=article&id=2608:daylight-savings&catid=71:press-releases&Itemid=155
# </a>
# or
# <a href="http://www.worldtimezone.com/dst_news/dst_news_fiji04.html">
# http://www.worldtimezone.com/dst_news/dst_news_fiji04.html
# </a>
# From Steffen Thorsen (2011-10-03):
# Now the dates have been confirmed, and at least our start date
# assumption was correct (end date was one week wrong).
#
# <a href="http://www.fiji.gov.fj/index.php?option=com_content&view=article&id=4966:daylight-saving-starts-in-fiji&catid=71:press-releases&Itemid=155">
# www.fiji.gov.fj/index.php?option=com_content&view=article&id=4966:daylight-saving-starts-in-fiji&catid=71:press-releases&Itemid=155
# </a>
# which says
# Members of the public are reminded to change their time to one hour in
# advance at 2am to 3am on October 23, 2011 and one hour back at 3am to
# 2am on February 26 next year.
# From Ken Rylander (2011-10-24)
# Another change to the Fiji DST end date. In the TZ database the end date for
# Fiji DST 2012, is currently Feb 26. This has been changed to Jan 22.
#
# <a href="http://www.fiji.gov.fj/index.php?option=com_content&view=article&id=5017:amendments-to-daylight-savings&catid=71:press-releases&Itemid=155">
# http://www.fiji.gov.fj/index.php?option=com_content&view=article&id=5017:amendments-to-daylight-savings&catid=71:press-releases&Itemid=155
# </a>
# states:
#
# The end of daylight saving scheduled initially for the 26th of February 2012
# has been brought forward to the 22nd of January 2012.
# The commencement of daylight saving will remain unchanged and start
# on the 23rd of October, 2011.
# From the Fiji Government Online Portal (2012-08-21) via Steffen Thorsen:
# The Minister for Labour, Industrial Relations and Employment Mr Jone Usamate
# today confirmed that Fiji will start daylight savings at 2 am on Sunday 21st
# October 2012 and end at 3 am on Sunday 20th January 2013.
# http://www.fiji.gov.fj/index.php?option=com_content&view=article&id=6702&catid=71&Itemid=155
#
# From Paul Eggert (2012-08-31):
# For now, guess a pattern of the penultimate Sundays in October and January;
# a short sketch after the Zone lines below checks that encoding.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Fiji 1998 1999 - Nov Sun>=1 2:00 1:00 S
Rule Fiji 1999 2000 - Feb lastSun 3:00 0 -
Rule Fiji 2009 only - Nov 29 2:00 1:00 S
Rule Fiji 2010 only - Mar lastSun 3:00 0 -
Rule Fiji 2010 max - Oct Sun>=18 2:00 1:00 S
Rule Fiji 2011 only - Mar Sun>=1 3:00 0 -
Rule Fiji 2012 max - Jan Sun>=18 3:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Fiji 11:53:40 - LMT 1915 Oct 26 # Suva
12:00 Fiji FJ%sT # Fiji Time
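#
# A quick check of the "penultimate Sunday" encoding used above: in a 31-day
# month a Sunday on or after the 18th falls on the 18th-24th, so exactly one
# more Sunday follows it, and "Sun>=18" picks the second-to-last Sunday. A
# self-contained Python sketch (illustrative only):
#
#	import calendar, datetime
#	d = datetime.date(2012, 1, 18)
#	d += datetime.timedelta(days=(calendar.SUNDAY - d.weekday()) % 7)
#	print(d)  # 2012-01-22, matching the brought-forward end date quoted above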
# French Polynesia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Gambier -8:59:48 - LMT 1912 Oct # Rikitea
-9:00 - GAMT # Gambier Time
Zone Pacific/Marquesas -9:18:00 - LMT 1912 Oct
-9:30 - MART # Marquesas Time
Zone Pacific/Tahiti -9:58:16 - LMT 1912 Oct # Papeete
-10:00 - TAHT # Tahiti Time
# Clipperton (near North America) is administered from French Polynesia;
# it is uninhabited.
# Guam
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Guam -14:21:00 - LMT 1844 Dec 31
9:39:00 - LMT 1901 # Agana
10:00 - GST 2000 Dec 23 # Guam
10:00 - ChST # Chamorro Standard Time
# Kiribati
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Tarawa 11:32:04 - LMT 1901 # Bairiki
12:00 - GILT # Gilbert Is Time
Zone Pacific/Enderbury -11:24:20 - LMT 1901
-12:00 - PHOT 1979 Oct # Phoenix Is Time
-11:00 - PHOT 1995
13:00 - PHOT
Zone Pacific/Kiritimati -10:29:20 - LMT 1901
-10:40 - LINT 1979 Oct # Line Is Time
-10:00 - LINT 1995
14:00 - LINT
# N Mariana Is
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Saipan -14:17:00 - LMT 1844 Dec 31
9:43:00 - LMT 1901
9:00 - MPT 1969 Oct # N Mariana Is Time
10:00 - MPT 2000 Dec 23
10:00 - ChST # Chamorro Standard Time
# Marshall Is
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Majuro 11:24:48 - LMT 1901
11:00 - MHT 1969 Oct # Marshall Islands Time
12:00 - MHT
Zone Pacific/Kwajalein 11:09:20 - LMT 1901
11:00 - MHT 1969 Oct
-12:00 - KWAT 1993 Aug 20 # Kwajalein Time
12:00 - MHT
# Micronesia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Chuuk 10:07:08 - LMT 1901
10:00 - CHUT # Chuuk Time
Zone Pacific/Pohnpei 10:32:52 - LMT 1901 # Kolonia
11:00 - PONT # Pohnpei Time
Zone Pacific/Kosrae 10:51:56 - LMT 1901
11:00 - KOST 1969 Oct # Kosrae Time
12:00 - KOST 1999
11:00 - KOST
# Nauru
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Nauru 11:07:40 - LMT 1921 Jan 15 # Uaobe
11:30 - NRT 1942 Mar 15 # Nauru Time
9:00 - JST 1944 Aug 15
11:30 - NRT 1979 May
12:00 - NRT
# New Caledonia
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule NC 1977 1978 - Dec Sun>=1 0:00 1:00 S
Rule NC 1978 1979 - Feb 27 0:00 0 -
Rule NC 1996 only - Dec 1 2:00s 1:00 S
# Shanks & Pottenger say the following was at 2:00; go with IATA.
Rule NC 1997 only - Mar 2 2:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Noumea 11:05:48 - LMT 1912 Jan 13
11:00 NC NC%sT
###############################################################################
# New Zealand
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule NZ 1927 only - Nov 6 2:00 1:00 S
Rule NZ 1928 only - Mar 4 2:00 0 M
Rule NZ 1928 1933 - Oct Sun>=8 2:00 0:30 S
Rule NZ 1929 1933 - Mar Sun>=15 2:00 0 M
Rule NZ 1934 1940 - Apr lastSun 2:00 0 M
Rule NZ 1934 1940 - Sep lastSun 2:00 0:30 S
Rule NZ 1946 only - Jan 1 0:00 0 S
# Since 1957 Chatham has been 45 minutes ahead of NZ, but there's no
# convenient notation for this so we must duplicate the Rule lines; the
# sketch after the Zone lines below illustrates the constant offset.
Rule NZ 1974 only - Nov Sun>=1 2:00s 1:00 D
Rule Chatham 1974 only - Nov Sun>=1 2:45s 1:00 D
Rule NZ 1975 only - Feb lastSun 2:00s 0 S
Rule Chatham 1975 only - Feb lastSun 2:45s 0 S
Rule NZ 1975 1988 - Oct lastSun 2:00s 1:00 D
Rule Chatham 1975 1988 - Oct lastSun 2:45s 1:00 D
Rule NZ 1976 1989 - Mar Sun>=1 2:00s 0 S
Rule Chatham 1976 1989 - Mar Sun>=1 2:45s 0 S
Rule NZ 1989 only - Oct Sun>=8 2:00s 1:00 D
Rule Chatham 1989 only - Oct Sun>=8 2:45s 1:00 D
Rule NZ 1990 2006 - Oct Sun>=1 2:00s 1:00 D
Rule Chatham 1990 2006 - Oct Sun>=1 2:45s 1:00 D
Rule NZ 1990 2007 - Mar Sun>=15 2:00s 0 S
Rule Chatham 1990 2007 - Mar Sun>=15 2:45s 0 S
Rule NZ 2007 max - Sep lastSun 2:00s 1:00 D
Rule Chatham 2007 max - Sep lastSun 2:45s 1:00 D
Rule NZ 2008 max - Apr Sun>=1 2:00s 0 S
Rule Chatham 2008 max - Apr Sun>=1 2:45s 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Auckland 11:39:04 - LMT 1868 Nov 2
11:30 NZ NZ%sT 1946 Jan 1
12:00 NZ NZ%sT
Zone Pacific/Chatham 12:13:48 - LMT 1957 Jan 1
12:45 Chatham CHA%sT
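#
# A quick zoneinfo check of that constant 45-minute gap (a sketch; assumes
# Python 3.9+ with tz data available):
#
#	from datetime import datetime, timezone
#	from zoneinfo import ZoneInfo
#	for month in (1, 7):  # one date inside NZ DST, one outside
#	    t = datetime(2012, month, 1, tzinfo=timezone.utc)
#	    ak = t.astimezone(ZoneInfo("Pacific/Auckland")).utcoffset()
#	    ch = t.astimezone(ZoneInfo("Pacific/Chatham")).utcoffset()
#	    print(month, ch - ak)  # 0:45:00 both times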
# Auckland Is
# uninhabited; Maori and Moriori, colonial settlers, pastoralists, sealers,
# and scientific personnel have wintered
# Campbell I
# minor whaling stations operated 1909/1914
# scientific station operated 1941/1995;
# previously whalers, sealers, pastoralists, and scientific personnel wintered
# was probably like Pacific/Auckland
###############################################################################
# Niue
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Niue -11:19:40 - LMT 1901 # Alofi
-11:20 - NUT 1951 # Niue Time
-11:30 - NUT 1978 Oct 1
-11:00 - NUT
# Norfolk
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Norfolk 11:11:52 - LMT 1901 # Kingston
11:12 - NMT 1951 # Norfolk Mean Time
11:30 - NFT # Norfolk Time
# Palau (Belau)
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Palau 8:57:56 - LMT 1901 # Koror
9:00 - PWT # Palau Time
# Papua New Guinea
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Port_Moresby 9:48:40 - LMT 1880
9:48:32 - PMMT 1895 # Port Moresby Mean Time
10:00 - PGT # Papua New Guinea Time
# Pitcairn
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Pitcairn -8:40:20 - LMT 1901 # Adamstown
-8:30 - PNT 1998 Apr 27 00:00
-8:00 - PST # Pitcairn Standard Time
# American Samoa
Zone Pacific/Pago_Pago 12:37:12 - LMT 1879 Jul 5
-11:22:48 - LMT 1911
-11:30 - SAMT 1950 # Samoa Time
-11:00 - NST 1967 Apr # N=Nome
-11:00 - BST 1983 Nov 30 # B=Bering
-11:00 - SST # S=Samoa
# Samoa
# From Steffen Thorsen (2009-10-16):
# We have been in contact with the government of Samoa again, and received
# the following info:
#
# "Cabinet has now approved Daylight Saving to be effected next year
# commencing from the last Sunday of September 2010 and conclude first
# Sunday of April 2011."
#
# Background info:
# <a href="http://www.timeanddate.com/news/time/samoa-dst-plan-2009.html">
# http://www.timeanddate.com/news/time/samoa-dst-plan-2009.html
# </a>
#
# Samoa's Daylight Saving Time Act 2009 is available here, but does not
# contain any dates:
# <a href="http://www.parliament.gov.ws/documents/acts/Daylight%20Saving%20Act%20%202009%20%28English%29%20-%20Final%207-7-091.pdf">
# http://www.parliament.gov.ws/documents/acts/Daylight%20Saving%20Act%20%202009%20%28English%29%20-%20Final%207-7-091.pdf
# </a>
# From Laupue Raymond Hughes (2010-10-07):
# Please see
# <a href="http://www.mcil.gov.ws">
# http://www.mcil.gov.ws
# </a>,
# the Ministry of Commerce, Industry and Labour (sideframe) "Last Sunday
# September 2010 (26/09/10) - adjust clocks forward from 12:00 midnight
# to 01:00am and First Sunday April 2011 (03/04/11) - adjust clocks
# backwards from 1:00am to 12:00am"
# From Laupue Raymond Hughes (2011-03-07):
# I believe this will be posted shortly on the website
# <a href="http://www.mcil.gov.ws">
# www.mcil.gov.ws
# </a>
#
# PUBLIC NOTICE ON DAYLIGHT SAVING TIME
#
# Pursuant to the Daylight Saving Act 2009 and Cabinets decision,
# businesses and the general public are hereby advised that daylight
# saving time is on the first Saturday of April 2011 (02/04/11).
#
# The public is therefore advised that when the standard time strikes
# the hour of four oclock (4.00am or 0400 Hours) on the 2nd April 2011,
# then all instruments used to measure standard time are to be
# adjusted/changed to three oclock (3:00am or 0300Hrs).
#
# Margaret Fruean ACTING CHIEF EXECUTIVE OFFICER MINISTRY OF COMMERCE,
# INDUSTRY AND LABOUR 28th February 2011
# From David Zuelke (2011-05-09):
# Subject: Samoa to move timezone from east to west of international date line
#
# <a href="http://www.morningstar.co.uk/uk/markets/newsfeeditem.aspx?id=138501958347963">
# http://www.morningstar.co.uk/uk/markets/newsfeeditem.aspx?id=138501958347963
# </a>
# From Mark Sim-Smith (2011-08-17):
# I have been in contact with Leilani Tuala Warren from the Samoa Law
# Reform Commission, and she has sent me a copy of the Bill that she
# confirmed has been passed...Most of the sections are about maps rather
# than the time zone change, but I'll paste the relevant bits below. But
# the essence is that at midnight 29 Dec (UTC-11 I suppose), Samoa
# changes from UTC-11 to UTC+13:
#
# International Date Line Bill 2011
#
# AN ACT to provide for the change to standard time in Samoa and to make
# consequential amendments to the position of the International Date
# Line, and for related purposes.
#
# BE IT ENACTED by the Legislative Assembly of Samoa in Parliament
# assembled as follows:
#
# 1. Short title and commencement-(1) This Act may be cited as the
# International Date Line Act 2011. (2) Except for section 5(3) this Act
# commences at 12 o'clock midnight, on Thursday 29th December 2011. (3)
# Section 5(3) commences on the date of assent by the Head of State.
#
# [snip]
#
# 3. Interpretation - [snip] "Samoa standard time" in this Act and any
# other statute of Samoa which refers to 'Samoa standard time' means the
# time 13 hours in advance of Co-ordinated Universal Time.
#
# 4. Samoa standard time - (1) Upon the commencement of this Act, Samoa
# standard time shall be set at 13 hours in advance of Co-ordinated
# Universal Time for the whole of Samoa. (2) All references to Samoa's
# time zone and to Samoa standard time in Samoa in all legislation and
# instruments after the commencement of this Act shall be references to
# Samoa standard time as provided for in this Act. (3) Nothing in this
# Act affects the provisions of the Daylight Saving Act 2009, except that
# it defines Samoa standard time....
# From Laupue Raymond Hughes (2011-09-02):
# <a href="http://www.mcil.gov.ws/mcil_publications.html">
# http://www.mcil.gov.ws/mcil_publications.html
# </a>
#
# here is the official website publication for Samoa DST and dateline change
#
# DST
# Year End Time Start Time
# 2011 - - - - - - 24 September 3:00am to 4:00am
# 2012 01 April 4:00am to 3:00am - - - - - -
#
# Dateline Change skip Friday 30th Dec 2011
# Thursday 29th December 2011 23:59:59 Hours
# Saturday 31st December 2011 00:00:00 Hours
#
# Clarification by Tim Parenti (2012-01-03):
# Although Samoa has used Daylight Saving Time in the 2010-2011 and 2011-2012
# seasons, there is not yet any indication that this trend will continue on
# a regular basis. For now, we have explicitly listed the transitions below.
#
# From Nicky (2012-09-10):
# Daylight Saving Time commences on Sunday 30th September 2012 and
# ends on Sunday 7th of April 2013.
#
# Please find link below for more information.
# http://www.mcil.gov.ws/mcil_publications.html
#
# That publication also includes dates for Summer of 2013/4 as well
# which give the impression of a pattern in selecting dates for the
# future, so for now, we will guess this will continue.
# Western Samoa
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule WS 2012 max - Sep lastSun 3:00 1 D
Rule WS 2012 max - Apr Sun>=1 4:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Apia 12:33:04 - LMT 1879 Jul 5
-11:26:56 - LMT 1911
-11:30 - SAMT 1950 # Samoa Time
-11:00 - WST 2010 Sep 26
-11:00 1:00 WSDT 2011 Apr 2 4:00
-11:00 - WST 2011 Sep 24 3:00
-11:00 1:00 WSDT 2011 Dec 30
13:00 1:00 WSDT 2012 Apr Sun>=1 4:00
13:00 WS WS%sT
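#
# The 2011 dateline jump follows directly from the zone data above: one
# second after 2011-12-29 23:59:59 (offset -10:00, DST on the old side of
# the line) the local calendar reads 2011-12-31 00:00:00 (+14:00), and
# December 30 never occurs. A sketch using Python's zoneinfo:
#
#	from datetime import datetime, timedelta, timezone
#	from zoneinfo import ZoneInfo
#	apia = ZoneInfo("Pacific/Apia")
#	t = datetime(2011, 12, 30, 9, 59, 59, tzinfo=timezone.utc)
#	print(t.astimezone(apia))                          # 2011-12-29 23:59:59-10:00
#	print((t + timedelta(seconds=1)).astimezone(apia)) # 2011-12-31 00:00:00+14:00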
# Solomon Is
# excludes Bougainville, for which see Papua New Guinea
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Guadalcanal 10:39:48 - LMT 1912 Oct # Honiara
11:00 - SBT # Solomon Is Time
# Tokelau Is
#
# From Gwillim Law (2011-12-29)
# A correspondent informed me that Tokelau, like Samoa, will be skipping
# December 31 this year ...
#
# From Steffen Thorsen (2012-07-25)
# ... we double checked by calling hotels and offices based in Tokelau asking
# about the time there, and they all told a time that agrees with UTC+13....
# Shanks says UTC-10 from 1901 [but] ... there is a good chance the change
# actually was to UTC-11 back then.
#
# From Paul Eggert (2012-07-25)
# A Google Books snippet of Appendix to the Journals of the House of
# Representatives of New Zealand, Session 1948,
# <http://books.google.com/books?id=ZaVCAQAAIAAJ>, page 65, says Tokelau
# was "11 hours slow on G.M.T." Go with Thorsen and assume Shanks & Pottenger
# are off by an hour starting in 1901.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Fakaofo -11:24:56 - LMT 1901
-11:00 - TKT 2011 Dec 30 # Tokelau Time
13:00 - TKT
# Tonga
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Tonga 1999 only - Oct 7 2:00s 1:00 S
Rule Tonga 2000 only - Mar 19 2:00s 0 -
Rule Tonga 2000 2001 - Nov Sun>=1 2:00 1:00 S
Rule Tonga 2001 2002 - Jan lastSun 2:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Tongatapu 12:19:20 - LMT 1901
12:20 - TOT 1941 # Tonga Time
13:00 - TOT 1999
13:00 Tonga TO%sT
# Tuvalu
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Funafuti 11:56:52 - LMT 1901
12:00 - TVT # Tuvalu Time
# US minor outlying islands
# Howland, Baker
# Howland was mined for guano by American companies 1857-1878 and British
# 1886-1891; Baker was similar but exact dates are not known.
# Inhabited by civilians 1935-1942; U.S. military bases 1943-1944;
# uninhabited thereafter.
# Howland observed Hawaii Standard Time (UTC-10:30) in 1937;
# see page 206 of Elgen M. Long and Marie K. Long,
# Amelia Earhart: the Mystery Solved, Simon & Schuster (2000).
# So most likely Howland and Baker observed Hawaii Time from 1935
# until they were abandoned after the war.
# Jarvis
# Mined for guano by American companies 1857-1879 and British 1883?-1891?.
# Inhabited by civilians 1935-1942; IGY scientific base 1957-1958;
# uninhabited thereafter.
# no information; was probably like Pacific/Kiritimati
# Johnston
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Johnston -10:00 - HST
# Kingman
# uninhabited
# Midway
#
# From Mark Brader (2005-01-23):
# [Fallacies and Fantasies of Air Transport History, by R.E.G. Davies,
# published 1994 by Paladwr Press, McLean, VA, USA; ISBN 0-9626483-5-3]
# reproduced a Pan American Airways timetable from 1936, for their weekly
# "Orient Express" flights between San Francisco and Manila, and connecting
# flights to Chicago and the US East Coast. As it uses some time zone
# designations that I've never seen before:....
# Fri. 6:30A Lv. HONOLULU (Pearl Harbor), H.I. H.L.T. Ar. 5:30P Sun.
# " 3:00P Ar. MIDWAY ISLAND . . . . . . . . . M.L.T. Lv. 6:00A "
#
Zone Pacific/Midway -11:49:28 - LMT 1901
-11:00 - NST 1956 Jun 3
-11:00 1:00 NDT 1956 Sep 2
-11:00 - NST 1967 Apr # N=Nome
-11:00 - BST 1983 Nov 30 # B=Bering
-11:00 - SST # S=Samoa
# Palmyra
# uninhabited since World War II; was probably like Pacific/Kiritimati
# Wake
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Wake 11:06:28 - LMT 1901
12:00 - WAKT # Wake Time
# Vanuatu
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Vanuatu 1983 only - Sep 25 0:00 1:00 S
Rule Vanuatu 1984 1991 - Mar Sun>=23 0:00 0 -
Rule Vanuatu 1984 only - Oct 23 0:00 1:00 S
Rule Vanuatu 1985 1991 - Sep Sun>=23 0:00 1:00 S
Rule Vanuatu 1992 1993 - Jan Sun>=23 0:00 0 -
Rule Vanuatu 1992 only - Oct Sun>=23 0:00 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Efate 11:13:16 - LMT 1912 Jan 13 # Vila
11:00 Vanuatu VU%sT # Vanuatu Time
# Wallis and Futuna
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Wallis 12:15:20 - LMT 1901
12:00 - WFT # Wallis & Futuna Time
###############################################################################
# NOTES
# This data is by no means authoritative; if you think you know better,
# go ahead and edit the file (and please send any changes to
# tz@iana.org for general use in the future).
# From Paul Eggert (2006-03-22):
# A good source for time zone historical data outside the U.S. is
# Thomas G. Shanks and Rique Pottenger, The International Atlas (6th edition),
# San Diego: ACS Publications, Inc. (2003).
#
# Gwillim Law writes that a good source
# for recent time zone data is the International Air Transport
# Association's Standard Schedules Information Manual (IATA SSIM),
# published semiannually. Law sent in several helpful summaries
# of the IATA's data after 1990.
#
# Except where otherwise noted, Shanks & Pottenger is the source for
# entries through 1990, and IATA SSIM is the source for entries afterwards.
#
# Another source occasionally used is Edward W. Whitman, World Time Differences,
# Whitman Publishing Co, 2 Niagara Av, Ealing, London (undated), which
# I found in the UCLA library.
#
# A reliable and entertaining source about time zones is
# Derek Howse, Greenwich time and longitude, Philip Wilson Publishers (1997).
#
# I invented the abbreviations marked `*' in the following table;
# the rest are from earlier versions of this file, or from other sources.
# Corrections are welcome!
# std dst
# LMT Local Mean Time
# 8:00 WST WST Western Australia
# 8:45 CWST CWST Central Western Australia*
# 9:00 JST Japan
# 9:30 CST CST Central Australia
# 10:00 EST EST Eastern Australia
# 10:00 ChST Chamorro
# 10:30 LHST LHST Lord Howe*
# 11:30 NZMT NZST New Zealand through 1945
# 12:00 NZST NZDT New Zealand 1946-present
# 12:45 CHAST CHADT Chatham*
# -11:00 SST Samoa
# -10:00 HST Hawaii
# - 8:00 PST Pitcairn*
#
# See the `northamerica' file for Hawaii.
# See the `southamerica' file for Easter I and the Galapagos Is.
###############################################################################
# Australia
# From Paul Eggert (2005-12-08):
# <a href="http://www.bom.gov.au/climate/averages/tables/dst_times.shtml">
# Implementation Dates of Daylight Saving Time within Australia
# </a> summarizes daylight saving issues in Australia.
# From Arthur David Olson (2005-12-12):
# <a href="http://www.lawlink.nsw.gov.au/lawlink/Corporate/ll_agdinfo.nsf/pages/community_relations_daylight_saving">
# Lawlink NSW:Daylight Saving in New South Wales
# </a> covers New South Wales in particular.
# From John Mackin (1991-03-06):
# We in Australia have _never_ referred to DST as `daylight' time.
# It is called `summer' time. Now by a happy coincidence, `summer'
# and `standard' happen to start with the same letter; hence, the
# abbreviation does _not_ change...
# The legislation does not actually define abbreviations, at least
# in this State, but the abbreviation is just commonly taken to be the
# initials of the phrase, and the legislation here uniformly uses
# the phrase `summer time' and does not use the phrase `daylight
# time'.
# Announcers on the Commonwealth radio network, the ABC (for Australian
# Broadcasting Commission), use the phrases `Eastern Standard Time'
# or `Eastern Summer Time'. (Note, though, that as I say in the
# current australasia file, there is really no such thing.) Announcers
# on its overseas service, Radio Australia, use the same phrases
# prefixed by the word `Australian' when referring to local times;
# time announcements on that service, naturally enough, are made in UTC.
# From Arthur David Olson (1992-03-08):
# Given the above, what's chosen for year-round use is:
# CST for any place operating at a GMTOFF of 9:30
# WST for any place operating at a GMTOFF of 8:00
# EST for any place operating at a GMTOFF of 10:00
# From Chuck Soper (2006-06-01):
# I recently found this Australian government web page on time zones:
# <http://www.australia.gov.au/about-australia-13time>
# And this government web page lists time zone names and abbreviations:
# <http://www.bom.gov.au/climate/averages/tables/daysavtm.shtml>
# From Paul Eggert (2001-04-05), summarizing a long discussion about "EST"
# versus "AEST" etc.:
#
# I see the following points of dispute:
#
# * How important are unique time zone abbreviations?
#
# Here I tend to agree with the point (most recently made by Chris
# Newman) that unique abbreviations should not be essential for proper
# operation of software. We have other instances of ambiguity
# (e.g. "IST" denoting both "Israel Standard Time" and "Indian
# Standard Time"), and they are not likely to go away any time soon.
# In the old days, some software mistakenly relied on unique
# abbreviations, but this is becoming less true with time, and I don't
# think it's that important to cater to such software these days.
#
# On the other hand, there is another motivation for unambiguous
# abbreviations: it cuts down on human confusion. This is
# particularly true for Australia, where "EST" can mean one thing for
# time T and a different thing for time T plus 1 second.
#
# * Does the relevant legislation indicate which abbreviations should be used?
#
# Here I tend to think that things are a mess, just as they are in
# many other countries. We Americans are currently disagreeing about
# which abbreviation to use for the newly legislated Chamorro Standard
# Time, for example.
#
# Personally, I would prefer to use common practice; I would like to
# refer to legislation only for examples of common practice, or as a
# tiebreaker.
#
# * Do Australians more often use "Eastern Daylight Time" or "Eastern
# Summer Time"? Do they typically prefix the time zone names with
# the word "Australian"?
#
# My own impression is that both "Daylight Time" and "Summer Time" are
# common and are widely understood, but that "Summer Time" is more
# popular; and that the leading "A" is also common but is omitted more
# often than not. I just used AltaVista advanced search and got the
# following count of page hits:
#
# 1,103 "Eastern Summer Time" AND domain:au
# 971 "Australian Eastern Summer Time" AND domain:au
# 613 "Eastern Daylight Time" AND domain:au
# 127 "Australian Eastern Daylight Time" AND domain:au
#
# Here "Summer" seems quite a bit more popular than "Daylight",
# particularly when we know the time zone is Australian and not US,
# say. The "Australian" prefix seems to be popular for Eastern Summer
# Time, but unpopular for Eastern Daylight Time.
#
# For abbreviations, tools like AltaVista are less useful because of
# ambiguity. Many hits are not really time zones, unfortunately, and
# many hits denote US time zones and not Australian ones. But here
# are the hit counts anyway:
#
# 161,304 "EST" and domain:au
# 25,156 "EDT" and domain:au
# 18,263 "AEST" and domain:au
# 10,416 "AEDT" and domain:au
#
# 14,538 "CST" and domain:au
# 5,728 "CDT" and domain:au
# 176 "ACST" and domain:au
# 29 "ACDT" and domain:au
#
# 7,539 "WST" and domain:au
# 68 "AWST" and domain:au
#
# This data suggest that Australians tend to omit the "A" prefix in
# practice. The situation for "ST" versus "DT" is less clear, given
# the ambiguities involved.
#
# * How do Australians feel about the abbreviations in the tz database?
#
# If you just count Australians on this list, I count 2 in favor and 3
# against. One of the "against" votes (David Keegel) counseled delay,
# saying that both AEST/AEDT and EST/EST are widely used and
# understood in Australia.
# From Paul Eggert (1995-12-19):
# Shanks & Pottenger report 2:00 for all autumn changes in Australia and NZ.
# Mark Prior writes that his newspaper
# reports that NSW's fall 1995 change will occur at 2:00,
# but Robert Elz says it's been 3:00 in Victoria since 1970
# and perhaps the newspaper's `2:00' is referring to standard time.
# For now we'll continue to assume 2:00s for changes since 1960.
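#
# The "s" suffix on an AT value means local standard time rather than wall
# clock time, so with 1:00 of saving in effect a "2:00s" changeover and a
# "3:00" wall-clock changeover name the same instant. A small illustration
# (Python; offsets for Victoria assumed, AEST +10:00):
#
#	std_offset, dst_saving, at_standard = 10, 1, 2  # "2:00s"
#	print(at_standard + dst_saving)         # 3 -> 3:00 on a clock still on DST
#	print((at_standard - std_offset) % 24)  # 16 -> 16:00 UTC the previous day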
# From Eric Ulevik (1998-01-05):
#
# Here are some URLs to Australian time legislation. These URLs are stable,
# and should probably be included in the data file. There are probably more
# relevant entries in this database.
#
# NSW (including LHI and Broken Hill):
# <a href="http://www.austlii.edu.au/au/legis/nsw/consol_act/sta1987137/index.html">
# Standard Time Act 1987 (updated 1995-04-04)
# </a>
# ACT
# <a href="http://www.austlii.edu.au/au/legis/act/consol_act/stasta1972279/index.html">
# Standard Time and Summer Time Act 1972
# </a>
# SA
# <a href="http://www.austlii.edu.au/au/legis/sa/consol_act/sta1898137/index.html">
# Standard Time Act, 1898
# </a>
# From David Grosz (2005-06-13):
# It was announced last week that Daylight Saving would be extended by
# one week next year to allow for the 2006 Commonwealth Games.
# Daylight Saving is now to end for next year only on the first Sunday
# in April instead of the last Sunday in March.
#
# From Gwillim Law (2005-06-14):
# I did some Googling and found that all of those states (and territory) plan
# to extend DST together in 2006.
# ACT: http://www.cmd.act.gov.au/mediareleases/fileread.cfm?file=86.txt
# New South Wales: http://www.thecouriermail.news.com.au/common/story_page/0,5936,15538869%255E1702,00.html
# South Australia: http://www.news.com.au/story/0,10117,15555031-1246,00.html
# Tasmania: http://www.media.tas.gov.au/release.php?id=14772
# Victoria: I wasn't able to find anything separate, but the other articles
# allude to it.
# But not Queensland
# http://www.news.com.au/story/0,10117,15564030-1248,00.html.
# Northern Territory
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# # The NORTHERN TERRITORY.. [ Courtesy N.T. Dept of the Chief Minister ]
# # [ Nov 1990 ]
# # N.T. have never utilised any DST due to sub-tropical/tropical location.
# ...
# Zone Australia/North 9:30 - CST
# From Bradley White (1991-03-04):
# A recent excerpt from an Australian newspaper...
# the Northern Territory do[es] not have daylight saving.
# Western Australia
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# # The state of WESTERN AUSTRALIA.. [ Courtesy W.A. dept Premier+Cabinet ]
# # [ Nov 1990 ]
# # W.A. suffers from a great deal of public and political opposition to
# # DST in principle. A bill is brought before parliament in most years, but
# # usually defeated either in the upper house, or in party caucus
# # before reaching parliament.
# ...
# Zone Australia/West 8:00 AW %sST
# ...
# Rule AW 1974 only - Oct lastSun 2:00 1:00 D
# Rule AW 1975 only - Mar Sun>=1 3:00 0 W
# Rule AW 1983 only - Oct lastSun 2:00 1:00 D
# Rule AW 1984 only - Mar Sun>=1 3:00 0 W
# From Bradley White (1991-03-04):
# A recent excerpt from an Australian newspaper...
# Western Australia...do[es] not have daylight saving.
# From John D. Newman via Bradley White (1991-11-02):
# Western Australia is still on "winter time". Some DH in Sydney
# rang me at home a few days ago at 6.00am. (He had just arrived at
# work at 9.00am.)
# W.A. is switching to Summer Time on Nov 17th just to confuse
# everybody again.
# From Arthur David Olson (1992-03-08):
# The 1992 ending date used in the rules is a best guess;
# it matches what was used in the past.
# <a href="http://www.bom.gov.au/faq/faqgen.htm">
# The Australian Bureau of Meteorology FAQ
# </a> (1999-09-27) writes that Giles Meteorological Station uses
# South Australian time even though it's located in Western Australia.
# Queensland
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# # The state of QUEENSLAND.. [ Courtesy Qld. Dept Premier Econ&Trade Devel ]
# # [ Dec 1990 ]
# ...
# Zone Australia/Queensland 10:00 AQ %sST
# ...
# Rule AQ 1971 only - Oct lastSun 2:00 1:00 D
# Rule AQ 1972 only - Feb lastSun 3:00 0 E
# Rule AQ 1989 max - Oct lastSun 2:00 1:00 D
# Rule AQ 1990 max - Mar Sun>=1 3:00 0 E
# From Bradley White (1989-12-24):
# "Australia/Queensland" now observes daylight time (i.e. from
# October 1989).
# From Bradley White (1991-03-04):
# A recent excerpt from an Australian newspaper...
# ...Queensland...[has] agreed to end daylight saving
# at 3am tomorrow (March 3)...
# From John Mackin (1991-03-06):
# I can certainly confirm for my part that Daylight Saving in NSW did in fact
# end on Sunday, 3 March. I don't know at what hour, though. (It surprised
# me.)
# From Bradley White (1992-03-08):
# ...there was recently a referendum in Queensland which resulted
# in the experimental daylight saving system being abandoned. So, ...
# ...
# Rule QLD 1989 1991 - Oct lastSun 2:00 1:00 D
# Rule QLD 1990 1992 - Mar Sun>=1 3:00 0 S
# ...
# From Arthur David Olson (1992-03-08):
# The chosen rules are the union of the 1971/1972 change and the 1989-1992 changes.
# From Christopher Hunt (2006-11-21), after an advance warning
# from Jesper Norgaard Welen (2006-11-01):
# WA are trialing DST for three years.
# <http://www.parliament.wa.gov.au/parliament/bills.nsf/9A1B183144403DA54825721200088DF1/$File/Bill175-1B.pdf>
# From Rives McDow (2002-04-09):
# The most interesting region I have found consists of three towns on the
# southern coast.... South Australia observes daylight saving time; Western
# Australia does not. The two states are one and a half hours apart. The
# residents decided to forget about this nonsense of changing the clock so
# much and set the local time 20 hours and 45 minutes from the
# international date line, or right in the middle of the time of South
# Australia and Western Australia....
#
# From Paul Eggert (2002-04-09):
# This is confirmed by the section entitled
# "What's the deal with time zones???" in
# <http://www.earthsci.unimelb.edu.au/~awatkins/null.html>.
#
# From Alex Livingston (2006-12-07):
# ... it was just on four years ago that I drove along the Eyre Highway,
# which passes through eastern Western Australia close to the southern
# coast of the continent.
#
# I paid particular attention to the time kept there. There can be no
# dispute that UTC+08:45 was considered "the time" from the border
# village just inside the border with South Australia to as far west
# as just east of Caiguna. There can also be no dispute that Eucla is
# the largest population centre in this zone....
#
# Now that Western Australia is observing daylight saving, the
# question arose whether this part of the state would follow suit. I
# just called the border village and confirmed that indeed they have,
# meaning that they are now observing UTC+09:45.
#
# (2006-12-09):
# I personally doubt that either experimentation with daylight saving
# in WA or its introduction in SA had anything to do with the genesis
# of this time zone. My hunch is that it's been around since well
# before 1975. I remember seeing it noted on road maps decades ago.
# From Paul Eggert (2006-12-15):
# For lack of better info, assume the tradition dates back to the
# introduction of standard time in 1895.
# southeast Australia
#
# From Paul Eggert (2007-07-23):
# Starting autumn 2008 Victoria, NSW, South Australia, Tasmania and the ACT
# end DST the first Sunday in April and start DST the first Sunday in October.
# http://www.theage.com.au/news/national/daylight-savings-to-span-six-months/2007/06/27/1182623966703.html
# South Australia
# From Bradley White (1991-03-04):
# A recent excerpt from an Australian newspaper...
# ...South Australia...[has] agreed to end daylight saving
# at 3am tomorrow (March 3)...
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# # The state of SOUTH AUSTRALIA....[ Courtesy of S.A. Dept of Labour ]
# # [ Nov 1990 ]
# ...
# Zone Australia/South 9:30 AS %sST
# ...
# Rule AS 1971 max - Oct lastSun 2:00 1:00 D
# Rule AS 1972 1985 - Mar Sun>=1 3:00 0 C
# Rule AS 1986 1990 - Mar Sun>=15 3:00 0 C
# Rule AS 1991 max - Mar Sun>=1 3:00 0 C
# From Bradley White (1992-03-11):
# Recent correspondence with a friend in Adelaide
# contained the following exchange: "Due to the Adelaide Festival,
# South Australia delays setting back our clocks for a few weeks."
# From Robert Elz (1992-03-13):
# I heard that apparently (or at least, it appears that)
# South Aus will have an extra 3 weeks daylight saving every even
# numbered year (from 1990). That's when the Adelaide Festival
# is on...
# From Robert Elz (1992-03-16, 00:57:07 +1000):
# DST didn't end in Adelaide today (yesterday)....
# But whether it's "4th Sunday" or "2nd last Sunday" I have no idea whatever...
# (it's just as likely to be "the Sunday we pick for this year"...).
# From Bradley White (1994-04-11):
# If Sun, 15 March, 1992 was at +1030 as kre asserts, but yet Sun, 20 March,
# 1994 was at +0930 as John Connolly's customer seems to assert, then I can
# only conclude that the actual rule is more complicated....
# From John Warburton (1994-10-07):
# The new Daylight Savings dates for South Australia ...
# was gazetted in the Government Hansard on Sep 26 1994....
# start on last Sunday in October and end in last sunday in March.
# From Paul Eggert (2007-07-23):
# See "southeast Australia" above for 2008 and later.
# Tasmania
# The rules for 1967 through 1991 were reported by George Shepherd
# via Simon Woodhead via Robert Elz (1991-03-06):
# # The state of TASMANIA.. [Courtesy Tasmanian Dept of Premier + Cabinet ]
# # [ Nov 1990 ]
# From Bill Hart via Guy Harris (1991-10-10):
# Oh yes, the new daylight savings rules are uniquely tasmanian, we have
# 6 weeks a year now when we are out of sync with the rest of Australia
# (but nothing new about that).
# From Alex Livingston (1999-10-04):
# I heard on the ABC (Australian Broadcasting Corporation) radio news on the
# (long) weekend that Tasmania, which usually goes its own way in this regard,
# has decided to join with most of NSW, the ACT, and most of Victoria
# (Australia) and start daylight saving on the last Sunday in August in 2000
# instead of the first Sunday in October.
# Sim Alam (2000-07-03) reported a legal citation for the 2000/2001 rules:
# http://www.thelaw.tas.gov.au/fragview/42++1968+GS3A@EN+2000070300
# From Paul Eggert (2007-07-23):
# See "southeast Australia" above for 2008 and later.
# Victoria
# The rules for 1971 through 1991 were reported by George Shepherd
# via Simon Woodhead via Robert Elz (1991-03-06):
# # The state of VICTORIA.. [ Courtesy of Vic. Dept of Premier + Cabinet ]
# # [ Nov 1990 ]
# From Scott Harrington (2001-08-29):
# On KQED's "City Arts and Lectures" program last night I heard an
# interesting story about daylight savings time. Dr. John Heilbron was
# discussing his book "The Sun in the Church: Cathedrals as Solar
# Observatories"[1], and in particular the Shrine of Remembrance[2] located
# in Melbourne, Australia.
#
# Apparently the shrine's main purpose is a beam of sunlight which
# illuminates a special spot on the floor at the 11th hour of the 11th day
# of the 11th month (Remembrance Day) every year in memory of Australia's
# fallen WWI soldiers. And if you go there on Nov. 11, at 11am local time,
# you will indeed see the sunbeam illuminate the special spot at the
# expected time.
#
# However, that is only because of some special mirror contraption that had
# to be employed, since due to daylight savings time, the true solar time of
# the remembrance moment occurs one hour later (or earlier?). Perhaps
# someone with more information on this jury-rig can tell us more.
#
# [1] http://www.hup.harvard.edu/catalog/HEISUN.html
# [2] http://www.shrine.org.au
# From Paul Eggert (2007-07-23):
# See "southeast Australia" above for 2008 and later.
# New South Wales
# From Arthur David Olson:
# New South Wales and subjurisdictions have their own ideas of a fun time.
# Based on law library research by John Mackin,
# who notes:
# In Australia, time is not legislated federally, but rather by the
# individual states. Thus, while such terms as ``Eastern Standard Time''
# [I mean, of course, Australian EST, not any other kind] are in common
# use, _they have NO REAL MEANING_, as they are not defined in the
# legislation. This is very important to understand.
# I have researched New South Wales time only...
# From Eric Ulevik (1999-05-26):
# DST will start in NSW on the last Sunday of August, rather than the usual
# October in 2000. [See: Matthew Moore,
# <a href="http://www.smh.com.au/news/9905/26/pageone/pageone4.html">
# Two months more daylight saving
# </a>
# Sydney Morning Herald (1999-05-26).]
# From Paul Eggert (1999-09-27):
# See the following official NSW source:
# <a href="http://dir.gis.nsw.gov.au/cgi-bin/genobject/document/other/daylightsaving/tigGmZ">
# Daylight Saving in New South Wales.
# </a>
#
# Narrabri Shire (NSW) council has announced it will ignore the extension of
# daylight saving next year. See:
# <a href="http://abc.net.au/news/regionals/neweng/monthly/regeng-22jul1999-1.htm">
# Narrabri Council to ignore daylight saving
# </a> (1999-07-22). For now, we'll wait to see if this really happens.
#
# Victoria will follow NSW. See:
# <a href="http://abc.net.au/local/news/olympics/1999/07/item19990728112314_1.htm">
# Vic to extend daylight saving
# </a> (1999-07-28).
#
# However, South Australia rejected the DST request. See:
# <a href="http://abc.net.au/news/olympics/1999/07/item19990719151754_1.htm">
# South Australia rejects Olympics daylight savings request
# </a> (1999-07-19).
#
# Queensland also will not observe DST for the Olympics. See:
# <a href="http://abc.net.au/news/olympics/1999/06/item19990601114608_1.htm">
# Qld says no to daylight savings for Olympics
# </a> (1999-06-01), which quotes Queensland Premier Peter Beattie as saying
# ``Look you've got to remember in my family when this came up last time
# I voted for it, my wife voted against it and she said to me it's all very
# well for you, you don't have to worry about getting the children out of
# bed, getting them to school, getting them to sleep at night.
# I've been through all this argument domestically...my wife rules.''
#
# Broken Hill will stick with South Australian time in 2000. See:
# <a href="http://abc.net.au/news/regionals/brokenh/monthly/regbrok-21jul1999-6.htm">
# Broken Hill to be behind the times
# </a> (1999-07-21).
# IATA SSIM (1998-09) says that the spring 2000 change for Australian
# Capital Territory, New South Wales except Lord Howe Island and Broken
# Hill, and Victoria will be August 27, presumably due to the Sydney Olympics.
# From Eric Ulevik, referring to Sydney's Sun Herald (2000-08-13), page 29:
# The Queensland Premier Peter Beattie is encouraging northern NSW
# towns to use Queensland time.
# From Paul Eggert (2007-07-23):
# See "southeast Australia" above for 2008 and later.
# Yancowinna
# From John Mackin (1989-01-04):
# `Broken Hill' means the County of Yancowinna.
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# # YANCOWINNA.. [ Confirmation courtesy of Broken Hill Postmaster ]
# # [ Dec 1990 ]
# ...
# # Yancowinna uses Central Standard Time, despite [its] location on the
# # New South Wales side of the S.A. border. Most business and social dealings
# # are with CST zones, therefore CST is legislated by local government
# # although the switch to Summer Time occurs in line with N.S.W. There have
# # been years when this did not apply, but the historical data is not
# # presently available.
# Zone Australia/Yancowinna 9:30 AY %sST
# ...
# Rule AY 1971 1985 - Oct lastSun 2:00 1:00 D
# Rule AY 1972 only - Feb lastSun 3:00 0 C
# [followed by other Rules]
# Lord Howe Island
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# LHI... [ Courtesy of Pauline Van Winsen ]
# [ Dec 1990 ]
# Lord Howe Island is located off the New South Wales coast, and is half an
# hour ahead of NSW time.
# From James Lonergan, Secretary, Lord Howe Island Board (2000-01-27):
# Lord Howe Island summer time in 2000/2001 will commence on the same
# date as the rest of NSW (i.e. 2000-08-27). For your information the
# Lord Howe Island Board (controlling authority for the Island) is
# seeking the community's views on various options for summer time
# arrangements on the Island, e.g. advance clocks by 1 full hour
# instead of only 30 minutes. [Dependent] on the wishes of residents
# the Board may approach the NSW government to change the existing
# arrangements. The starting date for summer time on the Island will
# however always coincide with the rest of NSW.
# From James Lonergan, Secretary, Lord Howe Island Board (2000-10-25):
# Lord Howe Island advances clocks by 30 minutes during DST in NSW and retards
# clocks by 30 minutes when DST finishes. Since DST was most recently
# introduced in NSW, the "changeover" time on the Island has been 02:00 as
# shown on clocks on LHI. I guess this means that for 30 minutes at the start
# of DST, LHI is actually 1 hour ahead of the rest of NSW.
# From Paul Eggert (2006-03-22):
# For Lord Howe dates we use Shanks & Pottenger through 1989, and
# Lonergan thereafter. For times we use Lonergan.
# From Paul Eggert (2007-07-23):
# See "southeast Australia" above for 2008 and later.
# From Steffen Thorsen (2009-04-28):
# According to the official press release, South Australia's extended daylight
# saving period will continue with the same rules as used during the 2008-2009
# summer (southern hemisphere).
#
# From
# <a href="http://www.safework.sa.gov.au/uploaded_files/DaylightDatesSet.pdf">
# http://www.safework.sa.gov.au/uploaded_files/DaylightDatesSet.pdf
# </a>
# The extended daylight saving period that South Australia has been trialling
# for over the last year is now set to be ongoing.
# Daylight saving will continue to start on the first Sunday in October each
# year and finish on the first Sunday in April the following year.
# Industrial Relations Minister, Paul Caica, says this provides South Australia
# with a consistent half hour time difference with NSW, Victoria, Tasmania and
# the ACT for all 52 weeks of the year...
#
# We have a wrap-up here:
# <a href="http://www.timeanddate.com/news/time/south-australia-extends-dst.html">
# http://www.timeanddate.com/news/time/south-australia-extends-dst.html
# </a>
###############################################################################
# New Zealand
# From Mark Davies (1990-10-03):
# the 1989/90 year was a trial of an extended "daylight saving" period.
# This trial was deemed successful and the extended period adopted for
# subsequent years (with the addition of a further week at the start).
# source -- phone call to Ministry of Internal Affairs Head Office.
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# # The Country of New Zealand (Australia's east island -) Gee they hate that!
# # or is Australia the west island of N.Z.
# # [ courtesy of Geoff Tribble.. Auckland N.Z. ]
# # [ Nov 1990 ]
# ...
# Rule NZ 1974 1988 - Oct lastSun 2:00 1:00 D
# Rule NZ 1989 max - Oct Sun>=1 2:00 1:00 D
# Rule NZ 1975 1989 - Mar Sun>=1 3:00 0 S
# Rule NZ 1990 max - Mar lastSun 3:00 0 S
# ...
# Zone NZ 12:00 NZ NZ%sT # New Zealand
# Zone NZ-CHAT 12:45 - NZ-CHAT # Chatham Island
# From Arthur David Olson (1992-03-08):
# The chosen rules use the Davies October 8 values for the start of DST in 1989
# rather than the October 1 value.
# From Paul Eggert (1995-12-19):
# Shanks & Pottenger report 2:00 for all autumn changes in Australia and NZ.
# Robert Uzgalis writes that the New Zealand Daylight
# Savings Time Order in Council dated 1990-06-18 specifies 2:00 standard
# time on both the first Sunday in October and the third Sunday in March.
# As with Australia, we'll assume the tradition is 2:00s, not 2:00.
#
# From Paul Eggert (2006-03-22):
# The Department of Internal Affairs (DIA) maintains a brief history,
# as does Carol Squires; see tz-link.htm for the full references.
# Use these sources in preference to Shanks & Pottenger.
#
# For Chatham, IATA SSIM (1991/1999) gives the NZ rules but with
# transitions at 2:45 local standard time; this confirms that Chatham
# is always exactly 45 minutes ahead of Auckland.
# From Colin Sharples (2007-04-30):
# DST will now start on the last Sunday in September, and end on the
# first Sunday in April. The changes take effect this year, meaning
# that DST will begin on 2007-09-30 and end on 2008-04-06.
# http://www.dia.govt.nz/diawebsite.nsf/wpg_URL/Services-Daylight-Saving-Daylight-saving-to-be-extended
###############################################################################
# Fiji
# Howse writes (p 153) that in 1879 the British governor of Fiji
# enacted an ordinance standardizing the islands on Antipodean Time
# instead of the American system (which was one day behind).
# From Rives McDow (1998-10-08):
# Fiji will introduce DST effective 0200 local time, 1998-11-01
# until 0300 local time 1999-02-28. Each year the DST period will
# be from the first Sunday in November until the last Sunday in February.
# From Paul Eggert (2000-01-08):
# IATA SSIM (1999-09) says DST ends 0100 local time. Go with McDow.
# From the BBC World Service (1998-10-31 11:32 UTC):
# The Fijian government says the main reasons for the time change are to
# improve productivity and reduce road accidents. But correspondents say it
# also hopes the move will boost Fiji's ability to compete with other pacific
# islands in the effort to attract tourists to witness the dawning of the new
# millennium.
# http://www.fiji.gov.fj/press/2000_09/2000_09_13-05.shtml (2000-09-13)
# reports that Fiji has discontinued DST.
# Johnston
# Johnston data is from usno1995.
# Kiribati
# From Paul Eggert (1996-01-22):
# Today's _Wall Street Journal_ (page 1) reports that Kiribati
# ``declared it the same day [throughout] the country as of Jan. 1, 1995''
# as part of the competition to be first into the 21st century.
# Kwajalein
# In comp.risks 14.87 (26 August 1993), Peter Neumann writes:
# I wonder what happened in Kwajalein, where there was NO Friday,
# 1993-08-20. Thursday night at midnight Kwajalein switched sides with
# respect to the International Date Line, to rejoin its fellow islands,
# going from 11:59 p.m. Thursday to 12:00 m. Saturday in a blink.
# N Mariana Is, Guam
# Howse writes (p 153) ``The Spaniards, on the other hand, reached the
# Philippines and the Ladrones from America,'' and implies that the Ladrones
# (now called the Marianas) kept American date for quite some time.
# For now, we assume the Ladrones switched at the same time as the Philippines;
# see Asia/Manila.
# US Public Law 106-564 (2000-12-23) made UTC+10 the official standard time,
# under the name "Chamorro Standard Time". There is no official abbreviation,
# but Congressman Robert A. Underwood, author of the bill that became law,
# wrote in a press release (2000-12-27) that he will seek the use of "ChST".
# Micronesia
# Alan Eugene Davis writes (1996-03-16),
# ``I am certain, having lived there for the past decade, that "Truk"
# (now properly known as Chuuk) ... is in the time zone GMT+10.''
#
# Shanks & Pottenger write that Truk switched from UTC+10 to UTC+11
# on 1978-10-01; ignore this for now.
# From Paul Eggert (1999-10-29):
# The Federated States of Micronesia Visitors Board writes in
# <a href="http://www.fsmgov.org/info/clocks.html">
# The Federated States of Micronesia - Visitor Information
# </a> (1999-01-26)
# that Truk and Yap are UTC+10, and Ponape and Kosrae are UTC+11.
# We don't know when Kosrae switched from UTC+12; assume January 1 for now.
# Midway
# From Charles T O'Connor, KMTH DJ (1956),
# quoted in the KTMH section of the Radio Heritage Collection
# <http://radiodx.com/spdxr/KMTH.htm> (2002-12-31):
# For the past two months we've been on what is known as Daylight
# Saving Time. This time has put us on air at 5am in the morning,
# your time down there in New Zealand. Starting September 2, 1956
# we'll again go back to Standard Time. This'll mean that we'll go to
# air at 6am your time.
#
# From Paul Eggert (2003-03-23):
# We don't know the date of that quote, but we'll guess they
# started DST on June 3. Possibly DST was observed other years
# in Midway, but we have no record of it.
# Pitcairn
# From Rives McDow (1999-11-08):
# A Proclamation was signed by the Governor of Pitcairn on the 27th March 1998
# with regard to Pitcairn Standard Time. The Proclamation is as follows.
#
# The local time for general purposes in the Islands shall be
# Co-ordinated Universal time minus 8 hours and shall be known
# as Pitcairn Standard Time.
#
# ... I have also seen Pitcairn listed as UTC minus 9 hours in several
# references, and can only assume that this was an error in interpretation
# somehow in light of this proclamation.
# From Rives McDow (1999-11-09):
# The Proclamation regarding Pitcairn time came into effect on 27 April 1998
# ... at midnight.
# From Howie Phelps (1999-11-10), who talked to a Pitcairner via shortwave:
# Betty Christian told me yesterday that their local time is the same as
# Pacific Standard Time. They used to be 1/2 hour different from us here in
# Sacramento but it was changed a couple of years ago.
# Samoa
# Howse writes (p 153, citing p 10 of the 1883-11-18 New York Herald)
# that in 1879 the King of Samoa decided to change
# ``the date in his kingdom from the Antipodean to the American system,
# ordaining -- by a masterpiece of diplomatic flattery -- that
# the Fourth of July should be celebrated twice in that year.''
# Tonga
# From Paul Eggert (1996-01-22):
# Today's _Wall Street Journal_ (p 1) reports that ``Tonga has been plotting
# to sneak ahead of [New Zealanders] by introducing daylight-saving time.''
# Since Kiribati has moved the Date Line it's not clear what Tonga will do.
# Don Mundell writes in the 1997-02-20 Tonga Chronicle
# <a href="http://www.tongatapu.net.to/tonga/homeland/timebegins.htm">
# How Tonga became `The Land where Time Begins'
# </a>:
# Until 1941 Tonga maintained a standard time 50 minutes ahead of NZST
# 12 hours and 20 minutes ahead of GMT. When New Zealand adjusted its
# standard time in 1940s, Tonga had the choice of subtracting from its
# local time to come on the same standard time as New Zealand or of
# advancing its time to maintain the differential of 13 degrees
# (approximately 50 minutes ahead of New Zealand time).
#
# Because His Majesty King Taufa'ahau Tupou IV, then Crown Prince
# Tungi, preferred to ensure Tonga's title as the land where time
# begins, the Legislative Assembly approved the latter change.
#
# But some of the older, more conservative members from the outer
# islands objected. "If at midnight on Dec. 31, we move ahead 40
# minutes, as your Royal Highness wishes, what becomes of the 40
# minutes we have lost?"
#
# The Crown Prince, presented an unanswerable argument: "Remember that
# on the World Day of Prayer, you would be the first people on Earth
# to say your prayers in the morning."
# From Paul Eggert (2006-03-22):
# Shanks & Pottenger say the transition was on 1968-10-01; go with Mundell.
# From Eric Ulevik (1999-05-03):
# Tonga's director of tourism, who is also secretary of the National Millenium
# Committee, has a plan to get Tonga back in front.
# He has proposed a one-off move to tropical daylight saving for Tonga from
# October to March, which has won approval in principle from the Tongan
# Government.
# From Steffen Thorsen (1999-09-09):
# * Tonga will introduce DST in November
#
# I was given this link by John Letts:
# <a href="http://news.bbc.co.uk/hi/english/world/asia-pacific/newsid_424000/424764.stm">
# http://news.bbc.co.uk/hi/english/world/asia-pacific/newsid_424000/424764.stm
# </a>
#
# I have not been able to find exact dates for the transition in November
# yet. By reading this article it seems like Fiji will be 14 hours ahead
# of UTC as well, but as far as I know Fiji will only be 13 hours ahead
# (12 + 1 hour DST).
# From Arthur David Olson (1999-09-20):
# According to <a href="http://www.tongaonline.com/news/sept1799.html">
# http://www.tongaonline.com/news/sept1799.html
# </a>:
# "Daylight Savings Time will take effect on Oct. 2 through April 15, 2000
# and annually thereafter from the first Saturday in October through the
# third Saturday of April. Under the system approved by Privy Council on
# Sept. 10, clocks must be turned ahead one hour on the opening day and
# set back an hour on the closing date."
# Alas, no indication of the time of day.
# From Rives McDow (1999-10-06):
# Tonga started its Daylight Saving on Saturday morning October 2nd at 0200am.
# Daylight Saving ends on April 16 at 0300am which is Sunday morning.
# From Steffen Thorsen (2000-10-31):
# Back in March I found a notice on the website http://www.tongaonline.com
# that Tonga changed back to standard time one month early, on March 19
# instead of the original reported date April 16. Unfortunately, the article
# is no longer available on the site, and I did not make a copy of the
# text, and I have forgotten to report it here.
# (Original URL was: http://www.tongaonline.com/news/march162000.htm )
# From Rives McDow (2000-12-01):
# Tonga is observing DST as of 2000-11-04 and will stop on 2001-01-27.
# From Sione Moala-Mafi (2001-09-20) via Rives McDow:
# At 2:00am on the first Sunday of November, the standard time in the Kingdom
# shall be moved forward by one hour to 3:00am. At 2:00am on the last Sunday
# of January the standard time in the Kingdom shall be moved backward by one
# hour to 1:00am.
# From Pulu 'Anau (2002-11-05):
# The law was for 3 years, supposedly to get renewed. It wasn't.
# Wake
# From Vernice Anderson, Personal Secretary to Philip Jessup,
# US Ambassador At Large (oral history interview, 1971-02-02):
#
# Saturday, the 14th [of October, 1950] -- ... The time was all the
# more confusing at that point, because we had crossed the
# International Date Line, thus getting two Sundays. Furthermore, we
# discovered that Wake Island had two hours of daylight saving time
# making calculation of time in Washington difficult if not almost
# impossible.
#
# http://www.trumanlibrary.org/wake/meeting.htm
# From Paul Eggert (2003-03-23):
# We have no other report of DST in Wake Island, so omit this info for now.
###############################################################################
# The International Date Line
# From Gwillim Law (2000-01-03):
#
# The International Date Line is not defined by any international standard,
# convention, or treaty. Mapmakers are free to draw it as they please.
# Reputable mapmakers will simply ensure that every point of land appears on
# the correct side of the IDL, according to the date legally observed there.
#
# When Kiribati adopted a uniform date in 1995, thereby moving the Phoenix and
# Line Islands to the west side of the IDL (or, if you prefer, moving the IDL
# to the east side of the Phoenix and Line Islands), I suppose that most
# mapmakers redrew the IDL following the boundary of Kiribati. Even that line
# has a rather arbitrary nature. The straight-line boundaries between Pacific
# island nations that are shown on many maps are based on an international
# convention, but are not legally binding national borders.... The date is
# governed by the IDL; therefore, even on the high seas, there may be some
# places as late as fourteen hours later than UTC. And, since the IDL is not
# an international standard, there are some places on the high seas where the
# correct date is ambiguous.
# From Wikipedia <http://en.wikipedia.org/wiki/Time_zone> (2005-08-31):
# Before 1920, all ships kept local apparent time on the high seas by setting
# their clocks at night or at the morning sight so that, given the ship's
# speed and direction, it would be 12 o'clock when the Sun crossed the ship's
# meridian (12 o'clock = local apparent noon). During 1917, at the
# Anglo-French Conference on Time-keeping at Sea, it was recommended that all
# ships, both military and civilian, should adopt hourly standard time zones
# on the high seas. Whenever a ship was within the territorial waters of any
# nation it would use that nation's standard time. The captain was permitted
# to change his ship's clocks at a time of his choice following his ship's
# entry into another zone time--he often chose midnight. These zones were
# adopted by all major fleets between 1920 and 1925 but not by many
# independent merchant ships until World War II.
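#
# (Illustrative arithmetic, not part of the quotation: under such hourly
# zones a ship's clock offset is its longitude rounded to the nearest
# multiple of 15 degrees, divided by 15 -- so a ship near 147 degrees east
# would keep UT+10, since round(147/15) = 10.)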
#
# From Paul Eggert, using references suggested by Oscar van Vlijmen
# (2005-03-20):
#
# The American Practical Navigator (2002)
# <http://pollux.nss.nima.mil/pubs/pubs_j_apn_sections.html?rid=187>
# talks only about the 180-degree meridian with respect to ships in
# international waters; it ignores the international date line.
|
27182812/ChatGLM-LLaMA-chinese-insturct | 172,046 | src/transformers/utils/dummy_pt_objects.py | # This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
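# Every definition below follows the same pattern: a public torch-backed name
# is mirrored by a placeholder whose metaclass is DummyObject and whose
# __init__ calls requires_backends, so that using it without torch installed
# fails early with an informative ImportError rather than an opaque failure
# deep inside an import chain. A minimal commented-out sketch of the
# mechanism (simplified and hypothetical -- the real DummyObject and
# requires_backends are imported from ..utils above and also handle version
# checks and multi-backend error messages):
#
#     class DummyObject(type):
#         def __getattribute__(cls, key):
#             # Private attributes resolve normally; any public access
#             # (e.g. DummyClass.from_pretrained) triggers the check.
#             if key.startswith("_"):
#                 return super().__getattribute__(key)
#             requires_backends(cls, cls._backends)
#
#     def requires_backends(obj, backends):
#         name = getattr(obj, "__name__", obj.__class__.__name__)
#         raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")
#
# Each dummy class still defines __init__ explicitly because instantiation
# (DummyClass(...)) resolves __call__ on the metaclass through slot lookup,
# bypassing __getattribute__, so the constructor must run the check itself.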
class PyTorchBenchmark(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PyTorchBenchmarkArguments(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GlueDataset(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GlueDataTrainingArguments(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LineByLineTextDataset(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LineByLineWithRefDataset(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LineByLineWithSOPTextDataset(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SquadDataset(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SquadDataTrainingArguments(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TextDataset(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TextDatasetForNextSentencePrediction(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BeamScorer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BeamSearchScorer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConstrainedBeamSearchScorer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Constraint(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConstraintListState(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DisjunctiveConstraint(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ForcedBOSTokenLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ForcedEOSTokenLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GenerationMixin(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class HammingDiversityLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class InfNanRemoveLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LogitsProcessorList(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LogitsWarper(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MaxLengthCriteria(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MaxTimeCriteria(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MinLengthLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MinNewTokensLengthLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NoBadWordsLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NoRepeatNGramLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PhrasalConstraint(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PrefixConstrainedLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RepetitionPenaltyLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class StoppingCriteria(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class StoppingCriteriaList(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TemperatureLogitsWarper(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TopKLogitsWarper(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TopPLogitsWarper(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TypicalLogitsWarper(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def top_k_top_p_filtering(*args, **kwargs):
requires_backends(top_k_top_p_filtering, ["torch"])
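# Module-level functions cannot use the DummyObject metaclass, so each one is
# replaced by a stub of the same name that calls requires_backends directly
# and therefore raises the same informative ImportError on first call.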
class PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class AlbertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_albert(*args, **kwargs):
requires_backends(load_tf_weights_in_albert, ["torch"])
ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST = None
class AlignModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlignPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlignTextModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlignVisionModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None
class AltCLIPModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AltCLIPPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AltCLIPTextModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AltCLIPVisionModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ASTForAudioClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ASTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ASTPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = None
MODEL_FOR_AUDIO_XVECTOR_MAPPING = None
MODEL_FOR_BACKBONE_MAPPING = None
MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING = None
MODEL_FOR_CAUSAL_LM_MAPPING = None
MODEL_FOR_CTC_MAPPING = None
MODEL_FOR_DEPTH_ESTIMATION_MAPPING = None
MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = None
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None
MODEL_FOR_IMAGE_SEGMENTATION_MAPPING = None
MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING = None
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = None
MODEL_FOR_MASKED_LM_MAPPING = None
MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None
MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None
MODEL_FOR_OBJECT_DETECTION_MAPPING = None
MODEL_FOR_PRETRAINING_MAPPING = None
MODEL_FOR_QUESTION_ANSWERING_MAPPING = None
MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = None
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None
MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = None
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None
MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING = None
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING = None
MODEL_FOR_VISION_2_SEQ_MAPPING = None
MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING = None
MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING = None
MODEL_MAPPING = None
MODEL_WITH_LM_HEAD_MAPPING = None
class AutoBackbone(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForAudioClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForAudioFrameClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForAudioXVector(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForDepthEstimation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForDocumentQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForImageSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForInstanceSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForMaskedImageModeling(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForNextSentencePrediction(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForObjectDetection(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForSemanticSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForSeq2SeqLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForSpeechSeq2Seq(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForTableQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForUniversalSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForVideoClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForVision2Seq(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForVisualQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForZeroShotObjectDetection(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelWithLMHead(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
BART_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BartForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BartForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BartForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BartForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BartModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BartPretrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PretrainedBartModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BeitForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BeitForMaskedImageModeling(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BeitForSemanticSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BeitModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BeitPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertForNextSentencePrediction(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertLMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_bert(*args, **kwargs):
requires_backends(load_tf_weights_in_bert, ["torch"])
class BertGenerationDecoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertGenerationEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertGenerationPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_bert_generation(*args, **kwargs):
requires_backends(load_tf_weights_in_bert_generation, ["torch"])
BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BigBirdForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_big_bird(*args, **kwargs):
requires_backends(load_tf_weights_in_big_bird, ["torch"])
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BigBirdPegasusForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdPegasusForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdPegasusForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdPegasusForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdPegasusModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdPegasusPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BioGptForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BioGptModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BioGptPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
BIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BitBackbone(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BitForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BitModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BitPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BlenderbotForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlenderbotForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlenderbotModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlenderbotPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BlenderbotSmallForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlenderbotSmallForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlenderbotSmallModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlenderbotSmallPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BlipForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlipForImageTextRetrieval(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlipForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlipModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlipPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlipTextModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlipVisionModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class Blip2ForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Blip2Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Blip2PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Blip2QFormerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Blip2VisionModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BloomForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BloomForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BloomForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BloomForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BloomModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BloomPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BridgeTowerForImageAndTextRetrieval(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BridgeTowerForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BridgeTowerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BridgeTowerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CamembertForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CanineForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CanineForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CanineForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CanineForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CanineLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CanineModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CaninePreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_canine(*args, **kwargs):
requires_backends(load_tf_weights_in_canine, ["torch"])
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ChineseCLIPModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ChineseCLIPPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ChineseCLIPTextModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ChineseCLIPVisionModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ClapAudioModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ClapAudioModelWithProjection(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ClapFeatureExtractor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ClapModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ClapPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ClapTextModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ClapTextModelWithProjection(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CLIPModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CLIPPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CLIPTextModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CLIPTextModelWithProjection(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CLIPVisionModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CLIPVisionModelWithProjection(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CLIPSegForImageSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CLIPSegModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CLIPSegPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CLIPSegTextModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CLIPSegVisionModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CodeGenForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CodeGenModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CodeGenPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ConvBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_convbert(*args, **kwargs):
requires_backends(load_tf_weights_in_convbert, ["torch"])
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ConvNextBackbone(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvNextForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvNextModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvNextPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CTRLForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CTRLLMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CTRLModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CTRLPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
CVT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CvtForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CvtModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CvtPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST = None
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST = None
class Data2VecAudioForAudioFrameClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Data2VecAudioForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Data2VecAudioForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Data2VecAudioForXVector(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Data2VecAudioModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Data2VecAudioPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Data2VecTextForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Data2VecTextForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Data2VecTextForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Data2VecTextForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Data2VecTextForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Data2VecTextForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Data2VecTextModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Data2VecTextPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Data2VecVisionForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Data2VecVisionForSemanticSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Data2VecVisionModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Data2VecVisionPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DebertaForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DebertaV2ForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2ForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2ForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2ForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2ForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DecisionTransformerGPT2Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DecisionTransformerGPT2PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DecisionTransformerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DecisionTransformerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DeiTForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DeiTForImageClassificationWithTeacher(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DeiTForMaskedImageModeling(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DeiTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DeiTPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DETA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DetaForObjectDetection(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DetaModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DetaPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DINAT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DinatBackbone(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DinatForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DinatModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DinatPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DistilBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DonutSwinModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DonutSwinPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DPRContextEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRPretrainedContextEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRPretrainedQuestionEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRPretrainedReader(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRQuestionEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRReader(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DPT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DPTForDepthEstimation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPTForSemanticSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPTPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class EfficientFormerForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class EfficientFormerForImageClassificationWithTeacher(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class EfficientFormerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class EfficientFormerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class EfficientNetForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class EfficientNetModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class EfficientNetPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ElectraForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_electra(*args, **kwargs):
requires_backends(load_tf_weights_in_electra, ["torch"])
class EncoderDecoderModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ErnieForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ErnieForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ErnieForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ErnieForNextSentencePrediction(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ErnieForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ErnieForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ErnieForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ErnieForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ErnieModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ErniePreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ErnieMForInformationExtraction(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ErnieMForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ErnieMForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ErnieMForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ErnieMForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ErnieMModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ErnieMPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
ESM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class EsmFoldPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class EsmForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class EsmForProteinFolding(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class EsmForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class EsmForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class EsmModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class EsmPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class FlaubertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertForQuestionAnsweringSimple(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertWithLMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class FlavaForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlavaImageCodebook(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlavaImageModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlavaModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlavaMultimodalModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlavaPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlavaTextModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
FNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class FNetForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForNextSentencePrediction(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FSMTForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FSMTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PretrainedFSMTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class FunnelBaseModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_funnel(*args, **kwargs):
requires_backends(load_tf_weights_in_funnel, ["torch"])
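# Plain functions cannot carry a metaclass, so exported helpers such as
# `load_tf_weights_in_funnel` above are stubbed as functions that run the
# backend check as soon as they are called. The function object itself (rather
# than `self`) is passed to `requires_backends`, presumably so the resulting
# error message can name the missing function.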
GIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GitForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GitModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GitPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GitVisionModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GLPNForDepthEstimation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GLPNModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GLPNPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GPT2DoubleHeadsModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPT2ForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPT2ForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPT2LMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPT2Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPT2PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_gpt2(*args, **kwargs):
requires_backends(load_tf_weights_in_gpt2, ["torch"])
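# Hypothetical session showing what these stubs look like from user code in an
# environment without PyTorch (the error wording is paraphrased, not an exact
# transcript):
#
#     >>> from transformers import GPT2LMHeadModel   # import always succeeds
#     >>> model = GPT2LMHeadModel()                  # instantiation does not
#     ImportError: GPT2LMHeadModel requires the PyTorch library but it was not
#     found in your environment. ...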
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GPTNeoForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTNeoForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTNeoModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTNeoPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_gpt_neo(*args, **kwargs):
requires_backends(load_tf_weights_in_gpt_neo, ["torch"])
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GPTNeoXForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTNeoXLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTNeoXModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTNeoXPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GPTNeoXJapaneseForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTNeoXJapaneseLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTNeoXJapaneseModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTNeoXJapanesePreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GPTJForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTJForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTJForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTJModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTJPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GPTSanJapaneseForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTSanJapaneseModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTSanJapanesePreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GraphormerForGraphClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GraphormerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GraphormerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GroupViTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GroupViTPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GroupViTTextModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GroupViTVisionModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class HubertForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class HubertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class HubertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class HubertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class IBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class IBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class IBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class IBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class IBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class IBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class IBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ImageGPTForCausalImageModeling(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ImageGPTForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ImageGPTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ImageGPTPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_imagegpt(*args, **kwargs):
requires_backends(load_tf_weights_in_imagegpt, ["torch"])
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST = None
class JukeboxModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class JukeboxPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class JukeboxPrior(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class JukeboxVQVAE(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LayoutLMForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LayoutLMv2ForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMv2ForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMv2ForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMv2Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMv2PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LayoutLMv3ForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMv3ForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMv3ForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMv3Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMv3PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
LED_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LEDForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LEDForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LEDForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LEDModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LEDPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LevitForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LevitForImageClassificationWithTeacher(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LevitModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LevitPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
LILT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LiltForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LiltForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LiltForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LiltModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LiltPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LlamaForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LlamaModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LlamaPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LongformerForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerSelfAttention(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LongT5EncoderModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongT5ForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongT5Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongT5PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LukeForEntityClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LukeForEntityPairClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LukeForEntitySpanClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LukeForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LukeForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LukeForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LukeForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LukeForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LukeModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LukePreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertVisualFeatureEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertXLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST = None
class M2M100ForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class M2M100Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class M2M100PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MarianForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MarianModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MarianMTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class MarkupLMForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MarkupLMForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MarkupLMForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MarkupLMModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MarkupLMPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class Mask2FormerForUniversalSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Mask2FormerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Mask2FormerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class MaskFormerForInstanceSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MaskFormerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MaskFormerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MaskFormerSwinBackbone(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MBartForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MBartForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MBartForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MBartForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MBartModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MBartPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class MCTCTForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MCTCTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MCTCTPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class MegatronBertForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MegatronBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MegatronBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MegatronBertForNextSentencePrediction(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MegatronBertForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MegatronBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MegatronBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MegatronBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MegatronBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MegatronBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MMBTForClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MMBTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ModalEmbeddings(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class MobileBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertForNextSentencePrediction(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_mobilebert(*args, **kwargs):
requires_backends(load_tf_weights_in_mobilebert, ["torch"])
MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = None
class MobileNetV1ForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileNetV1Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileNetV1PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_mobilenet_v1(*args, **kwargs):
requires_backends(load_tf_weights_in_mobilenet_v1, ["torch"])
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class MobileNetV2ForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileNetV2ForSemanticSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileNetV2Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileNetV2PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_mobilenet_v2(*args, **kwargs):
requires_backends(load_tf_weights_in_mobilenet_v2, ["torch"])
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class MobileViTForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileViTForSemanticSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileViTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileViTPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class MPNetForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MT5EncoderModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MT5ForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MT5Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MT5PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
MVP_PRETRAINED_MODEL_ARCHIVE_LIST = None
class MvpForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MvpForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MvpForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MvpForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MvpModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MvpPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
NAT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class NatBackbone(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NatForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NatModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NatPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class NezhaForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NezhaForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NezhaForNextSentencePrediction(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NezhaForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NezhaForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NezhaForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NezhaForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NezhaModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NezhaPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class NystromformerForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NystromformerForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NystromformerForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NystromformerForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NystromformerForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NystromformerLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NystromformerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NystromformerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class OneFormerForUniversalSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OneFormerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OneFormerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class OpenAIGPTDoubleHeadsModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OpenAIGPTForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OpenAIGPTLMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OpenAIGPTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OpenAIGPTPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_openai_gpt(*args, **kwargs):
requires_backends(load_tf_weights_in_openai_gpt, ["torch"])
OPT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class OPTForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OPTForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OPTForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OPTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OPTPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class OwlViTForObjectDetection(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OwlViTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OwlViTPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OwlViTTextModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OwlViTVisionModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PegasusForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PegasusForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PegasusModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PegasusPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST = None
class PegasusXForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PegasusXModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PegasusXPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class PerceiverForImageClassificationConvProcessing(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PerceiverForImageClassificationFourier(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PerceiverForImageClassificationLearned(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PerceiverForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PerceiverForMultimodalAutoencoding(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PerceiverForOpticalFlow(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PerceiverForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PerceiverLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PerceiverModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PerceiverPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST = None
class PLBartForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PLBartForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PLBartForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PLBartModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PLBartPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class PoolFormerForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PoolFormerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PoolFormerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ProphetNetDecoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ProphetNetEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ProphetNetForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ProphetNetForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ProphetNetModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ProphetNetPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class QDQBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class QDQBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class QDQBertForNextSentencePrediction(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class QDQBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class QDQBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class QDQBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class QDQBertLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class QDQBertLMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class QDQBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class QDQBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_qdqbert(*args, **kwargs):
requires_backends(load_tf_weights_in_qdqbert, ["torch"])
class RagModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RagPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RagSequenceForGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RagTokenForGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
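# Model families whose real modules export no `*_PRETRAINED_MODEL_ARCHIVE_LIST`
# (Rag above; Marian, Pegasus, and Llama earlier in this file) get no None stub
# either, which suggests these stubs mirror the public surface of each real
# module one-to-one.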
REALM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RealmEmbedder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RealmForOpenQA(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RealmKnowledgeAugEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RealmPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RealmReader(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RealmRetriever(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RealmScorer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_realm(*args, **kwargs):
requires_backends(load_tf_weights_in_realm, ["torch"])
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ReformerAttention(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerModelWithLMHead(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RegNetForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RegNetModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RegNetPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RemBertForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_rembert(*args, **kwargs):
requires_backends(load_tf_weights_in_rembert, ["torch"])
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ResNetBackbone(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ResNetForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ResNetModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ResNetPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RetriBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RetriBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RobertaForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RobertaPreLayerNormForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaPreLayerNormForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaPreLayerNormForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaPreLayerNormForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaPreLayerNormForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaPreLayerNormModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaPreLayerNormPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RoCBertForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoCBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoCBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoCBertForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoCBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoCBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoCBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoCBertLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoCBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoCBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_roc_bert(*args, **kwargs):
requires_backends(load_tf_weights_in_roc_bert, ["torch"])
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RoFormerForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_roformer(*args, **kwargs):
requires_backends(load_tf_weights_in_roformer, ["torch"])
SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SegformerDecodeHead(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SegformerForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SegformerForSemanticSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SegformerLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SegformerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SegformerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SEW_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SEWForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SEWDForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWDForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWDModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWDPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SpeechEncoderDecoderModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class Speech2TextForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Speech2TextModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Speech2TextPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Speech2Text2ForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Speech2Text2PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SpeechT5ForSpeechToSpeech(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SpeechT5ForSpeechToText(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SpeechT5ForTextToSpeech(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SpeechT5HifiGan(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SpeechT5Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SpeechT5PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SplinterForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SplinterForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SplinterLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SplinterModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SplinterPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SqueezeBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertModule(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SwinBackbone(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SwinForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SwinForMaskedImageModeling(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SwinModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SwinPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST = None
class Swin2SRForImageSuperResolution(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Swin2SRModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Swin2SRPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class Swinv2ForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Swinv2ForMaskedImageModeling(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Swinv2Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Swinv2PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SwitchTransformersEncoderModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SwitchTransformersForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SwitchTransformersModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SwitchTransformersPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SwitchTransformersSparseMLP(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SwitchTransformersTop1Router(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
T5_PRETRAINED_MODEL_ARCHIVE_LIST = None
class T5EncoderModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class T5ForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class T5Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class T5PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_t5(*args, **kwargs):
requires_backends(load_tf_weights_in_t5, ["torch"])
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TapasForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TapasForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TapasForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TapasModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TapasPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_tapas(*args, **kwargs):
requires_backends(load_tf_weights_in_tapas, ["torch"])
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TimeSeriesTransformerForPrediction(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TimeSeriesTransformerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TimeSeriesTransformerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TimesformerForVideoClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TimesformerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TimesformerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TrajectoryTransformerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TrajectoryTransformerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class AdaptiveEmbedding(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TransfoXLForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TransfoXLLMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TransfoXLModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TransfoXLPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_transfo_xl(*args, **kwargs):
requires_backends(load_tf_weights_in_transfo_xl, ["torch"])
TROCR_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TrOCRForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TrOCRPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
TVLT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TvltForAudioVisualClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TvltForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TvltModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TvltPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = None
class UniSpeechForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class UniSpeechSatForAudioFrameClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatForXVector(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UperNetForSemanticSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UperNetPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
VAN_PRETRAINED_MODEL_ARCHIVE_LIST = None
class VanForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VanModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VanPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST = None
class VideoMAEForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VideoMAEForVideoClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VideoMAEModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VideoMAEPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
VILT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ViltForImageAndTextRetrieval(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViltForImagesAndTextClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViltForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViltForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViltForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViltLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViltModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViltPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisionEncoderDecoderModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisionTextDualEncoderModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class VisualBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertForRegionToPhraseAlignment(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertForVisualReasoning(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
VIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ViTForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViTForMaskedImageModeling(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViTPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ViTHybridForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViTHybridModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViTHybridPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ViTMAEForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViTMAELayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViTMAEModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViTMAEPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ViTMSNForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViTMSNModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViTMSNPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class Wav2Vec2ForAudioFrameClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2ForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2ForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2ForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2ForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2ForXVector(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class Wav2Vec2ConformerForAudioFrameClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2ConformerForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2ConformerForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2ConformerForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2ConformerForXVector(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2ConformerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2ConformerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class WavLMForAudioFrameClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class WavLMForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class WavLMForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class WavLMForXVector(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class WavLMModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class WavLMPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class WhisperForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class WhisperModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class WhisperPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XCLIPModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XCLIPPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XCLIPTextModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XCLIPVisionModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XGLMForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XGLMModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XGLMPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
XLM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XLMForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMForQuestionAnsweringSimple(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMWithLMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XLMProphetNetDecoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMProphetNetEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMProphetNetForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMProphetNetForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMProphetNetModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMProphetNetPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XLMRobertaForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XLMRobertaXLForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaXLForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaXLForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaXLForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaXLForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaXLForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaXLModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaXLPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XLNetForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetForQuestionAnsweringSimple(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetLMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_xlnet(*args, **kwargs):
requires_backends(load_tf_weights_in_xlnet, ["torch"])
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XmodForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XmodForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XmodForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XmodForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XmodForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XmodForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XmodModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XmodPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST = None
class YolosForObjectDetection(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class YolosModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class YolosPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
YOSO_PRETRAINED_MODEL_ARCHIVE_LIST = None
class YosoForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class YosoForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class YosoForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class YosoForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class YosoForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class YosoLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class YosoModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class YosoPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Adafactor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AdamW(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def get_constant_schedule(*args, **kwargs):
requires_backends(get_constant_schedule, ["torch"])
def get_constant_schedule_with_warmup(*args, **kwargs):
requires_backends(get_constant_schedule_with_warmup, ["torch"])
def get_cosine_schedule_with_warmup(*args, **kwargs):
requires_backends(get_cosine_schedule_with_warmup, ["torch"])
def get_cosine_with_hard_restarts_schedule_with_warmup(*args, **kwargs):
requires_backends(get_cosine_with_hard_restarts_schedule_with_warmup, ["torch"])
def get_inverse_sqrt_schedule(*args, **kwargs):
requires_backends(get_inverse_sqrt_schedule, ["torch"])
def get_linear_schedule_with_warmup(*args, **kwargs):
requires_backends(get_linear_schedule_with_warmup, ["torch"])
def get_polynomial_decay_schedule_with_warmup(*args, **kwargs):
requires_backends(get_polynomial_decay_schedule_with_warmup, ["torch"])
def get_scheduler(*args, **kwargs):
requires_backends(get_scheduler, ["torch"])
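# Note: function placeholders like the schedulers above differ from the class dummies
# only in when they raise -- the class versions fail in __init__ at instantiation,
# while these fail as soon as the function itself is called.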
class Conv1D(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def apply_chunking_to_forward(*args, **kwargs):
requires_backends(apply_chunking_to_forward, ["torch"])
def prune_layer(*args, **kwargs):
requires_backends(prune_layer, ["torch"])
class Trainer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def torch_distributed_zero_first(*args, **kwargs):
requires_backends(torch_distributed_zero_first, ["torch"])
class Seq2SeqTrainer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
|
27182812/ChatGLM-LLaMA-chinese-insturct | 4,518 | src/transformers/utils/versions.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for working with package versions
"""
import operator
import re
import sys
from typing import Optional
from packaging import version
# The package importlib_metadata is in a different place, depending on the Python version.
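# (importlib.metadata entered the standard library in Python 3.8; earlier versions
# fall back to the third-party importlib_metadata backport.)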
if sys.version_info < (3, 8):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
ops = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
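# Example: ops[">="](version.parse("4.27.0"), version.parse("4.26.0")) evaluates to
# True; this is how _compare_versions below turns a pip-style operator string into an
# actual comparison between parsed versions.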
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
if got_ver is None or want_ver is None:
raise ValueError(
f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
f" reinstalling {pkg}."
)
if not ops[op](version.parse(got_ver), version.parse(want_ver)):
raise ImportError(
f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
)
def require_version(requirement: str, hint: Optional[str] = None) -> None:
"""
Perform a runtime check of the dependency versions, using the exact same syntax used by pip.
The installed module version comes from the *site-packages* dir via *importlib_metadata*.
Args:
requirement (`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy"
hint (`str`, *optional*): what suggestion to print in case of requirements not being met
Example:
```python
require_version("pandas>1.1.2")
require_version("numpy>1.18.5", "this is important to have for whatever reason")
```"""
hint = f"\n{hint}" if hint is not None else ""
# non-versioned check
if re.match(r"^[\w_\-\d]+$", requirement):
pkg, op, want_ver = requirement, None, None
else:
match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
f" got {requirement}"
)
pkg, want_full = match[0]
want_range = want_full.split(",") # there could be multiple requirements
wanted = {}
for w in want_range:
match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
f" but got {requirement}"
)
op, want_ver = match[0]
wanted[op] = want_ver
if op not in ops:
raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")
# special case
if pkg == "python":
got_ver = ".".join([str(x) for x in sys.version_info[:3]])
for op, want_ver in wanted.items():
_compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
return
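# e.g. on CPython 3.9.16, sys.version_info[:3] == (3, 9, 16), so got_ver == "3.9.16",
# which is then checked against each wanted constraint (a hypothetical ">=3.8", say).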
# check if any version is installed
try:
got_ver = importlib_metadata.version(pkg)
except importlib_metadata.PackageNotFoundError:
raise importlib_metadata.PackageNotFoundError(
f"The '{requirement}' distribution was not found and is required by this application. {hint}"
)
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
"""require_version wrapper which emits a core-specific hint on failure"""
hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
return require_version(requirement, hint)
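# Illustrative usage (a sketch, not part of the original module): require_version_core
# simply forwards to require_version with the transformers-specific hint above, so
#   require_version_core("tokenizers>=0.9.4")
# raises ImportError mentioning that hint when the installed tokenizers is too old,
# and returns None silently when the constraint is satisfied.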
|
27182812/ChatGLM-LLaMA-chinese-insturct | 10,069 | src/transformers/utils/dummy_tokenizers_objects.py | # This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
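# requires_backends is defined elsewhere in ..utils; as a rough, hedged sketch of its
# contract inferred from how it is used in this file: it checks each named backend
# (here "tokenizers") for availability and raises an ImportError with an installation
# hint if any is missing, so importing these dummies is cheap and failure happens only
# at instantiation time.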
class AlbertTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class BartTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class BarthezTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class BertTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class BigBirdTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class BlenderbotTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class BlenderbotSmallTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class BloomTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class CamembertTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class CLIPTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class CodeGenTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class ConvBertTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class CpmTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class DebertaTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class DebertaV2TokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class DistilBertTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class DPRContextEncoderTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class DPRQuestionEncoderTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class DPRReaderTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class ElectraTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class FNetTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class FunnelTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class GPT2TokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class GPTNeoXTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class GPTNeoXJapaneseTokenizer(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class HerbertTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class LayoutLMTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class LayoutLMv2TokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class LayoutLMv3TokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class LayoutXLMTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class LEDTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class LongformerTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class LxmertTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class MarkupLMTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class MBartTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class MBart50TokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class MobileBertTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class MPNetTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class MT5TokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class MvpTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class NllbTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class OpenAIGPTTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class PegasusTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class RealmTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class ReformerTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class RemBertTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class RetriBertTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class RobertaTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class RoFormerTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class SplinterTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class SqueezeBertTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class T5TokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class WhisperTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class XGLMTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class XLMRobertaTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class XLNetTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
class PreTrainedTokenizerFast(metaclass=DummyObject):
_backends = ["tokenizers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tokenizers"])
|
274056675/springboot-openai-chatgpt | 9,756 | mng_web/src/views/system/menu.vue | <template>
<basic-container>
<avue-crud :option="option"
:table-loading="loading"
:data="data"
ref="crud"
v-model="form"
:permission="permissionList"
:before-open="beforeOpen"
@row-del="rowDel"
@row-update="rowUpdate"
@row-save="rowSave"
@search-change="searchChange"
@search-reset="searchReset"
@selection-change="selectionChange"
@current-change="currentChange"
@size-change="sizeChange"
@on-load="onLoad">
<template slot="menuLeft">
<el-button type="danger"
size="small"
icon="el-icon-delete"
v-if="permission.menu_delete"
plain
@click="handleDelete">删 除
</el-button>
</template>
<template slot-scope="scope" slot="menu">
<el-button
type="text"
icon="el-icon-circle-plus-outline"
size="small"
@click.stop="handleAdd(scope.row,scope.index)"
v-if="userInfo.authority.includes('admin')"
>新增子项
</el-button>
</template>
<template slot-scope="{row}"
slot="source">
<div style="text-align:center">
<i :class="row.source"></i>
</div>
</template>
</avue-crud>
</basic-container>
</template>
<script>
import {add, getList, getMenu, remove, update} from "@/api/system/menu";
import {mapGetters} from "vuex";
import iconList from "@/config/iconList";
export default {
data() {
return {
form: {},
loading: true,
selectionList: [],
query: {},
page: {
pageSize: 10,
currentPage: 1,
total: 0
},
option: {
searchShow: true,
searchMenuSpan: 6,
tip: false,
tree: true,
border: true,
index: true,
selection: true,
viewBtn: true,
menuWidth: 300,
column: [
{
label: "菜单名称",
prop: "name",
search: true,
rules: [
{
required: true,
message: "请输入菜单名称",
trigger: "blur"
}
]
},
{
label: "路由地址",
prop: "path",
rules: [
{
required: true,
message: "请输入路由地址",
trigger: "blur"
}
]
},
{
label: "上级菜单",
prop: "parentId",
type: "tree",
dicUrl: "/api/blade-system/menu/tree",
hide: true,
props: {
label: "title"
},
rules: [
{
required: false,
message: "请选择上级菜单",
trigger: "click"
}
]
},
{
label: "菜单图标",
prop: "source",
type: "icon",
slot: true,
iconList: iconList,
rules: [
{
required: true,
message: "请输入菜单图标",
trigger: "click"
}
]
},
{
label: "菜单编号",
prop: "code",
search: true,
rules: [
{
required: true,
message: "请输入菜单编号",
trigger: "blur"
}
]
},
{
label: "菜单类型",
prop: "category",
type: "radio",
dicData: [
{
label: "菜单",
value: 1
},
{
label: "按钮",
value: 2
}
],
hide: true,
rules: [
{
required: true,
message: "请选择菜单类型",
trigger: "blur"
}
]
},
{
label: "菜单别名",
prop: "alias",
rules: [
{
required: true,
message: "请输入菜单别名",
trigger: "blur"
}
]
},
{
label: "按钮功能",
prop: "action",
type: "radio",
dicData: [
{
label: "工具栏",
value: 1
},
{
label: "操作栏",
value: 2
},
{
label: "工具操作栏",
value: 3
}
],
hide: true,
rules: [
{
required: true,
message: "请选择按钮功能",
trigger: "blur"
}
]
},
{
label: "菜单排序",
prop: "sort",
type: "number",
rules: [
{
required: true,
message: "请输入菜单排序",
trigger: "blur"
}
]
},
{
label: "新窗口",
prop: "isOpen",
type: "radio",
dicData: [
{
label: "否",
value: 0
},
{
label: "是",
value: 1
},
],
hide: true
},
{
label: "菜单备注",
prop: "remark",
type: "textarea",
span: 24,
minRows: 6,
hide: true
}
]
},
data: []
};
},
computed: {
...mapGetters(["userInfo", "permission"]),
permissionList() {
return {
addBtn: this.vaildData(this.permission.menu_add, false),
viewBtn: this.vaildData(this.permission.menu_view, false),
delBtn: this.vaildData(this.permission.menu_delete, false),
editBtn: this.vaildData(this.permission.menu_edit, false)
};
},
ids() {
let ids = [];
this.selectionList.forEach(ele => {
ids.push(ele.id);
});
return ids.join(",");
}
},
methods: {
handleAdd(row) {
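// Preset the new row's parentId to the clicked row and disable the field, so the
// "add child" dialog is created under the selected menu node.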
this.$refs.crud.value.parentId = row.id;
this.$refs.crud.option.column.forEach(item => {
if (item.prop === "parentId") {
item.value = row.id;
item.addDisabled = true;
}
});
this.$refs.crud.rowAdd();
},
rowSave(row, done, loading) {
add(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowUpdate(row, index, done, loading) {
update(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowDel(row) {
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(row.id);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
});
},
searchReset() {
this.query = {};
this.onLoad(this.page);
},
searchChange(params, done) {
this.query = params;
this.page.currentPage = 1;
this.onLoad(this.page, params);
done();
},
selectionChange(list) {
this.selectionList = list;
},
handleDelete() {
if (this.selectionList.length === 0) {
this.$message.warning("请选择至少一条数据");
return;
}
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(this.ids);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
this.$refs.crud.toggleSelection();
});
},
beforeOpen(done, type) {
if (["edit", "view"].includes(type)) {
getMenu(this.form.id).then(res => {
this.form = res.data.data;
});
}
done();
},
currentChange(currentPage){
this.page.currentPage = currentPage;
},
sizeChange(pageSize){
this.page.pageSize = pageSize;
},
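// The menu is loaded as a full tree (option.tree is enabled), so onLoad does
// not track a pagination total the way the paginated lists do.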
onLoad(page, params = {}) {
this.loading = true;
getList(page.currentPage, page.pageSize, Object.assign(params, this.query)).then(res => {
this.loading = false;
this.data = res.data.data;
});
}
}
};
</script>
<style>
</style>
|
274056675/springboot-openai-chatgpt | 17,687 | mng_web/src/views/system/user.vue | <template>
<basic-container>
<avue-crud :option="option"
:table-loading="loading"
:data="data"
ref="crud"
v-model="form"
:permission="permissionList"
:search.sync="search"
@row-del="rowDel"
@row-update="rowUpdate"
@row-save="rowSave"
:before-open="beforeOpen"
:page="page"
@search-change="searchChange"
@search-reset="searchReset"
@selection-change="selectionChange"
@current-change="currentChange"
@size-change="sizeChange"
@refresh-change="refreshChange"
@on-load="onLoad">
<template slot="menuLeft">
<el-button type="danger"
size="small"
icon="el-icon-delete"
plain
v-if="permission.user_delete"
@click="handleDelete">删 除
</el-button>
<el-button type="info"
size="small"
plain
v-if="userInfo.authority.includes('admin')"
icon="el-icon-user"
@click="handleGrant">角色配置
</el-button>
<el-button type="primary"
size="small"
plain
v-if="permission.user_reset"
icon="el-icon-refresh"
@click="handleReset">密码重置
</el-button>
<el-button type="success"
size="small"
plain
v-if="userInfo.authority.includes('admin')"
icon="el-icon-upload2"
@click="handleImport">导入
</el-button>
<el-button type="warning"
size="small"
plain
v-if="userInfo.authority.includes('admin')"
icon="el-icon-download"
@click="handleExport">导出
</el-button>
</template>
<template slot-scope="{row}"
slot="roleId">
<el-tag>{{row.roleName}}</el-tag>
</template>
<template slot-scope="{row}"
slot="deptId">
<el-tag>{{row.deptName}}</el-tag>
</template>
</avue-crud>
<el-dialog title="用户角色配置"
append-to-body
:visible.sync="roleBox"
width="345px">
<el-tree :data="roleGrantList"
show-checkbox
default-expand-all
node-key="id"
ref="treeRole"
:default-checked-keys="roleTreeObj"
:props="props">
</el-tree>
<span slot="footer" class="dialog-footer">
<el-button @click="roleBox = false">取 消</el-button>
<el-button type="primary"
@click="submitRole">确 定</el-button>
</span>
</el-dialog>
<el-dialog title="用户数据导入"
append-to-body
:visible.sync="excelBox"
width="555px">
<avue-form :option="excelOption" v-model="excelForm" :upload-after="uploadAfter">
<template slot="excelTemplate">
<el-button type="primary" @click="handleTemplate()">
点击下载<i class="el-icon-download el-icon--right"></i>
</el-button>
</template>
</avue-form>
</el-dialog>
</basic-container>
</template>
<script>
import {
getList,
getUser,
remove,
update,
add,
grant,
resetPassword
} from "@/api/system/user";
import {getDeptTree} from "@/api/system/dept";
import {getRoleTree} from "@/api/system/role";
import {getPostList} from "@/api/system/post";
import {mapGetters} from "vuex";
import website from '@/config/website';
import {getToken} from '@/util/auth';
import func from "@/util/func";
export default {
data() {
const validatePass = (rule, value, callback) => {
if (value === '') {
callback(new Error('请输入密码'));
} else {
callback();
}
};
const validatePass2 = (rule, value, callback) => {
if (value === '') {
callback(new Error('请再次输入密码'));
} else if (value !== this.form.password) {
callback(new Error('两次输入密码不一致!'));
} else {
callback();
}
};
return {
form: {},
search:{},
roleBox: false,
excelBox: false,
loading: true,
selectionList: [],
query: {},
page: {
pageSize: 10,
currentPage: 1,
total: 0
},
init: {
roleTree: [],
deptTree: [],
},
props: {
label: "title",
value: "key"
},
roleGrantList: [],
roleTreeObj: [],
option: {
height: 'auto',
calcHeight: 210,
searchShow: true,
searchMenuSpan: 6,
tip: false,
border: true,
index: true,
selection: true,
viewBtn: true,
column: [
{
label: "登录账号",
prop: "account",
search: true,
rules: [{
required: true,
message: "请输入登录账号",
trigger: "blur"
}],
span: website.tenantMode ? 12 : 24,
},
{
label: "所属租户",
prop: "tenantId",
type: "tree",
dicUrl: "/api/blade-system/tenant/select",
props: {
label: "tenantName",
value: "tenantId"
},
hide: !website.tenantMode,
addDisplay: website.tenantMode,
editDisplay: website.tenantMode,
viewDisplay: website.tenantMode,
search: false,
rules: [{
required: true,
message: "请输入所属租户",
trigger: "click"
}]
},
{
label: '密码',
prop: 'password',
hide: true,
editDisplay: false,
viewDisplay: false,
rules: [{required: true, validator: validatePass, trigger: 'blur'}]
},
{
label: '确认密码',
prop: 'password2',
hide: true,
editDisplay: false,
viewDisplay: false,
rules: [{required: true, validator: validatePass2, trigger: 'blur'}]
},
{
label: "用户昵称",
prop: "name",
rules: [{
required: true,
message: "请输入用户昵称",
trigger: "blur"
}]
},
{
label: "用户姓名",
prop: "realName",
search: true,
rules: [{
required: true,
message: "请输入用户姓名",
trigger: "blur"
}]
},
{
label: "所属角色",
prop: "roleId",
multiple: true,
type: "tree",
dicData: [],
props: {
label: "title"
},
slot: true,
checkStrictly: true,
rules: [{
required: true,
message: "请选择所属角色",
trigger: "click"
}]
},
{
label: "所属部门",
prop: "deptId",
type: "tree",
multiple: true,
dicData: [],
props: {
label: "title"
},
slot: true,
checkStrictly: true,
rules: [{
required: true,
message: "请选择所属部门",
trigger: "click"
}]
},
{
label: "用户编号",
prop: "code",
hide: true,
},
{
label: "所属岗位",
prop: "postId",
type: "tree",
multiple: true,
dicData: [],
hide: true,
props: {
label: "postName",
value: "id"
},
rules: [{
required: true,
message: "请选择所属岗位",
trigger: "click"
}],
},
{
label: "手机号码",
prop: "phone",
overHidden: true
},
{
label: "电子邮箱",
prop: "email",
hide: true,
overHidden: true
},
{
label: "用户性别",
prop: "sex",
type: "select",
dicData: [
{
label: "男",
value: 1
},
{
label: "女",
value: 2
},
{
label: "未知",
value: 3
}
],
hide: true
},
{
label: "用户生日",
type: "date",
prop: "birthday",
format: "yyyy-MM-dd HH:mm:ss",
valueFormat: "yyyy-MM-dd HH:mm:ss",
hide: true
},
{
label: "账号状态",
prop: "statusName",
hide: true,
display: false
}
]
},
data: [],
excelForm: {},
excelOption: {
submitBtn: false,
emptyBtn: false,
column: [
{
label: '模板上传',
prop: 'excelFile',
type: 'upload',
drag: true,
loadText: '模板上传中,请稍等',
span: 24,
propsHttp: {
res: 'data'
},
tip: '请上传 .xls,.xlsx 标准格式文件',
action: "/api/blade-user/import-user"
},
{
label: '模板下载',
prop: 'excelTemplate',
formslot: true,
span: 24,
}
]
}
};
},
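// Whenever the selected tenant changes, reload the dept/role/post dictionaries
// so the tree selects always reflect the current tenant.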
watch: {
'form.tenantId'() {
if (this.form.tenantId !== '') {
getDeptTree(this.form.tenantId).then(res => {
const column = this.findObject(this.option.column, "deptId");
column.dicData = res.data.data;
});
getRoleTree(this.form.tenantId).then(res => {
const column = this.findObject(this.option.column, "roleId");
column.dicData = res.data.data;
});
getPostList(this.form.tenantId).then(res => {
const column = this.findObject(this.option.column, "postId");
column.dicData = res.data.data;
});
}
},
},
computed: {
...mapGetters(["userInfo", "permission"]),
permissionList() {
return {
addBtn: this.vaildData(this.permission.user_add, false),
viewBtn: this.vaildData(this.permission.user_view, false),
delBtn: this.vaildData(this.permission.user_delete, false),
editBtn: this.vaildData(this.permission.user_edit, false)
};
},
ids() {
let ids = [];
this.selectionList.forEach(ele => {
ids.push(ele.id);
});
return ids.join(",");
},
},
methods: {
submitRole() {
const roleList = this.$refs.treeRole.getCheckedKeys().join(",");
grant(this.ids, roleList).then(() => {
this.roleBox = false;
this.$message({
type: "success",
message: "操作成功!"
});
this.onLoad(this.page);
});
},
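// deptId/roleId/postId come back from the tree selects as arrays; they are
// joined into comma-separated strings (func.join) before hitting the API in
// rowSave/rowUpdate, and split back into arrays in beforeOpen when editing.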
rowSave(row, done, loading) {
row.deptId = func.join(row.deptId);
row.roleId = func.join(row.roleId);
row.postId = func.join(row.postId);
add(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowUpdate(row, index, done, loading) {
row.deptId = func.join(row.deptId);
row.roleId = func.join(row.roleId);
row.postId = func.join(row.postId);
update(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowDel(row) {
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(row.id);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
});
},
searchReset() {
this.query = {};
this.onLoad(this.page);
},
searchChange(params, done) {
this.query = params;
this.page.currentPage = 1;
this.onLoad(this.page, params);
done();
},
selectionChange(list) {
this.selectionList = list;
},
handleDelete() {
if (this.selectionList.length === 0) {
this.$message.warning("请选择至少一条数据");
return;
}
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(this.ids);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
this.$refs.crud.toggleSelection();
});
},
handleReset() {
if (this.selectionList.length === 0) {
this.$message.warning("请选择至少一条数据");
return;
}
this.$confirm("确定将选择账号密码重置为123456?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return resetPassword(this.ids);
})
.then(() => {
this.$message({
type: "success",
message: "操作成功!"
});
this.$refs.crud.toggleSelection();
});
},
handleGrant() {
if (this.selectionList.length === 0) {
this.$message.warning("请选择至少一条数据");
return;
}
this.roleTreeObj = [];
if (this.selectionList.length === 1) {
this.roleTreeObj = this.selectionList[0].roleId.split(",");
}
getRoleTree().then(res => {
this.roleGrantList = res.data.data;
this.roleBox = true;
});
},
handleImport() {
this.excelBox = true;
},
uploadAfter(res, done, loading, column) {
window.console.log(column);
done();
this.excelBox = false;
this.refreshChange();
},
handleExport() {
this.$confirm("是否导出用户数据?", "提示", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
}).then(() => {
window.open(`/api/blade-user/export-user?blade-auth=${getToken()}&account=${this.search.account || ''}&realName=${this.search.realName || ''}`);
});
},
handleTemplate() {
window.open(`/api/blade-user/export-template?blade-auth=${getToken()}`);
},
beforeOpen(done, type) {
if (["edit", "view"].includes(type)) {
getUser(this.form.id).then(res => {
this.form = res.data.data;
if(this.form.hasOwnProperty("deptId")){
this.form.deptId = this.form.deptId.split(",");
}
if(this.form.hasOwnProperty("roleId")){
this.form.roleId = this.form.roleId.split(",");
}
if(this.form.hasOwnProperty("postId")){
this.form.postId = this.form.postId.split(",");
}
});
}
done();
},
currentChange(currentPage) {
this.page.currentPage = currentPage;
},
sizeChange(pageSize) {
this.page.pageSize = pageSize;
},
refreshChange() {
this.onLoad(this.page, this.query);
},
onLoad(page, params = {}) {
this.loading = true;
getList(page.currentPage, page.pageSize, Object.assign(params, this.query)).then(res => {
const data = res.data.data;
this.page.total = data.total;
this.data = data.records;
this.loading = false;
});
getDeptTree(this.form.tenantId).then(res => {
const column = this.findObject(this.option.column, "deptId");
column.dicData = res.data.data;
});
getRoleTree(this.form.tenantId).then(res => {
const column = this.findObject(this.option.column, "roleId");
column.dicData = res.data.data;
});
getPostList(this.form.tenantId).then(res => {
const column = this.findObject(this.option.column, "postId");
column.dicData = res.data.data;
});
}
}
};
</script>
<style>
</style>
|
27182812/ChatGLM-LLaMA-chinese-insturct | 12,068 | src/transformers/utils/dummy_vision_objects.py | # This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
class ImageProcessingMixin(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class ImageFeatureExtractionMixin(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class BeitFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class BeitImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class BitImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class BlipImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class BridgeTowerImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class ChineseCLIPFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class ChineseCLIPImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class CLIPFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class CLIPImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class ConditionalDetrFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class ConditionalDetrImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class ConvNextFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class ConvNextImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class DeformableDetrFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class DeformableDetrImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class DeiTFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class DeiTImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class DetaImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class DetrFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class DetrImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class DonutFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class DonutImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class DPTFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class DPTImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class EfficientFormerImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class EfficientNetImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class FlavaFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class FlavaImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class FlavaProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class GLPNFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class GLPNImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class ImageGPTFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class ImageGPTImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class LayoutLMv2FeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class LayoutLMv2ImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class LayoutLMv3FeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class LayoutLMv3ImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class LevitFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class LevitImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class Mask2FormerImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class MaskFormerFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class MaskFormerImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class MobileNetV1FeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class MobileNetV1ImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class MobileNetV2FeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class MobileNetV2ImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class MobileViTFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class MobileViTImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class OneFormerImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class OwlViTFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class OwlViTImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class PerceiverFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class PerceiverImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class PoolFormerFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class PoolFormerImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class SegformerFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class SegformerImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class Swin2SRImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class TvltImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class VideoMAEFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class VideoMAEImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class ViltFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class ViltImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class ViltProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class ViTFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class ViTImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class ViTHybridImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class YolosFeatureExtractor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
class YolosImageProcessor(metaclass=DummyObject):
_backends = ["vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["vision"])
|
27182812/ChatGLM-LLaMA-chinese-insturct | 8,512 | src/transformers/utils/bitsandbytes.py | from copy import deepcopy
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
def set_module_8bit_tensor_to_device(module, tensor_name, device, value=None):
"""
A helper function to set a given tensor (parameter or buffer) of a module on a specific device (note that doing
`param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function). The
function is adapted from the `set_module_tensor_to_device` function from accelerate, extended to support the
class `Int8Params` from `bitsandbytes`.
Args:
module (`torch.nn.Module`):
The module in which the tensor we want to move lives.
tensor_name (`str`):
The full name of the parameter/buffer.
device (`int`, `str` or `torch.device`):
The device on which to set the tensor.
value (`torch.Tensor`, *optional*):
The value of the tensor (useful when going from the meta device to any other device).
"""
# Recurse if needed
if "." in tensor_name:
splits = tensor_name.split(".")
for split in splits[:-1]:
new_module = getattr(module, split)
if new_module is None:
raise ValueError(f"{module} has no attribute {split}.")
module = new_module
tensor_name = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
is_buffer = tensor_name in module._buffers
old_value = getattr(module, tensor_name)
if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")
if is_buffer:
has_fp16_weights = None
else:
has_fp16_weights = getattr(module._parameters[tensor_name], "has_fp16_weights", None)
if has_fp16_weights is not None:
param = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
new_value = old_value.to(device)
elif isinstance(value, torch.Tensor):
new_value = value.to("cpu")
if value.dtype == torch.int8:
raise ValueError(
"You cannot load weights that are saved in int8 using `load_in_8bit=True`, make sure you are"
" using `load_in_8bit=True` on float32/float16/bfloat16 weights."
)
else:
new_value = torch.tensor(value, device="cpu")
new_value = bnb.nn.Int8Params(new_value, requires_grad=False, has_fp16_weights=has_fp16_weights).to(device)
module._parameters[tensor_name] = new_value
else:
if value is None:
new_value = old_value.to(device)
elif isinstance(value, torch.Tensor):
new_value = value.to(device)
else:
new_value = torch.tensor(value, device=device)
if is_buffer:
module._buffers[tensor_name] = new_value
else:
new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
module._parameters[tensor_name] = new_value
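# A minimal usage sketch (the module/parameter names are illustrative, not
# taken from any real checkpoint):
#
#   set_module_8bit_tensor_to_device(
#       model, "transformer.h.0.mlp.dense.weight", "cuda:0", value=cpu_weight
#   )
#
# For an `Int8Params` parameter this quantizes `cpu_weight` while moving it to
# the GPU, whereas a plain `param.to("cuda:0")` would detach it from the module.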
def replace_8bit_linear(model, threshold=6.0, modules_to_not_convert="lm_head", current_key_name=None):
"""
A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bitLt` modules from the `bitsandbytes`
library. This will enable running your models using mixed int8 precision as described by the paper `GPT3.int8():
8-bit Matrix Multiplication for Transformers at Scale`. Make sure `bitsandbytes` compiled with the correct CUDA
version of your hardware is installed before running this function. `pip install -i https://test.pypi.org/simple/
bitsandbytes`
The function will be run recursively and replace all `torch.nn.Linear` modules except for the `lm_head` that should
be kept as a `torch.nn.Linear` module. The replacement is done under `init_empty_weights` context manager so no
CPU/GPU memory is required to run this function. Int8 mixed-precision matrix decomposition works by separating a
matrix multiplication into two streams: (1) a systematic feature-outlier stream multiplied in fp16
(0.01% of values), and (2) a regular stream of int8 matrix multiplication (99.9% of values). With this method, int8 inference with no
predictive degradation is possible for very large models (>=176B parameters).
Parameters:
model (`torch.nn.Module`):
Input model or `torch.nn.Module` as the function is run recursively.
threshold (`float`, *optional*, defaults to 6.0):
`int8_threshold` for outlier detection as described in the aforementioned paper. This parameter is set to
`6.0` as described by the paper.
modules_to_not_convert (`str`, *optional*, defaults to `lm_head`):
Name of the module to not convert in `Linear8bitLt`. In practice we keep the `lm_head` in full precision
for numerical stability reasons.
current_key_name (`List[`str`]`, *optional*):
An array to track the current key of the recursion. This is used to check whether the current key (part of
it) is not in the list of modules to not convert (for instance, modules that are offloaded to `cpu` or
`disk`).
"""
for name, module in model.named_children():
if current_key_name is None:
current_key_name = []
current_key_name.append(name)
if len(list(module.children())) > 0:
replace_8bit_linear(module, threshold, modules_to_not_convert, current_key_name)
if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
with init_empty_weights():
model._modules[name] = bnb.nn.Linear8bitLt(
module.in_features,
module.out_features,
module.bias is not None,
has_fp16_weights=False,
threshold=threshold,
)
# Remove the last key for recursion
current_key_name.pop(-1)
return model
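# A minimal usage sketch (assuming `model` is a causal LM and `bitsandbytes`
# is installed; the argument values are illustrative):
#
#   model = replace_8bit_linear(model, threshold=6.0,
#                               modules_to_not_convert=["lm_head"])
#
# Every `nn.Linear` except `lm_head` is then an empty `bnb.nn.Linear8bitLt`
# shell created under `init_empty_weights`, ready to receive int8 weights.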
def get_keys_to_not_convert(model):
r"""
A utility function to get the keys of the modules to keep in full precision, if any. For example, for CausalLM
modules we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures,
we want to keep the tied weights of the model. The function returns the list of keys of the modules not to convert
to int8.
Parameters:
model (`torch.nn.Module`):
Input model
"""
# Create a copy of the model and tie the weights, then
# check if it contains tied weights
tied_model = deepcopy(model)  # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
tied_keys = list(find_tied_parameters(tied_model).values())
has_tied_params = len(tied_keys) > 0
# Check if it is a base model
is_base_model = not hasattr(model, model.base_model_prefix)
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
list_modules = list(model.named_parameters())
list_last_module = [list_modules[-1][0]]
# add last module together with tied weights
intersection = set(list_last_module) - set(tied_keys)
list_untouched = tied_keys + list(intersection)
# remove ".weight" from the keys
names_to_remove = [".weight", ".bias"]
filtered_module_names = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
name = name.replace(name_to_remove, "")
filtered_module_names.append(name)
return filtered_module_names
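# A minimal usage sketch combining the two helpers above (the resulting list
# is illustrative, not guaranteed):
#
#   modules_to_keep = get_keys_to_not_convert(model)  # e.g. ["lm_head"]
#   model = replace_8bit_linear(model, modules_to_not_convert=modules_to_keep)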
|
27182812/ChatGLM-LLaMA-chinese-insturct | 14,831 | src/transformers/utils/notebook.py | # coding=utf-8
# Copyright 2020 Hugging Face
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
"Format `t` (in seconds) to (h):mm:ss"
t = int(t)
h, m, s = t // 3600, (t // 60) % 60, t % 60
return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=300):
# docstyle-ignore
return f"""
<div>
{prefix}
<progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
{label}
</div>
"""
def text_to_html_table(items):
"Put the texts in `items` in an HTML table."
html_code = """<table border="1" class="dataframe">\n"""
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f" <th>{i}</th>\n"
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
html_code += f" <td>{elt}</td>\n"
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class NotebookProgressBar:
"""
A progress bar for display in a notebook.
Class attributes (overridden by derived classes)
- **warmup** (`int`) -- The number of iterations to do at the beginning while ignoring `update_every`.
- **update_every** (`float`) -- Since querying the time has some overhead, we only do it every presumed
`update_every` seconds. The progress bar uses the average time passed up until now to guess the next value
for which it will call the update.
Args:
total (`int`):
The total number of iterations to reach.
prefix (`str`, *optional*):
A prefix to add before the progress bar.
leave (`bool`, *optional*, defaults to `True`):
Whether or not to leave the progress bar once it's completed. You can always call the
[`~utils.notebook.NotebookProgressBar.close`] method to make the bar disappear.
parent ([`~notebook.NotebookTrainingTracker`], *optional*):
A parent object (like [`~utils.notebook.NotebookTrainingTracker`]) that spawns progress bars and handles
their display. If set, the object passed must have a `display()` method.
width (`int`, *optional*, defaults to 300):
The width (in pixels) that the bar will take.
Example:
```python
import time
pbar = NotebookProgressBar(100)
for val in range(100):
pbar.update(val)
time.sleep(0.07)
pbar.update(100)
```"""
warmup = 5
update_every = 0.2
def __init__(
self,
total: int,
prefix: Optional[str] = None,
leave: bool = True,
parent: Optional["NotebookTrainingTracker"] = None,
width: int = 300,
):
self.total = total
self.prefix = "" if prefix is None else prefix
self.leave = leave
self.parent = parent
self.width = width
self.last_value = None
self.comment = None
self.output = None
def update(self, value: int, force_update: bool = False, comment: Optional[str] = None):
"""
The main method to update the progress bar to `value`.
Args:
value (`int`):
The value to use. Must be between 0 and `total`.
force_update (`bool`, *optional*, defaults to `False`):
Whether or not to force an update of the internal state and display (by default, the bar will wait for
`value` to reach the value it predicted corresponds to a time of more than the `update_every` attribute
since the last update to avoid adding boilerplate).
comment (`str`, *optional*):
A comment to add on the left of the progress bar.
"""
self.value = value
if comment is not None:
self.comment = comment
if self.last_value is None:
self.start_time = self.last_time = time.time()
self.start_value = self.last_value = value
self.elapsed_time = self.predicted_remaining = None
self.first_calls = self.warmup
self.wait_for = 1
self.update_bar(value)
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
if self.first_calls > 0:
self.first_calls -= 1
current_time = time.time()
self.elapsed_time = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
self.average_time_per_item = self.elapsed_time / (value - self.start_value)
else:
self.average_time_per_item = None
if value >= self.total:
value = self.total
self.predicted_remaining = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
self.predicted_remaining = self.average_time_per_item * (self.total - value)
self.update_bar(value)
self.last_value = value
self.last_time = current_time
if self.average_time_per_item is None:
self.wait_for = 1
else:
self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
def update_bar(self, value, comment=None):
spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
if self.elapsed_time is None:
self.label = f"[{spaced_value}/{self.total} : < :"
elif self.predicted_remaining is None:
self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
else:
self.label = (
f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
f" {format_time(self.predicted_remaining)}"
)
self.label += f", {1/self.average_time_per_item:.2f} it/s"
self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
self.display()
def display(self):
self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
self.output = disp.display(disp.HTML(self.html_code), display_id=True)
else:
self.output.update(disp.HTML(self.html_code))
def close(self):
"Closes the progress bar."
if self.parent is None and self.output is not None:
self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
"""
An object tracking the updates of an ongoing training with progress bars and a nice table reporting metrics.
Args:
num_steps (`int`):
The number of steps during training.
column_names (`List[str]`, *optional*):
The list of column names for the metrics table (will be inferred from the first call to
[`~utils.notebook.NotebookTrainingTracker.write_line`] if not set).
"""
def __init__(self, num_steps, column_names=None):
super().__init__(num_steps)
self.inner_table = None if column_names is None else [column_names]
self.child_bar = None
def display(self):
self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table)
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
self.output = disp.display(disp.HTML(self.html_code), display_id=True)
else:
self.output.update(disp.HTML(self.html_code))
def write_line(self, values):
"""
Write the values in the inner table.
Args:
values (`Dict[str, float]`): The values to display.
"""
if self.inner_table is None:
self.inner_table = [list(values.keys()), list(values.values())]
else:
columns = self.inner_table[0]
if len(self.inner_table) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(key)
self.inner_table[0] = columns
self.inner_table.append([values[c] for c in columns])
def add_child(self, total, prefix=None, width=300):
"""
Add a child progress bar displayed under the table of metrics. The child progress bar is returned (so it can be
easily updated).
Args:
total (`int`): The number of iterations for the child progress bar.
prefix (`str`, *optional*): A prefix to write on the left of the progress bar.
width (`int`, *optional*, defaults to 300): The width (in pixels) of the progress bar.
"""
self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
return self.child_bar
def remove_child(self):
"""
Closes the child progress bar.
"""
self.child_bar = None
self.display()
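# A minimal usage sketch (run inside a notebook; the numbers are illustrative):
#
#   tracker = NotebookTrainingTracker(1000, ["Step", "Training Loss"])
#   for step in range(1, 1001):
#       tracker.update(step)
#       if step % 100 == 0:
#           tracker.write_line({"Step": step, "Training Loss": 0.5})
#   eval_bar = tracker.add_child(50)  # progress bar shown under the table
#   for i in range(1, 51):
#       eval_bar.update(i)
#   tracker.remove_child()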
class NotebookProgressCallback(TrainerCallback):
"""
A [`TrainerCallback`] that displays the progress of training or evaluation, optimized for Jupyter Notebooks or
Google Colab.
"""
def __init__(self):
self.training_tracker = None
self.prediction_bar = None
self._force_next_update = False
def on_train_begin(self, args, state, control, **kwargs):
self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
self.training_loss = 0
self.last_log = 0
column_names = [self.first_column] + ["Training Loss"]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("Validation Loss")
self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)
def on_step_end(self, args, state, control, **kwargs):
epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
self.training_tracker.update(
state.global_step + 1,
comment=f"Epoch {epoch}/{state.num_train_epochs}",
force_update=self._force_next_update,
)
self._force_next_update = False
def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
if not has_length(eval_dataloader):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
else:
self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
self.prediction_bar.update(1)
else:
self.prediction_bar.update(self.prediction_bar.value + 1)
def on_predict(self, args, state, control, **kwargs):
if self.prediction_bar is not None:
self.prediction_bar.close()
self.prediction_bar = None
def on_log(self, args, state, control, logs=None, **kwargs):
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
values = {"Training Loss": logs["loss"]}
# First column is necessarily Step since we're not in epoch eval strategy
values["Step"] = state.global_step
self.training_tracker.write_line(values)
def on_evaluate(self, args, state, control, metrics=None, **kwargs):
if self.training_tracker is not None:
values = {"Training Loss": "No log", "Validation Loss": "No log"}
for log in reversed(state.log_history):
if "loss" in log:
values["Training Loss"] = log["loss"]
break
if self.first_column == "Epoch":
values["Epoch"] = int(state.epoch)
else:
values["Step"] = state.global_step
metric_key_prefix = "eval"
for k in metrics:
if k.endswith("_loss"):
metric_key_prefix = re.sub(r"\_loss$", "", k)
_ = metrics.pop("total_flos", None)
_ = metrics.pop("epoch", None)
_ = metrics.pop(f"{metric_key_prefix}_runtime", None)
_ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
_ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
_ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
for k, v in metrics.items():
if k == f"{metric_key_prefix}_loss":
values["Validation Loss"] = v
else:
splits = k.split("_")
name = " ".join([part.capitalize() for part in splits[1:]])
values[name] = v
self.training_tracker.write_line(values)
self.training_tracker.remove_child()
self.prediction_bar = None
# Evaluation takes a long time so we should force the next update.
self._force_next_update = True
def on_train_end(self, args, state, control, **kwargs):
self.training_tracker.update(
state.global_step, comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}", force_update=True
)
self.training_tracker = None
|
274056675/springboot-openai-chatgpt | 7,920 | mng_web/src/views/system/client.vue | <template>
<basic-container>
<avue-crud :option="option"
:table-loading="loading"
:data="data"
:page="page"
@row-del="rowDel"
v-model="form"
:permission="permissionList"
@row-update="rowUpdate"
@row-save="rowSave"
:before-open="beforeOpen"
@search-change="searchChange"
@search-reset="searchReset"
@selection-change="selectionChange"
@current-change="currentChange"
@size-change="sizeChange"
@on-load="onLoad">
<template slot="menuLeft">
<el-button type="danger"
size="small"
icon="el-icon-delete"
plain
v-if="permission.client_delete"
@click="handleDelete">删 除
</el-button>
</template>
</avue-crud>
</basic-container>
</template>
<script>
import {getList, getDetail, add, update, remove} from "@/api/system/client";
import {mapGetters} from "vuex";
export default {
data() {
return {
form: {},
query: {},
loading: true,
page: {
pageSize: 10,
currentPage: 1,
total: 0
},
selectionList: [],
option: {
height: 'auto',
calcHeight: 210,
searchShow: true,
searchMenuSpan: 6,
tip: false,
border: true,
index: true,
viewBtn: true,
selection: true,
column: [
{
label: "应用id",
prop: "clientId",
search: true,
rules: [{
required: true,
message: "请输入客户端id",
trigger: "blur"
}]
},
{
label: "应用密钥",
prop: "clientSecret",
search: true,
rules: [{
required: true,
message: "请输入客户端密钥",
trigger: "blur"
}]
},
{
label: "授权类型",
prop: "authorizedGrantTypes",
valueDefault: "refresh_token,password,authorization_code",
rules: [{
required: true,
message: "请输入授权类型",
trigger: "blur"
}]
},
{
label: "授权范围",
prop: "scope",
valueDefault: "all",
rules: [{
required: true,
message: "请输入授权范围",
trigger: "blur"
}]
},
{
label: "令牌秒数",
prop: "accessTokenValidity",
type: "number",
valueDefault: 3600,
rules: [{
required: true,
message: "请输入令牌过期秒数",
trigger: "blur"
}]
},
{
label: "刷新秒数",
prop: "refreshTokenValidity",
type: "number",
valueDefault: 604800,
hide: true,
rules: [{
required: true,
message: "请输入刷新令牌过期秒数",
trigger: "blur"
}]
},
{
label: "回调地址",
prop: "webServerRedirectUri",
hide: true,
rules: [{
required: true,
message: "请输入回调地址",
trigger: "blur"
}]
},
{
label: "资源集合",
prop: "resourceIds",
hide: true,
rules: [{
message: "请输入资源集合",
trigger: "blur"
}]
},
{
label: "权限",
prop: "authorities",
hide: true,
rules: [{
message: "请输入权限",
trigger: "blur"
}]
},
{
label: "自动授权",
prop: "autoapprove",
hide: true,
rules: [{
message: "请输入自动授权",
trigger: "blur"
}]
},
{
label: "附加说明",
hide: true,
prop: "additionalInformation",
span: 24,
rules: [{
message: "请输入附加说明",
trigger: "blur"
}]
},
]
},
data: []
};
},
computed: {
...mapGetters(["permission"]),
permissionList() {
return {
addBtn: this.vaildData(this.permission.client_add),
viewBtn: this.vaildData(this.permission.client_view),
delBtn: this.vaildData(this.permission.client_delete),
editBtn: this.vaildData(this.permission.client_edit)
};
},
ids() {
let ids = [];
this.selectionList.forEach(ele => {
ids.push(ele.id);
});
return ids.join(",");
}
},
methods: {
rowSave(row, done, loading) {
add(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowUpdate(row, index, done, loading) {
update(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowDel(row) {
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(row.id);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
});
},
searchReset() {
this.query = {};
this.onLoad(this.page);
},
searchChange(params, done) {
this.query = params;
this.page.currentPage = 1;
this.onLoad(this.page, params);
done();
},
selectionChange(list) {
this.selectionList = list;
},
handleDelete() {
if (this.selectionList.length === 0) {
this.$message.warning("请选择至少一条数据");
return;
}
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(this.ids);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
this.$refs.crud.toggleSelection();
});
},
beforeOpen(done, type) {
if (["edit", "view"].includes(type)) {
getDetail(this.form.id).then(res => {
this.form = res.data.data;
});
}
done();
},
currentChange(currentPage){
this.page.currentPage = currentPage;
},
sizeChange(pageSize){
this.page.pageSize = pageSize;
},
onLoad(page, params = {}) {
this.loading = true;
getList(page.currentPage, page.pageSize, Object.assign(params, this.query)).then(res => {
const data = res.data.data;
this.page.total = data.total;
this.data = data.records;
this.loading = false;
});
}
}
};
</script>
<style>
</style>
|
27182812/ChatGLM-LLaMA-chinese-insturct | 41,501 | src/transformers/utils/import_utils.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Import utilities: Utilities related to imports and our lazy inits.
"""
import importlib.util
import json
import os
import shutil
import sys
import warnings
from collections import OrderedDict
from functools import lru_cache
from itertools import chain
from types import ModuleType
from typing import Any
from packaging import version
from . import logging
from .versions import importlib_metadata
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper()
FORCE_TF_AVAILABLE = os.environ.get("FORCE_TF_AVAILABLE", "AUTO").upper()
_torch_version = "N/A"
if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
_torch_available = importlib.util.find_spec("torch") is not None
if _torch_available:
try:
_torch_version = importlib_metadata.version("torch")
logger.info(f"PyTorch version {_torch_version} available.")
except importlib_metadata.PackageNotFoundError:
_torch_available = False
else:
logger.info("Disabling PyTorch because USE_TF is set")
_torch_available = False
_tf_version = "N/A"
if FORCE_TF_AVAILABLE in ENV_VARS_TRUE_VALUES:
_tf_available = True
else:
if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
_tf_available = importlib.util.find_spec("tensorflow") is not None
if _tf_available:
candidates = (
"tensorflow",
"tensorflow-cpu",
"tensorflow-gpu",
"tf-nightly",
"tf-nightly-cpu",
"tf-nightly-gpu",
"intel-tensorflow",
"intel-tensorflow-avx512",
"tensorflow-rocm",
"tensorflow-macos",
"tensorflow-aarch64",
)
_tf_version = None
# For the metadata, we have to look for both tensorflow and tensorflow-cpu
for pkg in candidates:
try:
_tf_version = importlib_metadata.version(pkg)
break
except importlib_metadata.PackageNotFoundError:
pass
_tf_available = _tf_version is not None
if _tf_available:
if version.parse(_tf_version) < version.parse("2"):
logger.info(
f"TensorFlow found but with version {_tf_version}. Transformers requires version 2 minimum."
)
_tf_available = False
else:
logger.info(f"TensorFlow version {_tf_version} available.")
else:
logger.info("Disabling Tensorflow because USE_TORCH is set")
_tf_available = False
if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
_flax_available = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("flax") is not None
if _flax_available:
try:
_jax_version = importlib_metadata.version("jax")
_flax_version = importlib_metadata.version("flax")
logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.")
except importlib_metadata.PackageNotFoundError:
_flax_available = False
else:
_flax_available = False
_datasets_available = importlib.util.find_spec("datasets") is not None
try:
# Check we're not importing a "datasets" directory somewhere but the actual library by trying to grab the version
# AND checking it has an author field in the metadata that is HuggingFace.
_ = importlib_metadata.version("datasets")
_datasets_metadata = importlib_metadata.metadata("datasets")
if _datasets_metadata.get("author", "") != "HuggingFace Inc.":
_datasets_available = False
except importlib_metadata.PackageNotFoundError:
_datasets_available = False
_detectron2_available = importlib.util.find_spec("detectron2") is not None
try:
_detectron2_version = importlib_metadata.version("detectron2")
logger.debug(f"Successfully imported detectron2 version {_detectron2_version}")
except importlib_metadata.PackageNotFoundError:
_detectron2_available = False
_faiss_available = importlib.util.find_spec("faiss") is not None
try:
_faiss_version = importlib_metadata.version("faiss")
logger.debug(f"Successfully imported faiss version {_faiss_version}")
except importlib_metadata.PackageNotFoundError:
try:
_faiss_version = importlib_metadata.version("faiss-cpu")
logger.debug(f"Successfully imported faiss version {_faiss_version}")
except importlib_metadata.PackageNotFoundError:
_faiss_available = False
_ftfy_available = importlib.util.find_spec("ftfy") is not None
try:
_ftfy_version = importlib_metadata.version("ftfy")
logger.debug(f"Successfully imported ftfy version {_ftfy_version}")
except importlib_metadata.PackageNotFoundError:
_ftfy_available = False
coloredlogs = importlib.util.find_spec("coloredlogs") is not None
try:
_coloredlogs_available = importlib_metadata.version("coloredlogs")
logger.debug(f"Successfully imported sympy version {_coloredlogs_available}")
except importlib_metadata.PackageNotFoundError:
_coloredlogs_available = False
sympy_available = importlib.util.find_spec("sympy") is not None
try:
_sympy_available = importlib_metadata.version("sympy")
logger.debug(f"Successfully imported sympy version {_sympy_available}")
except importlib_metadata.PackageNotFoundError:
_sympy_available = False
_tf2onnx_available = importlib.util.find_spec("tf2onnx") is not None
try:
_tf2onnx_version = importlib_metadata.version("tf2onnx")
logger.debug(f"Successfully imported tf2onnx version {_tf2onnx_version}")
except importlib_metadata.PackageNotFoundError:
_tf2onnx_available = False
_onnx_available = importlib.util.find_spec("onnxruntime") is not None
try:
_onnx_version = importlib_metadata.version("onnx")
logger.debug(f"Successfully imported onnx version {_onnx_version}")
except importlib_metadata.PackageNotFoundError:
_onnx_available = False
_pytorch_quantization_available = importlib.util.find_spec("pytorch_quantization") is not None
try:
_pytorch_quantization_version = importlib_metadata.version("pytorch_quantization")
logger.debug(f"Successfully imported pytorch-quantization version {_pytorch_quantization_version}")
except importlib_metadata.PackageNotFoundError:
_pytorch_quantization_available = False
_soundfile_available = importlib.util.find_spec("soundfile") is not None
try:
_soundfile_version = importlib_metadata.version("soundfile")
logger.debug(f"Successfully imported soundfile version {_soundfile_version}")
except importlib_metadata.PackageNotFoundError:
_soundfile_available = False
_tensorflow_probability_available = importlib.util.find_spec("tensorflow_probability") is not None
try:
_tensorflow_probability_version = importlib_metadata.version("tensorflow_probability")
logger.debug(f"Successfully imported tensorflow-probability version {_tensorflow_probability_version}")
except importlib_metadata.PackageNotFoundError:
_tensorflow_probability_available = False
_timm_available = importlib.util.find_spec("timm") is not None
try:
_timm_version = importlib_metadata.version("timm")
logger.debug(f"Successfully imported timm version {_timm_version}")
except importlib_metadata.PackageNotFoundError:
_timm_available = False
_natten_available = importlib.util.find_spec("natten") is not None
try:
_natten_version = importlib_metadata.version("natten")
logger.debug(f"Successfully imported natten version {_natten_version}")
except importlib_metadata.PackageNotFoundError:
_natten_available = False
_torchaudio_available = importlib.util.find_spec("torchaudio") is not None
try:
_torchaudio_version = importlib_metadata.version("torchaudio")
logger.debug(f"Successfully imported torchaudio version {_torchaudio_version}")
except importlib_metadata.PackageNotFoundError:
_torchaudio_available = False
_phonemizer_available = importlib.util.find_spec("phonemizer") is not None
try:
_phonemizer_version = importlib_metadata.version("phonemizer")
logger.debug(f"Successfully imported phonemizer version {_phonemizer_version}")
except importlib_metadata.PackageNotFoundError:
_phonemizer_available = False
_pyctcdecode_available = importlib.util.find_spec("pyctcdecode") is not None
try:
_pyctcdecode_version = importlib_metadata.version("pyctcdecode")
logger.debug(f"Successfully imported pyctcdecode version {_pyctcdecode_version}")
except importlib_metadata.PackageNotFoundError:
_pyctcdecode_available = False
_librosa_available = importlib.util.find_spec("librosa") is not None
try:
_librosa_version = importlib_metadata.version("librosa")
logger.debug(f"Successfully imported librosa version {_librosa_version}")
except importlib_metadata.PackageNotFoundError:
_librosa_available = False
ccl_version = "N/A"
_is_ccl_available = (
importlib.util.find_spec("torch_ccl") is not None
or importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None
)
try:
ccl_version = importlib_metadata.version("oneccl_bind_pt")
logger.debug(f"Successfully imported oneccl_bind_pt version {ccl_version}")
except importlib_metadata.PackageNotFoundError:
_is_ccl_available = False
_decord_available = importlib.util.find_spec("decord") is not None
try:
_decord_version = importlib_metadata.version("decord")
logger.debug(f"Successfully imported decord version {_decord_version}")
except importlib_metadata.PackageNotFoundError:
_decord_available = False
# This is the version of torch required to run torch.fx features and torch.onnx with dictionary inputs.
TORCH_FX_REQUIRED_VERSION = version.parse("1.10")
TORCH_ONNX_DICT_INPUTS_MINIMUM_VERSION = version.parse("1.8")
def is_kenlm_available():
return importlib.util.find_spec("kenlm") is not None
def is_torch_available():
return _torch_available
def is_torchvision_available():
return importlib.util.find_spec("torchvision") is not None
def is_pyctcdecode_available():
return _pyctcdecode_available
def is_librosa_available():
return _librosa_available
def is_torch_cuda_available():
if is_torch_available():
import torch
return torch.cuda.is_available()
else:
return False
def is_torch_bf16_gpu_available():
if not is_torch_available():
return False
import torch
# since currently no utility function is available we build our own.
# some bits come from https://github.com/pytorch/pytorch/blob/2289a12f21c54da93bf5d696e3f9aea83dd9c10d/torch/testing/_internal/common_cuda.py#L51
# with additional check for torch version
# to succeed:
# 1. torch >= 1.10 (1.9 might be enough, but the AMP API changed in 1.10, so we require 1.10 as the minimum)
# 2. the hardware needs to support bf16 (GPU arch >= Ampere, or CPU)
# 3. if using gpu, CUDA >= 11
# 4. torch.autocast exists
# XXX: one problem here is that it may give invalid results on mixed gpus setup, so it's
# really only correct for the 0th gpu (or currently set default device if different from 0)
if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.10"):
return False
if torch.cuda.is_available() and torch.version.cuda is not None:
if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8:
return False
if int(torch.version.cuda.split(".")[0]) < 11:
return False
if not hasattr(torch.cuda.amp, "autocast"):
return False
else:
return False
return True
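# Illustrative usage sketch, not part of the original module: callers typically
# combine the bf16 helpers above to pick a mixed-precision dtype. The function
# name below is hypothetical and is only defined here, never called.
def _example_pick_amp_dtype():
    import torch

    # Prefer bf16 where the GPU supports it (Ampere+, CUDA >= 11), else fall back to fp16.
    return torch.bfloat16 if is_torch_bf16_gpu_available() else torch.float16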
def is_torch_bf16_cpu_available():
if not is_torch_available():
return False
import torch
if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.10"):
return False
try:
# multiple levels of AttributeError depending on the pytorch version so do them all in one check
_ = torch.cpu.amp.autocast
except AttributeError:
return False
return True
def is_torch_bf16_available():
# the original bf16 check was for gpu only, but later a cpu/bf16 combo has emerged so this util
# has become ambiguous and therefore deprecated
warnings.warn(
"The util is_torch_bf16_available is deprecated, please use is_torch_bf16_gpu_available "
"or is_torch_bf16_cpu_available instead according to whether it's used with cpu or gpu",
FutureWarning,
)
return is_torch_bf16_gpu_available()
def is_torch_tf32_available():
if not is_torch_available():
return False
import torch
if not torch.cuda.is_available() or torch.version.cuda is None:
return False
if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8:
return False
if int(torch.version.cuda.split(".")[0]) < 11:
return False
if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.7"):
return False
return True
torch_version = None
_torch_fx_available = _torch_onnx_dict_inputs_support_available = False
if _torch_available:
torch_version = version.parse(importlib_metadata.version("torch"))
_torch_fx_available = (torch_version.major, torch_version.minor) >= (
TORCH_FX_REQUIRED_VERSION.major,
TORCH_FX_REQUIRED_VERSION.minor,
)
_torch_onnx_dict_inputs_support_available = torch_version >= TORCH_ONNX_DICT_INPUTS_MINIMUM_VERSION
def is_torch_fx_available():
return _torch_fx_available
def is_bs4_available():
return importlib.util.find_spec("bs4") is not None
def is_torch_onnx_dict_inputs_support_available():
return _torch_onnx_dict_inputs_support_available
def is_tf_available():
return _tf_available
def is_coloredlogs_available():
return _coloredlogs_available
def is_tf2onnx_available():
return _tf2onnx_available
def is_onnx_available():
return _onnx_available
def is_flax_available():
return _flax_available
def is_ftfy_available():
return _ftfy_available
@lru_cache()
def is_torch_tpu_available(check_device=True):
"Checks if `torch_xla` is installed and potentially if a TPU is in the environment"
if not _torch_available:
return False
if importlib.util.find_spec("torch_xla") is not None:
if check_device:
# We need to check if `xla_device` can be found, will raise a RuntimeError if not
try:
import torch_xla.core.xla_model as xm
_ = xm.xla_device()
return True
except RuntimeError:
return False
return True
return False
@lru_cache()
def is_torch_neuroncore_available(check_device=True):
if importlib.util.find_spec("torch_neuronx") is not None:
return is_torch_tpu_available(check_device)
return False
def is_torchdynamo_available():
if not is_torch_available():
return False
try:
import torch._dynamo as dynamo # noqa: F401
return True
except Exception:
return False
def is_torch_compile_available():
if not is_torch_available():
return False
import torch
return hasattr(torch, "compile")
def is_torch_tensorrt_fx_available():
if importlib.util.find_spec("torch_tensorrt") is None:
return False
return importlib.util.find_spec("torch_tensorrt.fx") is not None
def is_datasets_available():
return _datasets_available
def is_detectron2_available():
return _detectron2_available
def is_rjieba_available():
return importlib.util.find_spec("rjieba") is not None
def is_psutil_available():
return importlib.util.find_spec("psutil") is not None
def is_py3nvml_available():
return importlib.util.find_spec("py3nvml") is not None
def is_sacremoses_available():
return importlib.util.find_spec("sacremoses") is not None
def is_apex_available():
return importlib.util.find_spec("apex") is not None
def is_ninja_available():
return importlib.util.find_spec("ninja") is not None
def is_ipex_available():
def get_major_and_minor_from_version(full_version):
return str(version.parse(full_version).major) + "." + str(version.parse(full_version).minor)
if not is_torch_available() or importlib.util.find_spec("intel_extension_for_pytorch") is None:
return False
_ipex_version = "N/A"
try:
_ipex_version = importlib_metadata.version("intel_extension_for_pytorch")
except importlib_metadata.PackageNotFoundError:
return False
torch_major_and_minor = get_major_and_minor_from_version(_torch_version)
ipex_major_and_minor = get_major_and_minor_from_version(_ipex_version)
if torch_major_and_minor != ipex_major_and_minor:
logger.warning(
f"Intel Extension for PyTorch {ipex_major_and_minor} needs to work with PyTorch {ipex_major_and_minor}.*,"
f" but PyTorch {_torch_version} is found. Please switch to the matching version and run again."
)
return False
return True
def is_bitsandbytes_available():
return importlib.util.find_spec("bitsandbytes") is not None
def is_torchdistx_available():
return importlib.util.find_spec("torchdistx") is not None
def is_faiss_available():
return _faiss_available
def is_scipy_available():
return importlib.util.find_spec("scipy") is not None
def is_sklearn_available():
if importlib.util.find_spec("sklearn") is None:
return False
return is_scipy_available() and importlib.util.find_spec("sklearn.metrics") is not None
def is_sentencepiece_available():
return importlib.util.find_spec("sentencepiece") is not None
def is_protobuf_available():
if importlib.util.find_spec("google") is None:
return False
return importlib.util.find_spec("google.protobuf") is not None
def is_accelerate_available():
return importlib.util.find_spec("accelerate") is not None
def is_optimum_available():
return importlib.util.find_spec("optimum") is not None
def is_safetensors_available():
return importlib.util.find_spec("safetensors") is not None
def is_tokenizers_available():
return importlib.util.find_spec("tokenizers") is not None
def is_vision_available():
return importlib.util.find_spec("PIL") is not None
def is_pytesseract_available():
return importlib.util.find_spec("pytesseract") is not None
def is_spacy_available():
return importlib.util.find_spec("spacy") is not None
def is_tensorflow_text_available():
return is_tf_available() and importlib.util.find_spec("tensorflow_text") is not None
def is_keras_nlp_available():
return is_tensorflow_text_available() and importlib.util.find_spec("keras_nlp") is not None
def is_in_notebook():
try:
# Test adapted from tqdm.autonotebook: https://github.com/tqdm/tqdm/blob/master/tqdm/autonotebook.py
get_ipython = sys.modules["IPython"].get_ipython
if "IPKernelApp" not in get_ipython().config:
raise ImportError("console")
if "VSCODE_PID" in os.environ:
raise ImportError("vscode")
if "DATABRICKS_RUNTIME_VERSION" in os.environ and os.environ["DATABRICKS_RUNTIME_VERSION"] < "11.0":
# Databricks Runtime 11.0 and above uses IPython kernel by default so it should be compatible with Jupyter notebook
# https://docs.microsoft.com/en-us/azure/databricks/notebooks/ipython-kernel
raise ImportError("databricks")
return importlib.util.find_spec("IPython") is not None
except (AttributeError, ImportError, KeyError):
return False
def is_pytorch_quantization_available():
return _pytorch_quantization_available
def is_tensorflow_probability_available():
return _tensorflow_probability_available
def is_pandas_available():
return importlib.util.find_spec("pandas") is not None
def is_sagemaker_dp_enabled():
# Get the sagemaker specific env variable.
sagemaker_params = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
sagemaker_params = json.loads(sagemaker_params)
if not sagemaker_params.get("sagemaker_distributed_dataparallel_enabled", False):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed") is not None
def is_sagemaker_mp_enabled():
# Get the sagemaker specific mp parameters from smp_options variable.
smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
smp_options = json.loads(smp_options)
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
mpi_options = json.loads(mpi_options)
if not mpi_options.get("sagemaker_mpi_enabled", False):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed") is not None
def is_training_run_on_sagemaker():
return "SAGEMAKER_JOB_NAME" in os.environ
def is_soundfile_available():
return _soundfile_available
def is_timm_available():
return _timm_available
def is_natten_available():
return _natten_available
def is_torchaudio_available():
return _torchaudio_available
def is_speech_available():
# For now this depends on torchaudio but the exact dependency might evolve in the future.
return _torchaudio_available
def is_phonemizer_available():
return _phonemizer_available
def torch_only_method(fn):
def wrapper(*args, **kwargs):
if not _torch_available:
raise ImportError(
"You need to install pytorch to use this method or class, "
"or activate it with environment variables USE_TORCH=1 and USE_TF=0."
)
else:
return fn(*args, **kwargs)
return wrapper
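# Illustrative usage sketch with a hypothetical function, not part of the original
# module: torch_only_method is applied as a decorator, so calling the wrapped
# function without PyTorch raises the ImportError above instead of failing later.
@torch_only_method
def _example_torch_dependent(values):
    import torch

    return torch.tensor(values)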
def is_ccl_available():
return _is_ccl_available
def is_decord_available():
return _decord_available
def is_sudachi_available():
return importlib.util.find_spec("sudachipy") is not None
def is_jumanpp_available():
return (importlib.util.find_spec("rhoknp") is not None) and (shutil.which("jumanpp") is not None)
def is_cython_available():
return importlib.util.find_spec("pyximport") is not None
# docstyle-ignore
DATASETS_IMPORT_ERROR = """
{0} requires the 🤗 Datasets library but it was not found in your environment. You can install it with:
```
pip install datasets
```
In a notebook or a colab, you can install it by executing a cell with
```
!pip install datasets
```
then restarting your kernel.
Note that if you have a local folder named `datasets` or a local python file named `datasets.py` in your current
working directory, python may try to import this instead of the 🤗 Datasets library. You should rename this folder or
that python file if that's the case. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
TOKENIZERS_IMPORT_ERROR = """
{0} requires the 🤗 Tokenizers library but it was not found in your environment. You can install it with:
```
pip install tokenizers
```
In a notebook or a colab, you can install it by executing a cell with
```
!pip install tokenizers
```
Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
SENTENCEPIECE_IMPORT_ERROR = """
{0} requires the SentencePiece library but it was not found in your environment. Check out the instructions on the
installation page of its repo: https://github.com/google/sentencepiece#installation and follow the ones
that match your environment. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
PROTOBUF_IMPORT_ERROR = """
{0} requires the protobuf library but it was not found in your environment. Check out the instructions on the
installation page of its repo: https://github.com/protocolbuffers/protobuf/tree/master/python#installation and follow the ones
that match your environment. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
FAISS_IMPORT_ERROR = """
{0} requires the faiss library but it was not found in your environment. Check out the instructions on the
installation page of its repo: https://github.com/facebookresearch/faiss/blob/master/INSTALL.md and follow the ones
that match your environment. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
PYTORCH_IMPORT_ERROR = """
{0} requires the PyTorch library but it was not found in your environment. Check out the instructions on the
installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment.
Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
TORCHVISION_IMPORT_ERROR = """
{0} requires the Torchvision library but it was not found in your environment. Check out the instructions on the
installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment.
Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
PYTORCH_IMPORT_ERROR_WITH_TF = """
{0} requires the PyTorch library but it was not found in your environment.
However, we were able to find a TensorFlow installation. TensorFlow classes begin
with "TF", but are otherwise identically named to our PyTorch classes. This
means that the TF equivalent of the class you tried to import would be "TF{0}".
If you want to use TensorFlow, please use TF classes instead!
If you really do want to use PyTorch please go to
https://pytorch.org/get-started/locally/ and follow the instructions that
match your environment.
"""
# docstyle-ignore
TF_IMPORT_ERROR_WITH_PYTORCH = """
{0} requires the TensorFlow library but it was not found in your environment.
However, we were able to find a PyTorch installation. PyTorch classes do not begin
with "TF", but are otherwise identically named to our TF classes.
If you want to use PyTorch, please use those classes instead!
If you really do want to use TensorFlow, please follow the instructions on the
installation page https://www.tensorflow.org/install that match your environment.
"""
# docstyle-ignore
BS4_IMPORT_ERROR = """
{0} requires the Beautiful Soup library but it was not found in your environment. You can install it with pip:
`pip install beautifulsoup4`. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
SKLEARN_IMPORT_ERROR = """
{0} requires the scikit-learn library but it was not found in your environment. You can install it with:
```
pip install -U scikit-learn
```
In a notebook or a colab, you can install it by executing a cell with
```
!pip install -U scikit-learn
```
Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
TENSORFLOW_IMPORT_ERROR = """
{0} requires the TensorFlow library but it was not found in your environment. Check out the instructions on the
installation page: https://www.tensorflow.org/install and follow the ones that match your environment.
Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
DETECTRON2_IMPORT_ERROR = """
{0} requires the detectron2 library but it was not found in your environment. Check out the instructions on the
installation page: https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md and follow the ones
that match your environment. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
FLAX_IMPORT_ERROR = """
{0} requires the FLAX library but it was not found in your environment. Check out the instructions on the
installation page: https://github.com/google/flax and follow the ones that match your environment.
Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
FTFY_IMPORT_ERROR = """
{0} requires the ftfy library but it was not found in your environment. Check out the instructions on the
installation section: https://github.com/rspeer/python-ftfy/tree/master#installing and follow the ones
that match your environment. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
PYTORCH_QUANTIZATION_IMPORT_ERROR = """
{0} requires the pytorch-quantization library but it was not found in your environment. You can install it with pip:
`pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com`
Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
TENSORFLOW_PROBABILITY_IMPORT_ERROR = """
{0} requires the tensorflow_probability library but it was not found in your environment. You can install it with pip as
explained here: https://github.com/tensorflow/probability. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
TENSORFLOW_TEXT_IMPORT_ERROR = """
{0} requires the tensorflow_text library but it was not found in your environment. You can install it with pip as
explained here: https://www.tensorflow.org/text/guide/tf_text_intro.
Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
PANDAS_IMPORT_ERROR = """
{0} requires the pandas library but it was not found in your environment. You can install it with pip as
explained here: https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html.
Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
PHONEMIZER_IMPORT_ERROR = """
{0} requires the phonemizer library but it was not found in your environment. You can install it with pip:
`pip install phonemizer`. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
SACREMOSES_IMPORT_ERROR = """
{0} requires the sacremoses library but it was not found in your environment. You can install it with pip:
`pip install sacremoses`. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
SCIPY_IMPORT_ERROR = """
{0} requires the scipy library but it was not found in your environment. You can install it with pip:
`pip install scipy`. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
SPEECH_IMPORT_ERROR = """
{0} requires the torchaudio library but it was not found in your environment. You can install it with pip:
`pip install torchaudio`. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
TIMM_IMPORT_ERROR = """
{0} requires the timm library but it was not found in your environment. You can install it with pip:
`pip install timm`. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
NATTEN_IMPORT_ERROR = """
{0} requires the natten library but it was not found in your environment. You can install it by referring to:
shi-labs.com/natten. You can also install it with pip (may take longer to build):
`pip install natten`. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
VISION_IMPORT_ERROR = """
{0} requires the PIL library but it was not found in your environment. You can install it with pip:
`pip install pillow`. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
PYTESSERACT_IMPORT_ERROR = """
{0} requires the PyTesseract library but it was not found in your environment. You can install it with pip:
`pip install pytesseract`. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
PYCTCDECODE_IMPORT_ERROR = """
{0} requires the pyctcdecode library but it was not found in your environment. You can install it with pip:
`pip install pyctcdecode`. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
ACCELERATE_IMPORT_ERROR = """
{0} requires the accelerate library but it was not found in your environment. You can install it with pip:
`pip install accelerate`. Please note that you may need to restart your runtime after installation.
"""
# docstyle-ignore
CCL_IMPORT_ERROR = """
{0} requires the torch ccl library but it was not found in your environment. You can install it with pip:
`pip install oneccl_bind_pt -f https://developer.intel.com/ipex-whl-stable`
Please note that you may need to restart your runtime after installation.
"""
DECORD_IMPORT_ERROR = """
{0} requires the decord library but it was not found in your environment. You can install it with pip: `pip install
decord`. Please note that you may need to restart your runtime after installation.
"""
CYTHON_IMPORT_ERROR = """
{0} requires the Cython library but it was not found in your environment. You can install it with pip: `pip install
Cython`. Please note that you may need to restart your runtime after installation.
"""
BACKENDS_MAPPING = OrderedDict(
[
("bs4", (is_bs4_available, BS4_IMPORT_ERROR)),
("datasets", (is_datasets_available, DATASETS_IMPORT_ERROR)),
("detectron2", (is_detectron2_available, DETECTRON2_IMPORT_ERROR)),
("faiss", (is_faiss_available, FAISS_IMPORT_ERROR)),
("flax", (is_flax_available, FLAX_IMPORT_ERROR)),
("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)),
("pandas", (is_pandas_available, PANDAS_IMPORT_ERROR)),
("phonemizer", (is_phonemizer_available, PHONEMIZER_IMPORT_ERROR)),
("protobuf", (is_protobuf_available, PROTOBUF_IMPORT_ERROR)),
("pyctcdecode", (is_pyctcdecode_available, PYCTCDECODE_IMPORT_ERROR)),
("pytesseract", (is_pytesseract_available, PYTESSERACT_IMPORT_ERROR)),
("sacremoses", (is_sacremoses_available, SACREMOSES_IMPORT_ERROR)),
("pytorch_quantization", (is_pytorch_quantization_available, PYTORCH_QUANTIZATION_IMPORT_ERROR)),
("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)),
("sklearn", (is_sklearn_available, SKLEARN_IMPORT_ERROR)),
("speech", (is_speech_available, SPEECH_IMPORT_ERROR)),
("tensorflow_probability", (is_tensorflow_probability_available, TENSORFLOW_PROBABILITY_IMPORT_ERROR)),
("tf", (is_tf_available, TENSORFLOW_IMPORT_ERROR)),
("tensorflow_text", (is_tensorflow_text_available, TENSORFLOW_TEXT_IMPORT_ERROR)),
("timm", (is_timm_available, TIMM_IMPORT_ERROR)),
("natten", (is_natten_available, NATTEN_IMPORT_ERROR)),
("tokenizers", (is_tokenizers_available, TOKENIZERS_IMPORT_ERROR)),
("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)),
("torchvision", (is_torchvision_available, TORCHVISION_IMPORT_ERROR)),
("vision", (is_vision_available, VISION_IMPORT_ERROR)),
("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)),
("accelerate", (is_accelerate_available, ACCELERATE_IMPORT_ERROR)),
("oneccl_bind_pt", (is_ccl_available, CCL_IMPORT_ERROR)),
("decord", (is_decord_available, DECORD_IMPORT_ERROR)),
("cython", (is_cython_available, CYTHON_IMPORT_ERROR)),
]
)
def requires_backends(obj, backends):
if not isinstance(backends, (list, tuple)):
backends = [backends]
name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
# Raise an error for users who might not realize that classes without "TF" are torch-only
if "torch" in backends and "tf" not in backends and not is_torch_available() and is_tf_available():
raise ImportError(PYTORCH_IMPORT_ERROR_WITH_TF.format(name))
# Raise the inverse error for PyTorch users trying to load TF classes
if "tf" in backends and "torch" not in backends and is_torch_available() and not is_tf_available():
raise ImportError(TF_IMPORT_ERROR_WITH_PYTORCH.format(name))
checks = (BACKENDS_MAPPING[backend] for backend in backends)
failed = [msg.format(name) for available, msg in checks if not available()]
if failed:
raise ImportError("".join(failed))
class DummyObject(type):
"""
Metaclass for the dummy objects. Any class inheriting from it will return the ImportError generated by
`requires_backend` each time a user tries to access any method of that class.
"""
def __getattribute__(cls, key):
if key.startswith("_") and key != "_from_config":
return super().__getattribute__(key)
requires_backends(cls, cls._backends)
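# Illustrative sketch with a hypothetical class, not part of the original module:
# a placeholder in the style of the library's dummy object files. Accessing any
# public attribute of the class goes through the metaclass above and raises the
# backend ImportError.
class _ExampleDummyModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])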
def is_torch_fx_proxy(x):
if is_torch_fx_available():
import torch.fx
return isinstance(x, torch.fx.Proxy)
return False
class _LazyModule(ModuleType):
"""
Module class that surfaces all objects but only performs associated imports when the objects are requested.
"""
# Very heavily inspired by optuna.integration._IntegrationModule
# https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py
def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None):
super().__init__(name)
self._modules = set(import_structure.keys())
self._class_to_module = {}
for key, values in import_structure.items():
for value in values:
self._class_to_module[value] = key
# Needed for autocompletion in an IDE
self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values()))
self.__file__ = module_file
self.__spec__ = module_spec
self.__path__ = [os.path.dirname(module_file)]
self._objects = {} if extra_objects is None else extra_objects
self._name = name
self._import_structure = import_structure
# Needed for autocompletion in an IDE
def __dir__(self):
result = super().__dir__()
# The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether
# they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir.
for attr in self.__all__:
if attr not in result:
result.append(attr)
return result
def __getattr__(self, name: str) -> Any:
if name in self._objects:
return self._objects[name]
if name in self._modules:
value = self._get_module(name)
elif name in self._class_to_module.keys():
module = self._get_module(self._class_to_module[name])
value = getattr(module, name)
else:
raise AttributeError(f"module {self.__name__} has no attribute {name}")
setattr(self, name, value)
return value
def _get_module(self, module_name: str):
try:
return importlib.import_module("." + module_name, self.__name__)
except Exception as e:
raise RuntimeError(
f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its"
f" traceback):\n{e}"
) from e
def __reduce__(self):
return (self.__class__, (self._name, self.__file__, self._import_structure))
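# Illustrative sketch with hypothetical names, not part of the original module:
# a package __init__ typically replaces itself in sys.modules with a _LazyModule
# so that heavy submodules are only imported on first attribute access.
def _example_lazy_module():
    import sys

    _import_structure = {"modeling_utils": ["PreTrainedModel"]}
    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )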
class OptionalDependencyNotAvailable(BaseException):
"""Internally used error class for signalling an optional dependency was not found."""
def direct_transformers_import(path: str, file="__init__.py") -> ModuleType:
"""Imports transformers directly
Args:
path (`str`): The path to the source file
file (`str`, optional): The file to join with the path. Defaults to "__init__.py".
Returns:
`ModuleType`: The resulting imported module
"""
name = "transformers"
location = os.path.join(path, file)
spec = importlib.util.spec_from_file_location(name, location, submodule_search_locations=[path])
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
module = sys.modules[name]
return module
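# Illustrative usage sketch with a hypothetical path, not part of the original
# module: import the in-repo source tree directly, bypassing any installed copy.
def _example_direct_import():
    transformers_module = direct_transformers_import("src/transformers")
    return transformers_module.__version__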
|
233zzh/TitanDataOperationSystem | 45,770 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/flot/examples/axes-time-zones/tz/africa | # <pre>
# This file is in the public domain, so clarified as of
# 2009-05-17 by Arthur David Olson.
# This data is by no means authoritative; if you think you know better,
# go ahead and edit the file (and please send any changes to
# tz@iana.org for general use in the future).
# From Paul Eggert (2006-03-22):
#
# A good source for time zone historical data outside the U.S. is
# Thomas G. Shanks and Rique Pottenger, The International Atlas (6th edition),
# San Diego: ACS Publications, Inc. (2003).
#
# Gwillim Law writes that a good source
# for recent time zone data is the International Air Transport
# Association's Standard Schedules Information Manual (IATA SSIM),
# published semiannually. Law sent in several helpful summaries
# of the IATA's data after 1990.
#
# Except where otherwise noted, Shanks & Pottenger is the source for
# entries through 1990, and IATA SSIM is the source for entries afterwards.
#
# Another source occasionally used is Edward W. Whitman, World Time Differences,
# Whitman Publishing Co, 2 Niagara Av, Ealing, London (undated), which
# I found in the UCLA library.
#
# A reliable and entertaining source about time zones is
# Derek Howse, Greenwich time and longitude, Philip Wilson Publishers (1997).
#
# Previous editions of this database used WAT, CAT, SAT, and EAT
# for +0:00 through +3:00, respectively,
# but Mark R V Murray reports that
# `SAST' is the official abbreviation for +2:00 in the country of South Africa,
# `CAT' is commonly used for +2:00 in countries north of South Africa, and
# `WAT' is probably the best name for +1:00, as the common phrase for
# the area that includes Nigeria is ``West Africa''.
# He has heard of ``Western Sahara Time'' for +0:00 but can find no reference.
#
# To make things confusing, `WAT' seems to have been used for -1:00 long ago;
# I'd guess that this was because people needed _some_ name for -1:00,
# and at the time, far west Africa was the only major land area in -1:00.
# This usage is now obsolete, as the last use of -1:00 on the African
# mainland seems to have been 1976 in Western Sahara.
#
# To summarize, the following abbreviations seem to have some currency:
# -1:00 WAT West Africa Time (no longer used)
# 0:00 GMT Greenwich Mean Time
# 2:00 CAT Central Africa Time
# 2:00 SAST South Africa Standard Time
# and Murray suggests the following abbreviation:
# 1:00 WAT West Africa Time
# I realize that this leads to `WAT' being used for both -1:00 and 1:00
# for times before 1976, but this is the best I can think of
# until we get more information.
#
# I invented the following abbreviations; corrections are welcome!
# 2:00 WAST West Africa Summer Time
# 2:30 BEAT British East Africa Time (no longer used)
# 2:45 BEAUT British East Africa Unified Time (no longer used)
# 3:00 CAST Central Africa Summer Time (no longer used)
# 3:00 SAST South Africa Summer Time (no longer used)
# 3:00 EAT East Africa Time
# 4:00 EAST East Africa Summer Time (no longer used)
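# Aside (not part of the original data): the abbreviations above are what
# zone-aware libraries report via tzname(). A minimal sketch with Python's
# zoneinfo, assuming a tz database built from files like this one:
#
#   from datetime import datetime
#   from zoneinfo import ZoneInfo
#   datetime(2020, 6, 1, tzinfo=ZoneInfo("Africa/Nairobi")).tzname()       # 'EAT'
#   datetime(2020, 6, 1, tzinfo=ZoneInfo("Africa/Johannesburg")).tzname()  # 'SAST'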
# Algeria
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Algeria 1916 only - Jun 14 23:00s 1:00 S
Rule Algeria 1916 1919 - Oct Sun>=1 23:00s 0 -
Rule Algeria 1917 only - Mar 24 23:00s 1:00 S
Rule Algeria 1918 only - Mar 9 23:00s 1:00 S
Rule Algeria 1919 only - Mar 1 23:00s 1:00 S
Rule Algeria 1920 only - Feb 14 23:00s 1:00 S
Rule Algeria 1920 only - Oct 23 23:00s 0 -
Rule Algeria 1921 only - Mar 14 23:00s 1:00 S
Rule Algeria 1921 only - Jun 21 23:00s 0 -
Rule Algeria 1939 only - Sep 11 23:00s 1:00 S
Rule Algeria 1939 only - Nov 19 1:00 0 -
Rule Algeria 1944 1945 - Apr Mon>=1 2:00 1:00 S
Rule Algeria 1944 only - Oct 8 2:00 0 -
Rule Algeria 1945 only - Sep 16 1:00 0 -
Rule Algeria 1971 only - Apr 25 23:00s 1:00 S
Rule Algeria 1971 only - Sep 26 23:00s 0 -
Rule Algeria 1977 only - May 6 0:00 1:00 S
Rule Algeria 1977 only - Oct 21 0:00 0 -
Rule Algeria 1978 only - Mar 24 1:00 1:00 S
Rule Algeria 1978 only - Sep 22 3:00 0 -
Rule Algeria 1980 only - Apr 25 0:00 1:00 S
Rule Algeria 1980 only - Oct 31 2:00 0 -
# Shanks & Pottenger give 0:09:20 for Paris Mean Time; go with Howse's
# more precise 0:09:21.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Algiers 0:12:12 - LMT 1891 Mar 15 0:01
0:09:21 - PMT 1911 Mar 11 # Paris Mean Time
0:00 Algeria WE%sT 1940 Feb 25 2:00
1:00 Algeria CE%sT 1946 Oct 7
0:00 - WET 1956 Jan 29
1:00 - CET 1963 Apr 14
0:00 Algeria WE%sT 1977 Oct 21
1:00 Algeria CE%sT 1979 Oct 26
0:00 Algeria WE%sT 1981 May
1:00 - CET
# Angola
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Luanda 0:52:56 - LMT 1892
0:52:04 - AOT 1911 May 26 # Angola Time
1:00 - WAT
# Benin
# Whitman says they switched to 1:00 in 1946, not 1934;
# go with Shanks & Pottenger.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Porto-Novo 0:10:28 - LMT 1912
0:00 - GMT 1934 Feb 26
1:00 - WAT
# Botswana
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Gaborone 1:43:40 - LMT 1885
2:00 - CAT 1943 Sep 19 2:00
2:00 1:00 CAST 1944 Mar 19 2:00
2:00 - CAT
# Burkina Faso
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Ouagadougou -0:06:04 - LMT 1912
0:00 - GMT
# Burundi
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Bujumbura 1:57:28 - LMT 1890
2:00 - CAT
# Cameroon
# Whitman says they switched to 1:00 in 1920; go with Shanks & Pottenger.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Douala 0:38:48 - LMT 1912
1:00 - WAT
# Cape Verde
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Atlantic/Cape_Verde -1:34:04 - LMT 1907 # Praia
-2:00 - CVT 1942 Sep
-2:00 1:00 CVST 1945 Oct 15
-2:00 - CVT 1975 Nov 25 2:00
-1:00 - CVT
# Central African Republic
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Bangui 1:14:20 - LMT 1912
1:00 - WAT
# Chad
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Ndjamena 1:00:12 - LMT 1912
1:00 - WAT 1979 Oct 14
1:00 1:00 WAST 1980 Mar 8
1:00 - WAT
# Comoros
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Comoro 2:53:04 - LMT 1911 Jul # Moroni, Gran Comoro
3:00 - EAT
# Democratic Republic of Congo
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Kinshasa 1:01:12 - LMT 1897 Nov 9
1:00 - WAT
Zone Africa/Lubumbashi 1:49:52 - LMT 1897 Nov 9
2:00 - CAT
# Republic of the Congo
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Brazzaville 1:01:08 - LMT 1912
1:00 - WAT
# Cote D'Ivoire
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Abidjan -0:16:08 - LMT 1912
0:00 - GMT
# Djibouti
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Djibouti 2:52:36 - LMT 1911 Jul
3:00 - EAT
###############################################################################
# Egypt
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Egypt 1940 only - Jul 15 0:00 1:00 S
Rule Egypt 1940 only - Oct 1 0:00 0 -
Rule Egypt 1941 only - Apr 15 0:00 1:00 S
Rule Egypt 1941 only - Sep 16 0:00 0 -
Rule Egypt 1942 1944 - Apr 1 0:00 1:00 S
Rule Egypt 1942 only - Oct 27 0:00 0 -
Rule Egypt 1943 1945 - Nov 1 0:00 0 -
Rule Egypt 1945 only - Apr 16 0:00 1:00 S
Rule Egypt 1957 only - May 10 0:00 1:00 S
Rule Egypt 1957 1958 - Oct 1 0:00 0 -
Rule Egypt 1958 only - May 1 0:00 1:00 S
Rule Egypt 1959 1981 - May 1 1:00 1:00 S
Rule Egypt 1959 1965 - Sep 30 3:00 0 -
Rule Egypt 1966 1994 - Oct 1 3:00 0 -
Rule Egypt 1982 only - Jul 25 1:00 1:00 S
Rule Egypt 1983 only - Jul 12 1:00 1:00 S
Rule Egypt 1984 1988 - May 1 1:00 1:00 S
Rule Egypt 1989 only - May 6 1:00 1:00 S
Rule Egypt 1990 1994 - May 1 1:00 1:00 S
# IATA (after 1990) says transitions are at 0:00.
# Go with IATA starting in 1995, except correct 1995 entry from 09-30 to 09-29.
# From Alexander Krivenyshev (2011-04-20):
# "...Egypt's interim cabinet decided on Wednesday to cancel daylight
# saving time after a poll posted on its website showed the majority of
# Egyptians would approve the cancellation."
#
# Egypt to cancel daylight saving time
# <a href="http://www.almasryalyoum.com/en/node/407168">
# http://www.almasryalyoum.com/en/node/407168
# </a>
# or
# <a href="http://www.worldtimezone.com/dst_news/dst_news_egypt04.html">
# http://www.worldtimezone.com/dst_news/dst_news_egypt04.html
# </a>
Rule Egypt 1995 2010 - Apr lastFri 0:00s 1:00 S
Rule Egypt 1995 2005 - Sep lastThu 23:00s 0 -
# From Steffen Thorsen (2006-09-19):
# The Egyptian Gazette, issue 41,090 (2006-09-18), page 1, reports:
# Egypt will turn back clocks by one hour at the midnight of Thursday
# after observing the daylight saving time since May.
# http://news.gom.com.eg/gazette/pdf/2006/09/18/01.pdf
Rule Egypt 2006 only - Sep 21 23:00s 0 -
# From Dirk Losch (2007-08-14):
# I received a mail from an airline which says that the daylight
# saving time in Egypt will end in the night of 2007-09-06 to 2007-09-07.
# From Jesper Norgaard Welen (2007-08-15): [The following agree:]
# http://www.nentjes.info/Bill/bill5.htm
# http://www.timeanddate.com/worldclock/city.html?n=53
# From Steffen Thorsen (2007-09-04): The official information...:
# http://www.sis.gov.eg/En/EgyptOnline/Miscellaneous/000002/0207000000000000001580.htm
Rule Egypt 2007 only - Sep Thu>=1 23:00s 0 -
# From Abdelrahman Hassan (2007-09-06):
# Due to the Hijri (lunar Islamic calendar) year being 11 days shorter
# than the year of the Gregorian calendar, Ramadan shifts earlier each
# year. This year it will be observed September 13 (September is quite
# hot in Egypt), and the idea is to make fasting easier for workers by
# shifting business hours one hour out of daytime heat. Consequently,
# unless discontinued, next DST may end Thursday 28 August 2008.
# From Paul Eggert (2007-08-17):
# For lack of better info, assume the new rule is last Thursday in August.
# From Petr Machata (2009-04-06):
# The following appeared in Red Hat bugzilla[1] (edited):
#
# > $ zdump -v /usr/share/zoneinfo/Africa/Cairo | grep 2009
# > /usr/share/zoneinfo/Africa/Cairo Thu Apr 23 21:59:59 2009 UTC = Thu Apr 23 23:59:59 2009 EET isdst=0 gmtoff=7200
# > /usr/share/zoneinfo/Africa/Cairo Thu Apr 23 22:00:00 2009 UTC = Fri Apr 24 01:00:00 2009 EEST isdst=1 gmtoff=10800
# > /usr/share/zoneinfo/Africa/Cairo Thu Aug 27 20:59:59 2009 UTC = Thu Aug 27 23:59:59 2009 EEST isdst=1 gmtoff=10800
# > /usr/share/zoneinfo/Africa/Cairo Thu Aug 27 21:00:00 2009 UTC = Thu Aug 27 23:00:00 2009 EET isdst=0 gmtoff=7200
#
# > end date should be Thu Sep 24 2009 (Last Thursday in September at 23:59:59)
# > http://support.microsoft.com/kb/958729/
#
# timeanddate[2] and another site I've found[3] also support that.
#
# [1] <a href="https://bugzilla.redhat.com/show_bug.cgi?id=492263">
# https://bugzilla.redhat.com/show_bug.cgi?id=492263
# </a>
# [2] <a href="http://www.timeanddate.com/worldclock/clockchange.html?n=53">
# http://www.timeanddate.com/worldclock/clockchange.html?n=53
# </a>
# [3] <a href="http://wwp.greenwichmeantime.com/time-zone/africa/egypt/">
# http://wwp.greenwichmeantime.com/time-zone/africa/egypt/
# </a>
# From Arthur David Olson (2009-04-20):
# In 2009 (and for the next several years), Ramadan ends before the fourth
# Thursday in September; Egypt is expected to revert to the last Thursday
# in September.
# From Steffen Thorsen (2009-08-11):
# We have been able to confirm the August change with the Egyptian Cabinet
# Information and Decision Support Center:
# <a href="http://www.timeanddate.com/news/time/egypt-dst-ends-2009.html">
# http://www.timeanddate.com/news/time/egypt-dst-ends-2009.html
# </a>
#
# The Middle East News Agency
# <a href="http://www.mena.org.eg/index.aspx">
# http://www.mena.org.eg/index.aspx
# </a>
# also reports "Egypt starts winter time on August 21"
# today in article numbered "71, 11/08/2009 12:25 GMT."
# Only the title above is available without a subscription to their service,
# and can be found by searching for "winter" in their search engine
# (at least today).
# From Alexander Krivenyshev (2010-07-20):
# According to News from Egypt - Al-Masry Al-Youm Egypt's cabinet has
# decided that Daylight Saving Time will not be used in Egypt during
# Ramadan.
#
# Arabic translation:
# "Clocks to go back during Ramadan--and then forward again"
# <a href="http://www.almasryalyoum.com/en/news/clocks-go-back-during-ramadan-and-then-forward-again">
# http://www.almasryalyoum.com/en/news/clocks-go-back-during-ramadan-and-then-forward-again
# </a>
# or
# <a href="http://www.worldtimezone.com/dst_news/dst_news_egypt02.html">
# http://www.worldtimezone.com/dst_news/dst_news_egypt02.html
# </a>
Rule Egypt 2008 only - Aug lastThu 23:00s 0 -
Rule Egypt 2009 only - Aug 20 23:00s 0 -
Rule Egypt 2010 only - Aug 11 0:00 0 -
Rule Egypt 2010 only - Sep 10 0:00 1:00 S
Rule Egypt 2010 only - Sep lastThu 23:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Cairo 2:05:00 - LMT 1900 Oct
2:00 Egypt EE%sT
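# Aside (not part of the original data): the 2010 Ramadan break in the Egypt
# rules above can be checked with Python's zoneinfo, assuming a tz database
# built from this data:
#
#   from datetime import datetime
#   from zoneinfo import ZoneInfo
#   cairo = ZoneInfo("Africa/Cairo")
#   datetime(2010, 8, 15, tzinfo=cairo).utcoffset()  # 2:00, DST suspended for Ramadan
#   datetime(2010, 9, 15, tzinfo=cairo).utcoffset()  # 3:00, DST briefly resumed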
# Equatorial Guinea
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Malabo 0:35:08 - LMT 1912
0:00 - GMT 1963 Dec 15
1:00 - WAT
# Eritrea
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Asmara 2:35:32 - LMT 1870
2:35:32 - AMT 1890 # Asmara Mean Time
2:35:20 - ADMT 1936 May 5 # Adis Dera MT
3:00 - EAT
# Ethiopia
# From Paul Eggert (2006-03-22):
# Shanks & Pottenger write that Ethiopia had six narrowly-spaced time zones
# between 1870 and 1890, and that they merged to 38E50 (2:35:20) in 1890.
# We'll guess that 38E50 is for Adis Dera.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Addis_Ababa 2:34:48 - LMT 1870
2:35:20 - ADMT 1936 May 5 # Adis Dera MT
3:00 - EAT
# Gabon
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Libreville 0:37:48 - LMT 1912
1:00 - WAT
# Gambia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Banjul -1:06:36 - LMT 1912
-1:06:36 - BMT 1935 # Banjul Mean Time
-1:00 - WAT 1964
0:00 - GMT
# Ghana
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
# Whitman says DST was observed from 1931 to ``the present'';
# go with Shanks & Pottenger.
Rule Ghana 1936 1942 - Sep 1 0:00 0:20 GHST
Rule Ghana 1936 1942 - Dec 31 0:00 0 GMT
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Accra -0:00:52 - LMT 1918
0:00 Ghana %s
# Guinea
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Conakry -0:54:52 - LMT 1912
0:00 - GMT 1934 Feb 26
-1:00 - WAT 1960
0:00 - GMT
# Guinea-Bissau
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Bissau -1:02:20 - LMT 1911 May 26
-1:00 - WAT 1975
0:00 - GMT
# Kenya
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Nairobi 2:27:16 - LMT 1928 Jul
3:00 - EAT 1930
2:30 - BEAT 1940
2:45 - BEAUT 1960
3:00 - EAT
# Lesotho
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Maseru 1:50:00 - LMT 1903 Mar
2:00 - SAST 1943 Sep 19 2:00
2:00 1:00 SAST 1944 Mar 19 2:00
2:00 - SAST
# Liberia
# From Paul Eggert (2006-03-22):
# In 1972 Liberia was the last country to switch
# from a UTC offset that was not a multiple of 15 or 20 minutes.
# Howse reports that it was in honor of their president's birthday.
# Shanks & Pottenger report the date as May 1, whereas Howse reports Jan;
# go with Shanks & Pottenger.
# For Liberia before 1972, Shanks & Pottenger report -0:44, whereas Howse and
# Whitman each report -0:44:30; go with the more precise figure.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Monrovia -0:43:08 - LMT 1882
-0:43:08 - MMT 1919 Mar # Monrovia Mean Time
-0:44:30 - LRT 1972 May # Liberia Time
0:00 - GMT
###############################################################################
# Libya
# From Even Scharning (2012-11-10):
# Libya set their time one hour back at 02:00 on Saturday November 10.
# http://www.libyaherald.com/2012/11/04/clocks-to-go-back-an-hour-on-saturday/
# Here is an official source [in Arabic]: http://ls.ly/fb6Yc
#
# Steffen Thorsen forwarded a translation (2012-11-10) in
# http://mm.icann.org/pipermail/tz/2012-November/018451.html
#
# From Tim Parenti (2012-11-11):
# Treat the 2012-11-10 change as a zone change from UTC+2 to UTC+1.
# The DST rules planned for 2013 and onward roughly mirror those of Europe
# (either two days before them or five days after them, so as to fall on
# lastFri instead of lastSun).
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Libya 1951 only - Oct 14 2:00 1:00 S
Rule Libya 1952 only - Jan 1 0:00 0 -
Rule Libya 1953 only - Oct 9 2:00 1:00 S
Rule Libya 1954 only - Jan 1 0:00 0 -
Rule Libya 1955 only - Sep 30 0:00 1:00 S
Rule Libya 1956 only - Jan 1 0:00 0 -
Rule Libya 1982 1984 - Apr 1 0:00 1:00 S
Rule Libya 1982 1985 - Oct 1 0:00 0 -
Rule Libya 1985 only - Apr 6 0:00 1:00 S
Rule Libya 1986 only - Apr 4 0:00 1:00 S
Rule Libya 1986 only - Oct 3 0:00 0 -
Rule Libya 1987 1989 - Apr 1 0:00 1:00 S
Rule Libya 1987 1989 - Oct 1 0:00 0 -
Rule Libya 1997 only - Apr 4 0:00 1:00 S
Rule Libya 1997 only - Oct 4 0:00 0 -
Rule Libya 2013 max - Mar lastFri 1:00 1:00 S
Rule Libya 2013 max - Oct lastFri 2:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Tripoli 0:52:44 - LMT 1920
1:00 Libya CE%sT 1959
2:00 - EET 1982
1:00 Libya CE%sT 1990 May 4
# The 1996 and 1997 entries are from Shanks & Pottenger;
# the IATA SSIM data contain some obvious errors.
2:00 - EET 1996 Sep 30
1:00 Libya CE%sT 1997 Oct 4
2:00 - EET 2012 Nov 10 2:00
1:00 Libya CE%sT
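# Aside (not part of the original data): the 2012-11-10 change from UTC+2 to
# UTC+1 described above is visible with Python's zoneinfo, assuming a tz
# database built from this data:
#
#   from datetime import datetime
#   from zoneinfo import ZoneInfo
#   tripoli = ZoneInfo("Africa/Tripoli")
#   datetime(2012, 11, 1, tzinfo=tripoli).utcoffset()  # 2:00 (EET)
#   datetime(2012, 12, 1, tzinfo=tripoli).utcoffset()  # 1:00 (CET)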
# Madagascar
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Antananarivo 3:10:04 - LMT 1911 Jul
3:00 - EAT 1954 Feb 27 23:00s
3:00 1:00 EAST 1954 May 29 23:00s
3:00 - EAT
# Malawi
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Blantyre 2:20:00 - LMT 1903 Mar
2:00 - CAT
# Mali
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Bamako -0:32:00 - LMT 1912
0:00 - GMT 1934 Feb 26
-1:00 - WAT 1960 Jun 20
0:00 - GMT
# Mauritania
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Nouakchott -1:03:48 - LMT 1912
0:00 - GMT 1934 Feb 26
-1:00 - WAT 1960 Nov 28
0:00 - GMT
# Mauritius
# From Steffen Thorsen (2008-06-25):
# Mauritius plans to observe DST from 2008-11-01 to 2009-03-31 on a trial
# basis....
# It seems that Mauritius observed daylight saving time from 1982-10-10 to
# 1983-03-20 as well, but that was not successful....
# http://www.timeanddate.com/news/time/mauritius-daylight-saving-time.html
# From Alex Krivenyshev (2008-06-25):
# http://economicdevelopment.gov.mu/portal/site/Mainhomepage/menuitem.a42b24128104d9845dabddd154508a0c/?content_id=0a7cee8b5d69a110VgnVCM1000000a04a8c0RCRD
# From Arthur David Olson (2008-06-30):
# The www.timeanddate.com article cited by Steffen Thorsen notes that "A
# final decision has yet to be made on the times that daylight saving
# would begin and end on these dates." As a place holder, use midnight.
# From Paul Eggert (2008-06-30):
# Follow Thorsen on DST in 1982/1983, instead of Shanks & Pottenger.
# From Steffen Thorsen (2008-07-10):
# According to
# <a href="http://www.lexpress.mu/display_article.php?news_id=111216">
# http://www.lexpress.mu/display_article.php?news_id=111216
# </a>
# (in French), Mauritius will start and end their DST a few days earlier
# than previously announced (2008-11-01 to 2009-03-31). The new start
# date is 2008-10-26 at 02:00 and the new end date is 2009-03-27 (no time
# given, but it is probably at either 2 or 3 wall clock time).
#
# A little strange though, since the article says that they moved the date
# to align itself with Europe and USA which also change time on that date,
# but that means they have not paid attention to what happened in
# USA/Canada last year (DST ends first Sunday in November). I also wonder
# why that they end on a Friday, instead of aligning with Europe which
# changes two days later.
# From Alex Krivenyshev (2008-07-11):
# Seems that English language article "The revival of daylight saving
# time: Energy conservation?"-# No. 16578 (07/11/2008) was originally
# published on Monday, June 30, 2008...
#
# I guess that article in French "Le gouvernement avance l'introduction
# de l'heure d'ete" stating that DST in Mauritius starting on October 26
# and ending on March 27, 2009 is the most recent one.
# ...
# <a href="http://www.worldtimezone.com/dst_news/dst_news_mauritius02.html">
# http://www.worldtimezone.com/dst_news/dst_news_mauritius02.html
# </a>
# From Riad M. Hossen Ally (2008-08-03):
# The Government of Mauritius weblink
# <a href="http://www.gov.mu/portal/site/pmosite/menuitem.4ca0efdee47462e7440a600248a521ca/?content_id=4728ca68b2a5b110VgnVCM1000000a04a8c0RCRD">
# http://www.gov.mu/portal/site/pmosite/menuitem.4ca0efdee47462e7440a600248a521ca/?content_id=4728ca68b2a5b110VgnVCM1000000a04a8c0RCRD
# </a>
# Cabinet Decision of July 18th, 2008 states as follows:
#
# 4. ...Cabinet has agreed to the introduction into the National Assembly
# of the Time Bill which provides for the introduction of summer time in
# Mauritius. The summer time period which will be of one hour ahead of
# the standard time, will be aligned with that in Europe and the United
# States of America. It will start at two o'clock in the morning on the
# last Sunday of October and will end at two o'clock in the morning on
# the last Sunday of March the following year. The summer time for the
# year 2008 - 2009 will, therefore, be effective as from 26 October 2008
# and end on 29 March 2009.
# From Ed Maste (2008-10-07):
# THE TIME BILL (No. XXVII of 2008) Explanatory Memorandum states the
# beginning / ending of summer time is 2 o'clock standard time in the
# morning of the last Sunday of October / last Sunday of March.
# <a href="http://www.gov.mu/portal/goc/assemblysite/file/bill2708.pdf">
# http://www.gov.mu/portal/goc/assemblysite/file/bill2708.pdf
# </a>
# From Steffen Thorsen (2009-06-05):
# According to several sources, Mauritius will not continue to observe
# DST the coming summer...
#
# Some sources, in French:
# <a href="http://www.defimedia.info/news/946/Rashid-Beebeejaun-:-%C2%AB-L%E2%80%99heure-d%E2%80%99%C3%A9t%C3%A9-ne-sera-pas-appliqu%C3%A9e-cette-ann%C3%A9e-%C2%BB">
# http://www.defimedia.info/news/946/Rashid-Beebeejaun-:-%C2%AB-L%E2%80%99heure-d%E2%80%99%C3%A9t%C3%A9-ne-sera-pas-appliqu%C3%A9e-cette-ann%C3%A9e-%C2%BB
# </a>
# <a href="http://lexpress.mu/Story/3398~Beebeejaun---Les-objectifs-d-%C3%A9conomie-d-%C3%A9nergie-de-l-heure-d-%C3%A9t%C3%A9-ont-%C3%A9t%C3%A9-atteints-">
# http://lexpress.mu/Story/3398~Beebeejaun---Les-objectifs-d-%C3%A9conomie-d-%C3%A9nergie-de-l-heure-d-%C3%A9t%C3%A9-ont-%C3%A9t%C3%A9-atteints-
# </a>
#
# Our wrap-up:
# <a href="http://www.timeanddate.com/news/time/mauritius-dst-will-not-repeat.html">
# http://www.timeanddate.com/news/time/mauritius-dst-will-not-repeat.html
# </a>
# From Arthur David Olson (2009-07-11):
# The "mauritius-dst-will-not-repeat" wrapup includes this:
# "The trial ended on March 29, 2009, when the clocks moved back by one hour
# at 2am (or 02:00) local time..."
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Mauritius 1982 only - Oct 10 0:00 1:00 S
Rule Mauritius 1983 only - Mar 21 0:00 0 -
Rule Mauritius 2008 only - Oct lastSun 2:00 1:00 S
Rule Mauritius 2009 only - Mar lastSun 2:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Mauritius 3:50:00 - LMT 1907 # Port Louis
4:00 Mauritius MU%sT # Mauritius Time
# Agalega Is, Rodriguez
# no information; probably like Indian/Mauritius
# Mayotte
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Mayotte 3:00:56 - LMT 1911 Jul # Mamoutzou
3:00 - EAT
# Morocco
# See the `europe' file for Spanish Morocco (Africa/Ceuta).
# From Alex Krivenyshev (2008-05-09):
# Here is an article that Morocco plan to introduce Daylight Saving Time between
# 1 June, 2008 and 27 September, 2008.
#
# "... Morocco is to save energy by adjusting its clock during summer so it will
# be one hour ahead of GMT between 1 June and 27 September, according to
# Communication Minister and Government Spokesman, Khalid Naciri...."
#
# <a href="http://www.worldtimezone.net/dst_news/dst_news_morocco01.html">
# http://www.worldtimezone.net/dst_news/dst_news_morocco01.html
# </a>
# OR
# <a href="http://en.afrik.com/news11892.html">
# http://en.afrik.com/news11892.html
# </a>
# From Alex Krivenyshev (2008-05-09):
# The Morocco time change can be confirmed on Morocco web site Maghreb Arabe Presse:
# <a href="http://www.map.ma/eng/sections/box3/morocco_shifts_to_da/view">
# http://www.map.ma/eng/sections/box3/morocco_shifts_to_da/view
# </a>
#
# Morocco shifts to daylight time on June 1st through September 27, Govt.
# spokesman.
# From Patrice Scattolin (2008-05-09):
# According to this article:
# <a href="http://www.avmaroc.com/actualite/heure-dete-comment-a127896.html">
# http://www.avmaroc.com/actualite/heure-dete-comment-a127896.html
# </a>
# (and republished here:
# <a href="http://www.actu.ma/heure-dete-comment_i127896_0.html">
# http://www.actu.ma/heure-dete-comment_i127896_0.html
# </a>
# )
# the changes occurs at midnight:
#
# saturday night may 31st at midnight (which in french is to be
# interpreted as the night between saturday and sunday)
# sunday night the 28th at midnight
#
# Seeing that the 28th is monday, I am guessing that she intends to say
# the midnight of the 28th which is the midnight between sunday and
# monday, which jives with other sources that say that it's inclusive
# june1st to sept 27th.
#
# The decision was taken by decree *2-08-224 *but I can't find the decree
# published on the web.
#
# It's also confirmed here:
# <a href="http://www.maroc.ma/NR/exeres/FACF141F-D910-44B0-B7FA-6E03733425D1.htm">
# http://www.maroc.ma/NR/exeres/FACF141F-D910-44B0-B7FA-6E03733425D1.htm
# </a>
# on a government portal as being between june 1st and sept 27th (not yet
# posted in english).
#
# The following google query will generate many relevant hits:
# <a href="http://www.google.com/search?hl=en&q=Conseil+de+gouvernement+maroc+heure+avance&btnG=Search">
# http://www.google.com/search?hl=en&q=Conseil+de+gouvernement+maroc+heure+avance&btnG=Search
# </a>
# From Alex Krivenyshev (2008-05-09):
# Is Western Sahara (part which administrated by Morocco) going to follow
# Morocco DST changes? Any information? What about other part of
# Western Sahara - under administration of POLISARIO Front (also named
# SADR Saharawi Arab Democratic Republic)?
# From Arthur David Olson (2008-05-09):
# XXX--guess that it is only Morocco for now; guess only 2008 for now.
# From Steffen Thorsen (2008-08-27):
# Morocco will change the clocks back on the midnight between August 31
# and September 1. They originally planned to observe DST to near the end
# of September:
#
# One article about it (in French):
# <a href="http://www.menara.ma/fr/Actualites/Maroc/Societe/ci.retour_a_l_heure_gmt_a_partir_du_dimanche_31_aout_a_minuit_officiel_.default">
# http://www.menara.ma/fr/Actualites/Maroc/Societe/ci.retour_a_l_heure_gmt_a_partir_du_dimanche_31_aout_a_minuit_officiel_.default
# </a>
#
# We have some further details posted here:
# <a href="http://www.timeanddate.com/news/time/morocco-ends-dst-early-2008.html">
# http://www.timeanddate.com/news/time/morocco-ends-dst-early-2008.html
# </a>
# From Steffen Thorsen (2009-03-17):
# Morocco will observe DST from 2009-06-01 00:00 to 2009-08-21 00:00 according
# to many sources, such as
# <a href="http://news.marweb.com/morocco/entertainment/morocco-daylight-saving.html">
# http://news.marweb.com/morocco/entertainment/morocco-daylight-saving.html
# </a>
# <a href="http://www.medi1sat.ma/fr/depeche.aspx?idp=2312">
# http://www.medi1sat.ma/fr/depeche.aspx?idp=2312
# </a>
# (French)
#
# Our summary:
# <a href="http://www.timeanddate.com/news/time/morocco-starts-dst-2009.html">
# http://www.timeanddate.com/news/time/morocco-starts-dst-2009.html
# </a>
# From Alexander Krivenyshev (2009-03-17):
# Here is a link to official document from Royaume du Maroc Premier Ministre,
# Ministere de la Modernisation des Secteurs Publics
#
# Under Article 1 of Royal Decree No. 455-67 of Act 23 safar 1387 (2 june 1967)
# concerning the amendment of the legal time, the Ministry of Modernization of
# Public Sectors announced that the official time in the Kingdom will be
# advanced 60 minutes from Sunday 31 May 2009 at midnight.
#
# <a href="http://www.mmsp.gov.ma/francais/Actualites_fr/PDF_Actualites_Fr/HeureEte_FR.pdf">
# http://www.mmsp.gov.ma/francais/Actualites_fr/PDF_Actualites_Fr/HeureEte_FR.pdf
# </a>
#
# <a href="http://www.worldtimezone.com/dst_news/dst_news_morocco03.html">
# http://www.worldtimezone.com/dst_news/dst_news_morocco03.html
# </a>
# From Steffen Thorsen (2010-04-13):
# Several news media in Morocco report that the Ministry of Modernization
# of Public Sectors has announced that Morocco will have DST from
# 2010-05-02 to 2010-08-08.
#
# Example:
# <a href="http://www.lavieeco.com/actualites/4099-le-maroc-passera-a-l-heure-d-ete-gmt1-le-2-mai.html">
# http://www.lavieeco.com/actualites/4099-le-maroc-passera-a-l-heure-d-ete-gmt1-le-2-mai.html
# </a>
# (French)
# Our page:
# <a href="http://www.timeanddate.com/news/time/morocco-starts-dst-2010.html">
# http://www.timeanddate.com/news/time/morocco-starts-dst-2010.html
# </a>
# From Dan Abitol (2011-03-30):
# ...Rules for Africa/Casablanca are the following (24h format):
# On 3rd April 2011 at 00:00:00, [it] will be 3rd April 1:00:00.
# On 31st July 2011 at 00:59:59, [it] will be 31st July 00:00:00.
# ...Official links for the change in Morocco:
# The change was broadcast on FM radio.
# I've called ANRT (the telecom regulator in Morocco) at
# +212.537.71.84.00
# <a href="http://www.anrt.net.ma/fr/">
# http://www.anrt.net.ma/fr/
# </a>
# They said that
# <a href="http://www.map.ma/fr/sections/accueil/l_heure_legale_au_ma/view">
# http://www.map.ma/fr/sections/accueil/l_heure_legale_au_ma/view
# </a>
# is the official publication to look at.
# They said that the decision was already taken.
#
# More articles in the press
# <a href="http://www.yabiladi.com/articles/details/5058/secret-l-heure-d-ete-maroc-lev">
# http://www.yabiladi.com/articles/details/5058/secret-l-heure-d-ete-maroc-lev
# </a>
# e.html
# <a href="http://www.lematin.ma/Actualite/Express/Article.asp?id=148923">
# http://www.lematin.ma/Actualite/Express/Article.asp?id=148923
# </a>
# <a href="http://www.lavieeco.com/actualite/Le-Maroc-passe-sur-GMT%2B1-a-partir-de-dim">
# http://www.lavieeco.com/actualite/Le-Maroc-passe-sur-GMT%2B1-a-partir-de-dim
# anche-prochain-5538.html
# </a>
# From Petr Machata (2011-03-30):
# They have it written in English here:
# <a href="http://www.map.ma/eng/sections/home/morocco_to_spring_fo/view">
# http://www.map.ma/eng/sections/home/morocco_to_spring_fo/view
# </a>
#
# It says there that "Morocco will resume its standard time on July 31,
# 2011 at midnight." Now they don't say whether they mean midnight of
# wall clock time (i.e. 11pm UTC), but that's what I would assume. It has
# also been like that in the past.
# From Alexander Krivenyshev (2012-03-09):
# According to Infomédiaire web site from Morocco (infomediaire.ma),
# on March 9, 2012 (in French): "Heure légale: Le Maroc adopte officiellement
# l'heure d'été" (Legal time: Morocco officially adopts daylight saving time)
# <a href="http://www.infomediaire.ma/news/maroc/heure-l%C3%A9gale-le-maroc-adopte-officiellement-lheure-d%C3%A9t%C3%A9">
# http://www.infomediaire.ma/news/maroc/heure-l%C3%A9gale-le-maroc-adopte-officiellement-lheure-d%C3%A9t%C3%A9
# </a>
# The Governing Council adopted a draft decree under which Morocco DST starts
# on the last Sunday of March (March 25, 2012) and ends on the
# last Sunday of September (September 30, 2012),
# except during the month of Ramadan.
# Or (brief):
# <a href="http://www.worldtimezone.com/dst_news/dst_news_morocco06.html">
# http://www.worldtimezone.com/dst_news/dst_news_morocco06.html
# </a>
# From Arthur David Olson (2012-03-10):
# The infomediaire.ma source indicates that the system is to be in
# effect every year. It gives 03H00 as the "fall back" time of day;
# it lacks a "spring forward" time of day; assume 2:00 XXX.
# Wait on specifying the Ramadan exception for details about
# start date, start time of day, end date, and end time of day XXX.
# From Christophe Tropamer (2012-03-16):
# Seen Morocco change again:
# <a href="http://www.le2uminutes.com/actualite.php">
# http://www.le2uminutes.com/actualite.php
# </a>
# "...à partir du dernier dimance d'avril et non fins mars,
# comme annoncé précédemment."
# From Milamber Space Network (2012-07-17):
# The official return to GMT is announced by the Moroccan government:
# <a href="http://www.mmsp.gov.ma/fr/actualites.aspx?id=288">
# http://www.mmsp.gov.ma/fr/actualites.aspx?id=288 [in French]
# </a>
#
# Google translation, lightly edited:
# Back to the standard time of the Kingdom (GMT)
# Pursuant to Decree No. 2-12-126 issued on 26 Jumada (I) 1433 (April 18,
# 2012) and in accordance with the order of Mr. President of the
# Government No. 3-47-12 issued on 24 Sha'ban (11 July 2012), the Ministry
# of Public Service and Administration Modernization announces the return
# of the legal time of the Kingdom (GMT) from Friday, July 20, 2012 until
# Monday, August 20, 2012. So the time will be delayed by 60 minutes from
# 3:00 am Friday, July 20, 2012 and will again be advanced by 60 minutes
# August 20, 2012 from 2:00 am.
# RULE NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Morocco 1939 only - Sep 12 0:00 1:00 S
Rule Morocco 1939 only - Nov 19 0:00 0 -
Rule Morocco 1940 only - Feb 25 0:00 1:00 S
Rule Morocco 1945 only - Nov 18 0:00 0 -
Rule Morocco 1950 only - Jun 11 0:00 1:00 S
Rule Morocco 1950 only - Oct 29 0:00 0 -
Rule Morocco 1967 only - Jun 3 12:00 1:00 S
Rule Morocco 1967 only - Oct 1 0:00 0 -
Rule Morocco 1974 only - Jun 24 0:00 1:00 S
Rule Morocco 1974 only - Sep 1 0:00 0 -
Rule Morocco 1976 1977 - May 1 0:00 1:00 S
Rule Morocco 1976 only - Aug 1 0:00 0 -
Rule Morocco 1977 only - Sep 28 0:00 0 -
Rule Morocco 1978 only - Jun 1 0:00 1:00 S
Rule Morocco 1978 only - Aug 4 0:00 0 -
Rule Morocco 2008 only - Jun 1 0:00 1:00 S
Rule Morocco 2008 only - Sep 1 0:00 0 -
Rule Morocco 2009 only - Jun 1 0:00 1:00 S
Rule Morocco 2009 only - Aug 21 0:00 0 -
Rule Morocco 2010 only - May 2 0:00 1:00 S
Rule Morocco 2010 only - Aug 8 0:00 0 -
Rule Morocco 2011 only - Apr 3 0:00 1:00 S
Rule Morocco 2011 only - Jul 31 0 0 -
Rule Morocco 2012 max - Apr lastSun 2:00 1:00 S
Rule Morocco 2012 max - Sep lastSun 3:00 0 -
Rule Morocco 2012 only - Jul 20 3:00 0 -
Rule Morocco 2012 only - Aug 20 2:00 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Casablanca -0:30:20 - LMT 1913 Oct 26
0:00 Morocco WE%sT 1984 Mar 16
1:00 - CET 1986
0:00 Morocco WE%sT
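# A short decoding of the zic syntax above, for readers new to the format:
# the line "Rule Morocco 2012 max - Apr lastSun 2:00 1:00 S" means that from
# 2012 onward ("max"), on the last Sunday of April at 02:00 wall-clock time,
# clocks advance by 1:00 and the %s letter becomes "S", so Casablanca's
# FORMAT "WE%sT" renders as "WEST". The two 2012-only lines implement the
# Ramadan break described in the 2012-07-17 message above. Each zone
# continuation line applies until its [UNTIL] column; the final line, with
# no [UNTIL], is the currently active one.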
# Western Sahara
Zone Africa/El_Aaiun -0:52:48 - LMT 1934 Jan
-1:00 - WAT 1976 Apr 14
0:00 - WET
# Mozambique
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Maputo 2:10:20 - LMT 1903 Mar
2:00 - CAT
# Namibia
# The 1994-04-03 transition is from Shanks & Pottenger.
# Shanks & Pottenger report no DST after 1998-04; go with IATA.
# From Petronella Sibeene (2007-03-30) in
# <http://allafrica.com/stories/200703300178.html>:
# While the entire country changes its time, Katima Mulilo and other
# settlements in Caprivi unofficially will not because the sun there
# rises and sets earlier compared to other regions. Chief of
# Forecasting Riaan van Zyl explained that the far eastern parts of
# the country are close to 40 minutes earlier in sunrise than the rest
# of the country.
#
# From Paul Eggert (2007-03-31):
# Apparently the Caprivi Strip informally observes Botswana time, but
# we have no details. In the meantime people there can use Africa/Gaborone.
# RULE NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Namibia 1994 max - Sep Sun>=1 2:00 1:00 S
Rule Namibia 1995 max - Apr Sun>=1 2:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Windhoek 1:08:24 - LMT 1892 Feb 8
1:30 - SWAT 1903 Mar # SW Africa Time
2:00 - SAST 1942 Sep 20 2:00
2:00 1:00 SAST 1943 Mar 21 2:00
2:00 - SAST 1990 Mar 21 # independence
2:00 - CAT 1994 Apr 3
1:00 Namibia WA%sT
# Niger
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Niamey 0:08:28 - LMT 1912
-1:00 - WAT 1934 Feb 26
0:00 - GMT 1960
1:00 - WAT
# Nigeria
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Lagos 0:13:36 - LMT 1919 Sep
1:00 - WAT
# Reunion
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Reunion 3:41:52 - LMT 1911 Jun # Saint-Denis
4:00 - RET # Reunion Time
#
# Scattered Islands (Iles Eparses) administered from Reunion are as follows.
# The following information about them is taken from
# Iles Eparses (www.outre-mer.gouv.fr/domtom/ile.htm, 1997-07-22, in French;
# no longer available as of 1999-08-17).
# We have no info about their time zone histories.
#
# Bassas da India - uninhabited
# Europa Island - inhabited from 1905 to 1910 by two families
# Glorioso Is - inhabited until at least 1958
# Juan de Nova - uninhabited
# Tromelin - inhabited until at least 1958
# Rwanda
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Kigali 2:00:16 - LMT 1935 Jun
2:00 - CAT
# St Helena
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Atlantic/St_Helena -0:22:48 - LMT 1890 # Jamestown
-0:22:48 - JMT 1951 # Jamestown Mean Time
0:00 - GMT
# The other parts of the St Helena territory are similar:
# Tristan da Cunha: on GMT, say Whitman and the CIA
# Ascension: on GMT, says usno1995 and the CIA
# Gough (scientific station since 1955; sealers wintered previously):
# on GMT, says the CIA
# Inaccessible, Nightingale: no information, but probably GMT
# Sao Tome and Principe
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Sao_Tome 0:26:56 - LMT 1884
-0:36:32 - LMT 1912 # Lisbon Mean Time
0:00 - GMT
# Senegal
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Dakar -1:09:44 - LMT 1912
-1:00 - WAT 1941 Jun
0:00 - GMT
# Seychelles
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Mahe 3:41:48 - LMT 1906 Jun # Victoria
4:00 - SCT # Seychelles Time
# From Paul Eggert (2001-05-30):
# Aldabra, Farquhar, and Desroches, originally dependencies of the
# Seychelles, were transferred to the British Indian Ocean Territory
# in 1965 and returned to Seychelles control in 1976. We don't know
# whether this affected their time zone, so omit this for now.
# Possibly the islands were uninhabited.
# Sierra Leone
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
# Whitman gives Mar 31 - Aug 31 for 1931 on; go with Shanks & Pottenger.
Rule SL 1935 1942 - Jun 1 0:00 0:40 SLST
Rule SL 1935 1942 - Oct 1 0:00 0 WAT
Rule SL 1957 1962 - Jun 1 0:00 1:00 SLST
Rule SL 1957 1962 - Sep 1 0:00 0 GMT
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Freetown -0:53:00 - LMT 1882
-0:53:00 - FMT 1913 Jun # Freetown Mean Time
-1:00 SL %s 1957
0:00 SL %s
# Somalia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Mogadishu 3:01:28 - LMT 1893 Nov
3:00 - EAT 1931
2:30 - BEAT 1957
3:00 - EAT
# South Africa
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule SA 1942 1943 - Sep Sun>=15 2:00 1:00 -
Rule SA 1943 1944 - Mar Sun>=15 2:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Johannesburg 1:52:00 - LMT 1892 Feb 8
1:30 - SAST 1903 Mar
2:00 SA SAST
# Marion and Prince Edward Is
# scientific station since 1947
# no information
# Sudan
#
# From <a href="http://www.sunanews.net/sn13jane.html">
# Sudan News Agency (2000-01-13)
# </a>, also reported by Michael De Beukelaer-Dossche via Steffen Thorsen:
# Clocks will be moved ahead for 60 minutes all over the Sudan as of noon
# Saturday.... This was announced Thursday by Caretaker State Minister for
# Manpower Abdul-Rahman Nur-Eddin.
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Sudan 1970 only - May 1 0:00 1:00 S
Rule Sudan 1970 1985 - Oct 15 0:00 0 -
Rule Sudan 1971 only - Apr 30 0:00 1:00 S
Rule Sudan 1972 1985 - Apr lastSun 0:00 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Khartoum 2:10:08 - LMT 1931
2:00 Sudan CA%sT 2000 Jan 15 12:00
3:00 - EAT
# South Sudan
Zone Africa/Juba 2:06:24 - LMT 1931
2:00 Sudan CA%sT 2000 Jan 15 12:00
3:00 - EAT
# Swaziland
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Mbabane 2:04:24 - LMT 1903 Mar
2:00 - SAST
# Tanzania
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Dar_es_Salaam 2:37:08 - LMT 1931
3:00 - EAT 1948
2:45 - BEAUT 1961
3:00 - EAT
# Togo
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Lome 0:04:52 - LMT 1893
0:00 - GMT
# Tunisia
# From Gwillim Law (2005-04-30):
# My correspondent, Risto Nykanen, has alerted me to another adoption of DST,
# this time in Tunisia. According to Yahoo France News
# <http://fr.news.yahoo.com/050426/5/4dumk.html>, in a story attributed to AP
# and dated 2005-04-26, "Tunisia has decided to advance its official time by
# one hour, starting on Sunday, May 1. Henceforth, Tunisian time will be
# UTC+2 instead of UTC+1. The change will take place at 23:00 UTC next
# Saturday." (My translation)
#
# From Oscar van Vlijmen (2005-05-02):
# LaPresse, the first national daily newspaper ...
# <http://www.lapresse.tn/archives/archives280405/actualites/lheure.html>
# ... DST for 2005: on: Sun May 1 0h standard time, off: Fri Sept. 30,
# 1h standard time.
#
# From Atef Loukil (2006-03-28):
# The daylight saving time will be the same each year:
# Beginning : the last Sunday of March at 02:00
# Ending : the last Sunday of October at 03:00 ...
# http://www.tap.info.tn/en/index.php?option=com_content&task=view&id=1188&Itemid=50
# From Steffen Thorsen (2009-03-16):
# According to several news sources, Tunisia will not observe DST this year.
# (Arabic)
# <a href="http://www.elbashayer.com/?page=viewn&nid=42546">
# http://www.elbashayer.com/?page=viewn&nid=42546
# </a>
# <a href="http://www.babnet.net/kiwidetail-15295.asp">
# http://www.babnet.net/kiwidetail-15295.asp
# </a>
#
# We have also confirmed this with the US embassy in Tunisia.
# We have a wrap-up about this on the following page:
# <a href="http://www.timeanddate.com/news/time/tunisia-cancels-dst-2009.html">
# http://www.timeanddate.com/news/time/tunisia-cancels-dst-2009.html
# </a>
# From Alexander Krivenyshev (2009-03-17):
# Here is a link to Tunis Afrique Presse News Agency
#
# Standard time to be kept the whole year long (tap.info.tn):
#
# (in English)
# <a href="http://www.tap.info.tn/en/index.php?option=com_content&task=view&id=26813&Itemid=157">
# http://www.tap.info.tn/en/index.php?option=com_content&task=view&id=26813&Itemid=157
# </a>
#
# (in Arabic)
# <a href="http://www.tap.info.tn/ar/index.php?option=com_content&task=view&id=61240&Itemid=1">
# http://www.tap.info.tn/ar/index.php?option=com_content&task=view&id=61240&Itemid=1
# </a>
# From Arthur David Olson (2009-03-18):
# The Tunis Afrique Presse News Agency notice contains this: "This measure is due to the fact
# that the fasting month of ramadan coincides with the period concerned by summer time.
# Therefore, the standard time will be kept unchanged the whole year long."
# So foregoing DST seems to be an exception (albeit one that may be repeated in the future).
# From Alexander Krivenyshev (2010-03-27):
# According to some news reports, Tunisia confirmed it will not use DST in 2010.
#
# (translation):
# "The Tunisian government has decided to abandon DST, which was scheduled on
# Sunday...
# Tunisian authorities had suspended DST for the first time last year, which
# also coincided with the month of Ramadan..."
#
# (in Arabic)
# <a href="http://www.moheet.com/show_news.aspx?nid=358861&pg=1">
# http://www.moheet.com/show_news.aspx?nid=358861&pg=1
# <a href="http://www.almadenahnews.com/newss/news.php?c=118&id=38036">
# http://www.almadenahnews.com/newss/news.php?c=118&id=38036
# or
# <a href="http://www.worldtimezone.com/dst_news/dst_news_tunis02.html">
# http://www.worldtimezone.com/dst_news/dst_news_tunis02.html
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Tunisia 1939 only - Apr 15 23:00s 1:00 S
Rule Tunisia 1939 only - Nov 18 23:00s 0 -
Rule Tunisia 1940 only - Feb 25 23:00s 1:00 S
Rule Tunisia 1941 only - Oct 6 0:00 0 -
Rule Tunisia 1942 only - Mar 9 0:00 1:00 S
Rule Tunisia 1942 only - Nov 2 3:00 0 -
Rule Tunisia 1943 only - Mar 29 2:00 1:00 S
Rule Tunisia 1943 only - Apr 17 2:00 0 -
Rule Tunisia 1943 only - Apr 25 2:00 1:00 S
Rule Tunisia 1943 only - Oct 4 2:00 0 -
Rule Tunisia 1944 1945 - Apr Mon>=1 2:00 1:00 S
Rule Tunisia 1944 only - Oct 8 0:00 0 -
Rule Tunisia 1945 only - Sep 16 0:00 0 -
Rule Tunisia 1977 only - Apr 30 0:00s 1:00 S
Rule Tunisia 1977 only - Sep 24 0:00s 0 -
Rule Tunisia 1978 only - May 1 0:00s 1:00 S
Rule Tunisia 1978 only - Oct 1 0:00s 0 -
Rule Tunisia 1988 only - Jun 1 0:00s 1:00 S
Rule Tunisia 1988 1990 - Sep lastSun 0:00s 0 -
Rule Tunisia 1989 only - Mar 26 0:00s 1:00 S
Rule Tunisia 1990 only - May 1 0:00s 1:00 S
Rule Tunisia 2005 only - May 1 0:00s 1:00 S
Rule Tunisia 2005 only - Sep 30 1:00s 0 -
Rule Tunisia 2006 2008 - Mar lastSun 2:00s 1:00 S
Rule Tunisia 2006 2008 - Oct lastSun 2:00s 0 -
# Shanks & Pottenger give 0:09:20 for Paris Mean Time; go with Howse's
# more precise 0:09:21.
# Shanks & Pottenger say the 1911 switch was on Mar 9; go with Howse's Mar 11.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Tunis 0:40:44 - LMT 1881 May 12
0:09:21 - PMT 1911 Mar 11 # Paris Mean Time
1:00 Tunisia CE%sT
# Uganda
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Kampala 2:09:40 - LMT 1928 Jul
3:00 - EAT 1930
2:30 - BEAT 1948
2:45 - BEAUT 1957
3:00 - EAT
# Zambia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Lusaka 1:53:08 - LMT 1903 Mar
2:00 - CAT
# Zimbabwe
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Harare 2:04:12 - LMT 1903 Mar
2:00 - CAT
|
274056675/springboot-openai-chatgpt | 5,492 | mng_web/src/views/system/param.vue | <template>
<basic-container>
<avue-crud :option="option"
:table-loading="loading"
:data="data"
ref="crud"
v-model="form"
:page="page"
:permission="permissionList"
@row-del="rowDel"
@row-update="rowUpdate"
@row-save="rowSave"
@search-change="searchChange"
@search-reset="searchReset"
@selection-change="selectionChange"
@current-change="currentChange"
@size-change="sizeChange"
@on-load="onLoad">
<template slot="menuLeft">
<el-button type="danger"
size="small"
icon="el-icon-delete"
v-if="permission.param_delete"
plain
@click="handleDelete">删 除
</el-button>
</template>
</avue-crud>
</basic-container>
</template>
<script>
import {getList, remove, update, add} from "@/api/system/param";
import {mapGetters} from "vuex";
export default {
data() {
return {
form: {},
loading: true,
selectionList: [],
query: {},
page: {
pageSize: 10,
currentPage: 1,
total: 0
},
option: {
height: 'auto',
calcHeight: 210,
searchShow: true,
searchMenuSpan: 6,
tip: false,
border: true,
index: true,
selection: true,
viewBtn: true,
column: [
{
label: "参数名称",
prop: "paramName",
search: true,
rules: [{
required: true,
message: "请输入参数名称",
trigger: "blur"
}]
},
{
label: "参数键名",
prop: "paramKey",
search: true,
rules: [{
required: true,
message: "请输入参数键名",
trigger: "blur"
}]
},
{
label: "参数键值",
prop: "paramValue",
rules: [{
required: true,
message: "请输入参数键值",
trigger: "blur"
}]
}
]
},
data: []
};
},
computed: {
...mapGetters(["permission"]),
permissionList() {
return {
addBtn: this.vaildData(this.permission.param_add, false),
viewBtn: this.vaildData(this.permission.param_view, false),
delBtn: this.vaildData(this.permission.param_delete, false),
editBtn: this.vaildData(this.permission.param_edit, false)
};
},
ids() {
let ids = [];
this.selectionList.forEach(ele => {
ids.push(ele.id);
});
return ids.join(",");
}
},
methods: {
rowSave(row, done, loading) {
add(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowUpdate(row, index, done, loading) {
update(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowDel(row) {
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(row.id);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
});
},
searchReset() {
this.query = {};
this.onLoad(this.page);
},
searchChange(params, done) {
this.query = params;
this.page.currentPage = 1;
this.onLoad(this.page, params);
done();
},
selectionChange(list) {
this.selectionList = list;
},
handleDelete() {
if (this.selectionList.length === 0) {
this.$message.warning("请选择至少一条数据");
return;
}
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(this.ids);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
this.$refs.crud.toggleSelection();
});
},
currentChange(currentPage){
this.page.currentPage = currentPage;
},
sizeChange(pageSize){
this.page.pageSize = pageSize;
},
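    // Fetch one page from the backend: ad-hoc search params are merged into
    // the persisted query; total and records come from the wrapped response
    // payload (res.data.data) before the loading flag is cleared.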
onLoad(page, params = {}) {
this.loading = true;
getList(page.currentPage, page.pageSize, Object.assign(params, this.query)).then(res => {
const data = res.data.data;
this.page.total = data.total;
this.data = data.records;
this.loading = false;
});
}
}
};
</script>
<style>
</style>
|
274056675/springboot-openai-chatgpt | 5,096 | mng_web/src/views/report/reportlist.vue | <template>
<basic-container>
<avue-crud :option="option"
:table-loading="loading"
:data="data"
ref="crud"
v-model="form"
:page.sync="page"
:permission="permissionList"
@row-del="rowDel"
@search-change="searchChange"
@search-reset="searchReset"
@selection-change="selectionChange"
@current-change="currentChange"
@size-change="sizeChange"
@refresh-change="refreshChange"
@on-load="onLoad">
<template slot="menuLeft">
<el-button type="danger"
size="small"
icon="el-icon-delete"
plain
@click="handleDelete">删 除
</el-button>
</template>
<template slot-scope="scope" slot="menu">
<el-button
type="text"
icon="el-icon-edit-outline"
size="small"
@click.stop="handleDesign(scope.row.name)"
>设计
</el-button>
<el-button
type="text"
icon="el-icon-view"
size="small"
@click.stop="handlePreview(scope.row.name)"
>预览
</el-button>
</template>
<template slot-scope="{row}" slot="name">
<el-tag style="cursor:pointer" @click="handlePreview(row.name)">{{ row.name }}</el-tag>
</template>
</avue-crud>
</basic-container>
</template>
<script>
import {getList, remove} from "@/api/report/report";
import {mapGetters} from "vuex";
import website from '@/config/website';
export default {
data() {
return {
form: {},
selectionList: [],
query: {},
loading: true,
page: {
pageSize: 10,
currentPage: 1,
total: 0
},
option: {
height: 'auto',
calcHeight: 210,
tip: false,
searchShow: true,
searchMenuSpan: 6,
border: true,
index: true,
selection: true,
viewBtn: true,
dialogClickModal: false,
column: [
{
label: "文件名",
prop: "name",
search: true,
slot: true,
},
{
label: "创建时间",
prop: "createTime",
},
{
label: "更新时间",
prop: "updateTime",
}
]
},
data: []
};
},
computed: {
...mapGetters(["userInfo", "permission"]),
permissionList() {
return {
addBtn: false,
viewBtn: false,
delBtn: true,
editBtn: false
};
},
ids() {
let ids = [];
this.selectionList.forEach(ele => {
ids.push(ele.id);
});
return ids.join(",");
}
},
methods: {
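    // Preview/design both open the report engine inside the shared iframe
    // route; the report name is passed via the `_u=blade-<name>` query so the
    // backend (presumably a UReport-style engine at website.reportUrl) can
    // resolve the stored template.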
handlePreview(name) {
this.$router.push({path: `/myiframe/urlPath?name=preview-${name}&src=${website.reportUrl}/preview?_u=blade-${name}`});
},
handleDesign(name) {
this.$router.push({path: `/myiframe/urlPath?name=designer-${name}&src=${website.reportUrl}/designer?_u=blade-${name}`});
},
rowDel(row) {
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(row.id);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
});
},
searchReset() {
this.query = {};
this.onLoad(this.page);
},
searchChange(params, done) {
this.query = params;
this.page.currentPage = 1;
this.onLoad(this.page, params);
done();
},
selectionChange(list) {
this.selectionList = list;
},
selectionClear() {
this.selectionList = [];
this.$refs.crud.toggleSelection();
},
handleDelete() {
if (this.selectionList.length === 0) {
this.$message.warning("请选择至少一条数据");
return;
}
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(this.ids);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
this.$refs.crud.toggleSelection();
});
},
currentChange(currentPage) {
this.page.currentPage = currentPage;
},
sizeChange(pageSize) {
this.page.pageSize = pageSize;
},
refreshChange() {
this.onLoad(this.page, this.query);
},
onLoad(page, params = {}) {
this.loading = true;
getList(page.currentPage, page.pageSize, Object.assign(params, this.query)).then(res => {
const data = res.data.data;
this.page.total = data.total;
this.data = data.records;
this.loading = false;
this.selectionClear();
});
}
}
};
</script>
<style>
</style>
|
274056675/springboot-openai-chatgpt | 10,034 | mng_web/src/views/authority/role.vue | <template>
<basic-container>
<avue-crud :option="option"
:table-loading="loading"
:data="data"
ref="crud"
v-model="form"
:permission="permissionList"
@row-del="rowDel"
@row-update="rowUpdate"
@row-save="rowSave"
@search-change="searchChange"
@search-reset="searchReset"
@selection-change="selectionChange"
@current-change="currentChange"
@size-change="sizeChange"
@on-load="onLoad">
<template slot="menuLeft">
<el-button type="danger"
size="small"
icon="el-icon-delete"
v-if="permission.role_delete"
plain
@click="handleDelete">删 除
</el-button>
<el-button size="small"
icon="el-icon-delete"
@click="handleRole"
plain>权限设置
</el-button>
</template>
</avue-crud>
<el-dialog title="角色配置"
:visible.sync="box"
width="345px">
<el-tabs type="border-card">
<el-tab-pane label="菜单权限">
<el-tree :data="menuGrantList"
show-checkbox
node-key="id"
ref="treeMenu"
:default-checked-keys="menuTreeObj"
:props="props">
</el-tree>
</el-tab-pane>
<el-tab-pane label="数据权限">
<el-tree :data="dataScopeGrantList"
show-checkbox
node-key="id"
ref="treeDataScope"
:default-checked-keys="dataScopeTreeObj"
:props="props">
</el-tree>
</el-tab-pane>
</el-tabs>
<span slot="footer"
class="dialog-footer">
<el-button @click="box = false">取 消</el-button>
<el-button type="primary"
@click="submit">确 定</el-button>
</span>
</el-dialog>
</basic-container>
</template>
<script>
import {add, getList, getRole, getRoleTree, grant, grantTree, remove, update} from "@/api/system/role";
import {mapGetters} from "vuex";
import website from '@/config/website';
export default {
data() {
return {
form: {},
box: false,
props: {
label: "title",
value: "key"
},
menuGrantList: [],
dataScopeGrantList: [],
menuTreeObj: [],
dataScopeTreeObj: [],
selectionList: [],
query: {},
loading: true,
page: {
pageSize: 10,
currentPage: 1,
total: 0
},
option: {
searchShow: true,
searchMenuSpan: 6,
tip: false,
tree: true,
border: true,
index: true,
selection: true,
viewBtn: true,
column: [
{
label: "角色名称",
prop: "roleName",
search: true,
span: 24,
rules: [
{
required: true,
message: "请输入角色名称",
trigger: "blur"
}
]
},
{
label: "所属租户",
prop: "tenantId",
type: "tree",
dicUrl: "/api/blade-system/tenant/select",
addDisplay: false,
editDisplay: false,
viewDisplay: website.tenantMode,
span: 24,
props: {
label: "tenantName",
value: "tenantId"
},
hide: !website.tenantMode,
search: website.tenantMode,
rules: [{
required: true,
message: "请输入所属租户",
trigger: "click"
}]
},
{
label: "角色别名",
prop: "roleAlias",
search: true,
span: 24,
rules: [
{
required: true,
message: "请输入角色别名",
trigger: "blur"
}
]
},
{
label: "上级角色",
prop: "parentId",
dicData: [],
type: "tree",
hide: true,
span: 24,
props: {
label: "title"
},
rules: [
{
required: false,
message: "请选择上级角色",
trigger: "click"
}
]
},
{
label: "角色排序",
prop: "sort",
type: "number",
span: 24,
rules: [
{
required: true,
message: "请输入角色排序",
trigger: "blur"
}
]
}
]
},
data: []
};
},
computed: {
...mapGetters(["permission"]),
permissionList() {
return {
addBtn: this.vaildData(this.permission.role_add, false),
viewBtn: this.vaildData(this.permission.role_view, false),
delBtn: this.vaildData(this.permission.role_delete, false),
editBtn: this.vaildData(this.permission.role_edit, false)
};
},
ids() {
let ids = [];
this.selectionList.forEach(ele => {
ids.push(ele.id);
});
return ids.join(",");
},
idsArray() {
let ids = [];
this.selectionList.forEach(ele => {
ids.push(ele.id);
});
return ids;
}
},
methods: {
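    // submit() pushes both trees at once: the checked menu ids and the
    // checked data-scope ids for the selected role(s) are sent in a single
    // grant() call, then the dialog closes and the list reloads.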
submit() {
const menuList = this.$refs.treeMenu.getCheckedKeys();
const dataScopeList = this.$refs.treeDataScope.getCheckedKeys();
grant(this.idsArray, menuList, dataScopeList).then(() => {
this.box = false;
this.$message({
type: "success",
message: "操作成功!"
});
this.onLoad(this.page);
});
/*const menuLIst = this.$refs.tree.getCheckedKeys();
grant(this.idsArray, menuLIst).then(() => {
this.box = false;
this.$message({
type: "success",
message: "操作成功!"
});
this.onLoad(this.page);
});*/
},
rowSave(row, done, loading) {
add(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowUpdate(row, index, done, loading) {
update(row).then(() => {
done();
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
}, error => {
window.console.log(error);
loading();
});
},
rowDel(row) {
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(row.id);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
});
},
searchReset() {
this.query = {};
this.onLoad(this.page);
},
searchChange(params, done) {
this.query = params;
this.page.currentPage = 1;
this.onLoad(this.page, params);
done();
},
selectionChange(list) {
this.selectionList = list;
},
handleRole() {
if (this.selectionList.length !== 1) {
this.$message.warning("只能选择一条数据");
return;
}
this.menuTreeObj = [];
this.dataScopeTreeObj = [];
grantTree()
.then(res => {
this.menuGrantList = res.data.data.menu;
this.dataScopeGrantList = res.data.data.dataScope;
getRole(this.ids).then(res => {
this.menuTreeObj = res.data.data.menu;
this.dataScopeTreeObj = res.data.data.dataScope;
this.box = true;
});
});
/*this.defaultObj = [];
grantTree()
.then(res => {
this.list = res.data.data;
return getRole(this.ids);
})
.then(res => {
this.defaultObj = res.data.data;
this.box = true;
});*/
},
handleDelete() {
if (this.selectionList.length === 0) {
this.$message.warning("请选择至少一条数据");
return;
}
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(this.ids);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
this.$refs.crud.toggleSelection();
});
},
currentChange(currentPage) {
this.page.currentPage = currentPage;
},
sizeChange(pageSize) {
this.page.pageSize = pageSize;
},
onLoad(page, params = {}) {
this.loading = true;
getList(page.currentPage, page.pageSize, Object.assign(params, this.query)).then(res => {
this.data = res.data.data;
this.loading = false;
getRoleTree().then(res => {
const column = this.findObject(this.option.column, "parentId");
column.dicData = res.data.data;
});
});
}
}
};
</script>
<style>
</style>
|
27182812/ChatGLM-LLaMA-chinese-insturct | 45,542 | src/transformers/utils/fx.py | # coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import builtins
import collections
import functools
import inspect
import math
import operator
import os
import random
import warnings
from typing import Any, Callable, Dict, List, Optional, Type, Union
import torch
from packaging import version
from torch import nn
from torch.fx import Graph, GraphModule, Proxy, Tracer
from torch.fx.proxy import ParameterProxy
from .. import PretrainedConfig, PreTrainedModel, logging
from ..models.auto import get_values
from ..models.auto.modeling_auto import (
MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
MODEL_FOR_BACKBONE_MAPPING_NAMES,
MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
MODEL_FOR_CTC_MAPPING_NAMES,
MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES,
MODEL_FOR_MASKED_LM_MAPPING_NAMES,
MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES,
MODEL_FOR_PRETRAINING_MAPPING_NAMES,
MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
MODEL_MAPPING_NAMES,
)
from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available
from ..utils.versions import importlib_metadata
logger = logging.get_logger(__name__)
_IS_IN_DEBUG_MODE = os.environ.get("FX_DEBUG_MODE", "").upper() in ENV_VARS_TRUE_VALUES
def _generate_supported_model_class_names(
model_name: Type[PretrainedConfig],
supported_tasks: Optional[Union[str, List[str]]] = None,
) -> List[str]:
task_mapping = {
"default": MODEL_MAPPING_NAMES,
"pretraining": MODEL_FOR_PRETRAINING_MAPPING_NAMES,
"next-sentence-prediction": MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES,
"masked-lm": MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"causal-lm": MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"seq2seq-lm": MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"speech-seq2seq": MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES,
"multiple-choice": MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"document-question-answering": MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"question-answering": MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"sequence-classification": MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"token-classification": MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"masked-image-modeling": MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES,
"image-classification": MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"ctc": MODEL_FOR_CTC_MAPPING_NAMES,
"audio-classification": MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"semantic-segmentation": MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"backbone": MODEL_FOR_BACKBONE_MAPPING_NAMES,
}
if supported_tasks is None:
supported_tasks = task_mapping.keys()
if isinstance(supported_tasks, str):
supported_tasks = [supported_tasks]
model_class_names = []
for task in supported_tasks:
class_name = task_mapping[task].get(model_name, None)
if class_name:
model_class_names.append(class_name)
return model_class_names
_REGULAR_SUPPORTED_MODEL_NAMES_AND_TASKS = [
"altclip",
"albert",
"bart",
"bert",
"blenderbot",
"blenderbot-small",
"bloom",
"clip",
"convnext",
"deberta",
"deberta-v2",
"distilbert",
"donut-swin",
"electra",
"gpt2",
"gpt_neo",
"gptj",
"hubert",
"layoutlm",
"lxmert",
"m2m_100",
"marian",
"mbart",
"megatron-bert",
"mobilebert",
"mt5",
"nezha",
"opt",
"pegasus",
"plbart",
"resnet",
"roberta",
"segformer",
"speech_to_text",
"speech_to_text_2",
"swin",
"t5",
"trocr",
"vit",
"xglm",
"wav2vec2",
# "xlnet",
]
_REGULAR_SUPPORTED_MODELS = []
for item in _REGULAR_SUPPORTED_MODEL_NAMES_AND_TASKS:
if isinstance(item, dict):
_REGULAR_SUPPORTED_MODELS.extend(_generate_supported_model_class_names(**item))
else:
_REGULAR_SUPPORTED_MODELS.extend(_generate_supported_model_class_names(item))
_SPECIAL_SUPPORTED_MODELS = [
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
"AltCLIPTextModel",
"AltCLIPVisionModel",
"GitVisionModel",
"GPT2DoubleHeadsModel",
"Speech2Text2Decoder",
"TrOCRDecoder",
# TODO: add support for them as it should be quite easy to do so (small blocking issues).
# XLNetForQuestionAnswering,
]
_SUPPORTED_MODELS = tuple(sorted(set(_REGULAR_SUPPORTED_MODELS + _SPECIAL_SUPPORTED_MODELS)))
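# The helpers below are manual "meta" overrides: each one reproduces only the
# *shape* semantics of a torch op so that tracing can run on meta tensors
# without real computation. A quick sketch of the idea (values illustrative,
# assuming a standard PyTorch install):
#
#     emb = torch.nn.Embedding(100, 8)
#     ids = torch.zeros(2, 5, dtype=torch.long, device="meta")
#     torch_nn_embedding(emb, ids).shape  # -> torch.Size([2, 5, 8])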
def torch_nn_embedding(self, input):
return torch.empty(*input.shape, self.weight.shape[-1], device="meta")
def torch_nn_functional_embedding(
input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False
):
return torch.empty(*input.shape, weight.shape[-1], device="meta")
def torch_nn_layernorm(self, input):
return input
def torch_nn_groupnorm(self, input):
return input
def torch_nn_linear(self, input):
return torch.empty(input.shape[:-1] + (self.out_features,), device="meta")
def torch_relu(x):
return x
def torch_nn_relu(self, x):
return x
def torch_nn_functional_relu(x, inplace=False):
    # Raise for the in-place variant, consistent with the error message below
    # (the original condition was inverted and rejected the default case).
    if inplace:
        raise ValueError("Don't support in-place functional.relu for MetaTensor analysis")
    return x
def torch_where(condition, x, y):
# torch.where returns the broadcasted tensor of condition, x, and y,
# so hack it by using addition
return condition.to(device="meta") + x.to(device="meta") + y.to(device="meta")
def torch_abs(input, *, out=None):
if out is not None:
raise ValueError("Don't support in-place abs for MetaTensor analysis")
return input
def torch_arange(*args, **kwargs):
n = len(args)
step = 1
if n == 1:
start = 0
end = args[0]
elif n == 2:
start, end = args
else:
start, end, step = args
if isinstance(start, float):
start = int(start)
if isinstance(end, float):
        end = int(end)
if isinstance(step, float):
step = int(step)
step = kwargs.get("step", step)
dtype = kwargs.get("dtype")
return torch.empty((end - start) // step, dtype=dtype, device="meta")
def torch_full(*args, **kwargs):
args = list(args)
if isinstance(args[1], torch.Tensor) and args[1].device == torch.device("meta"):
args[1] = 1 # Any value.
kwargs_without_device = dict(kwargs)
kwargs_without_device.pop("device", None)
return torch.full(*args, **kwargs_without_device)
def torch_cat(tensors, dim=None, axis=None, *, out=None):
if dim is None and axis is None:
dim = 0
if dim is None and axis is not None:
dim = axis
if dim < 0:
dim = tensors[0].dim() + dim
shapes = [t.shape for t in tensors]
    shape = list(shapes[0])
    concatenated_dim = sum(s[dim] for s in shapes)
    final_shape = shape[:dim] + [concatenated_dim] + shape[dim + 1 :]
return torch.empty(final_shape, device="meta")
def torch_stack(tensors, dim=None, axis=None, *, out=None):
if dim is None and axis is None:
dim = 0
if dim is None and axis is not None:
dim = axis
if dim < 0:
dim = tensors[0].dim() + 1 + dim
shape = list(tensors[0].shape)
shape.insert(dim, len(tensors))
return torch.empty(shape, device="meta")
def torch_add(input, other, *, alpha=1, out=None):
if not isinstance(input, torch.Tensor):
return torch.empty_like(other, device="meta")
if not isinstance(other, torch.Tensor):
return torch.empty_like(input, device="meta")
max_length = max(input.dim(), other.dim())
input_shape = list(input.shape) + [1] * (max_length - input.dim())
other_shape = list(other.shape) + [1] * (max_length - other.dim())
shape = []
for i in range(max_length):
shape.append(max(input_shape[i], other_shape[i]))
return torch.empty(shape, device="meta")
def torch_mul(input, other, *, out=None):
return torch_add(input, other, out=out)
def torch_tensor_mul(self, other):
return torch_mul(self, other)
def torch_matmul(input, other, *, out=None):
d1 = input.dim()
d2 = other.dim()
shape = None
if d1 == 1 and d2 == 1:
shape = None
elif d1 == 2 and d2 == 2:
shape = (input.size(0), other.size(1))
elif d1 == 1 and d2 == 2:
shape = (other.size(1),)
    elif d1 == 2 and d2 == 1:
shape = (input.size(0),)
else:
        max_length = max(input.dim(), other.dim())
        # Align both shapes to max_length by left-padding with -1 placeholders.
        shape1 = [-1] * (max_length - d1) + list(input.shape)
        shape2 = [-1] * (max_length - d2) + list(other.shape)
shape = []
for i in range(max_length):
shape.append(max(shape1[i], shape2[i]))
shape[-2] = shape1[-2]
shape[-1] = shape2[-1]
if d1 == 1:
shape.pop(-2)
if d2 == 1:
shape.pop(-1)
if shape is None:
return torch.tensor(0.0, device="meta")
return torch.empty(*shape, device="meta")
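# Worked example for torch_matmul above: input (2, 3, 4) @ other (4, 5) takes
# the general branch; the shapes are left-padded to [2, 3, 4] and [-1, 4, 5],
# broadcast elementwise to [2, 4, 5], and the last two dims are then fixed to
# (3, 5), yielding a meta tensor of shape (2, 3, 5) -- the matmul result shape.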
def torch_bmm(input, mat2, *, out=None):
if out is not None:
raise ValueError("Don't support in-place bmm for MetaTensor analysis")
batch_size, n, m = input.shape
_, _, p = mat2.shape
return torch.empty(batch_size, n, p, device="meta")
def torch_baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None):
if out is not None:
raise ValueError("Don't support in-place baddbmm for MetaTensor analysis")
return torch_bmm(batch1, batch2)
def torch_tensor_baddbmm(self, batch1, batch2, *, beta=1, alpha=1, out=None):
return torch_baddbmm(self, batch1, batch2, beta=beta, alpha=alpha, out=out)
def torch_einsum(equation, *operands):
# TODO: infer shape without performing the computation, this might be quite hard.
concrete_operands = (torch.empty_like(operand, device="cpu") for operand in operands)
return torch.einsum(equation, *concrete_operands).to("meta")
def torch_tensor_repeat(self, *sizes):
shape = list(self.shape)
for i, x in enumerate(sizes):
shape[i] *= x
return torch.empty(shape, device="meta")
def torch_index_select(input, dim, index, *, out=None):
shape = list(input.shape)
shape[dim] = len(index)
return torch.empty(*shape, device="meta")
def torch_tensor_index_select(self, dim, index):
return torch_index_select(self, dim, index)
def torch_roll(input, shifts, dims=None):
return input
def torch_flip(input, dims):
return input
def torch_tensor_flip(self, dims):
return self
def torch_nn_conv1d(self, input):
l_in = input.shape[-1]
shape = None
padding = self.padding
if padding == "valid":
padding = (0, 0)
if padding == "same":
shape = list(input.shape)
if shape is None:
shape = list(input.shape)
l_out = math.floor(
(l_in + 2 * padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1
)
shape[-1] = l_out
shape[-2] = self.out_channels
return torch.empty(shape, device="meta")
def torch_nn_conv2d(self, input):
h_in, w_in = input.shape[-2:]
shape = None
padding = self.padding
if padding == "valid":
padding = (0, 0)
if padding == "same":
shape = list(input.shape)
if shape is None:
shape = list(input.shape)
h_out = math.floor(
(h_in + 2 * padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1
)
w_out = math.floor(
(w_in + 2 * padding[1] - self.dilation[1] * (self.kernel_size[1] - 1) - 1) / self.stride[1] + 1
)
shape[-2:] = [h_out, w_out]
shape[-3] = self.out_channels
return torch.empty(shape, device="meta")
def torch_squeeze(input, dim=None):
shape = list(input.shape)
if dim is not None:
if dim < 0:
dim = input.dim() + dim
if shape[dim] == 1:
shape.pop(dim)
else:
new_shape = []
for dim_value in shape:
if dim_value == 1:
continue
new_shape.append(dim_value)
shape = new_shape
return torch.empty(shape, device="meta")
def torch_tensor_squeeze(self, dim=None):
return torch_squeeze(self, dim)
def torch_unsqueeze(input, dim):
shape = list(input.shape)
if dim < 0:
dim = input.dim() + 1 + dim
shape.insert(dim, 1)
return torch.empty(shape, device="meta")
def torch_tensor_unsqueeze(self, dim):
return torch_unsqueeze(self, dim)
def torch_unique_consecutive(input, **kwargs):
output = torch.unique_consecutive(torch.zeros_like(input, device="cpu"), **kwargs)
if isinstance(output, torch.Tensor):
return output.to("meta")
else:
        return tuple(map(lambda x: x.to("meta"), output))
def torch_nn_functional_one_hot(tensor, num_classes=-1):
if num_classes < 0:
raise ValueError("Don't support automatic num_classes inference for MetaTensor analysis")
shape = list(tensor.shape) + [num_classes]
return torch.empty(shape, device="meta")
def torch_nn_mseloss(self, input, target):
if self.reduction == "none":
shape = target.shape
else:
shape = (1,)
return torch.empty(shape, device="meta")
def torch_nn_crossentropyloss(self, input, target):
if self.reduction == "none":
shape = target.shape
else:
shape = (1,)
return torch.empty(shape, device="meta")
def torch_nn_bcewithlogitsloss(self, input, target):
if self.reduction == "none":
shape = target.shape
else:
shape = (1,)
return torch.empty(shape, device="meta")
def operator_getitem(a, b):
def to_concrete(t):
if isinstance(t, torch.Tensor):
concrete = torch.ones_like(t, device="cpu")
if concrete.dtype in [torch.float16, torch.float32, torch.float64, torch.int32]:
concrete = concrete.to(torch.int64)
return concrete
return t
if isinstance(a, torch.Tensor):
# TODO: infer shape without performing the computation.
if isinstance(b, tuple):
b = tuple(map(to_concrete, b))
else:
b = to_concrete(b)
return operator.getitem(torch.empty_like(a, device="cpu"), b).to("meta")
return operator.getitem(a, b)
_MANUAL_META_OVERRIDES: Dict[Callable, Callable] = {
torch.nn.Embedding: torch_nn_embedding,
torch.nn.functional.embedding: torch_nn_functional_embedding,
torch.nn.LayerNorm: torch_nn_layernorm,
torch.nn.GroupNorm: torch_nn_groupnorm,
torch.nn.Linear: torch_nn_linear,
torch.relu: torch_relu,
torch.nn.functional.relu: torch_nn_functional_relu,
torch.nn.ReLU: torch_nn_relu,
torch.where: torch_where,
torch.abs: torch_abs,
torch.arange: torch_arange,
torch.full: torch_full,
torch.cat: torch_cat,
torch.stack: torch_stack,
torch.add: torch_add,
torch.mul: torch_mul,
torch.Tensor.mul: torch_tensor_mul,
torch.matmul: torch_matmul,
torch.bmm: torch_bmm,
torch.baddbmm: torch_baddbmm,
torch.Tensor.baddbmm: torch_tensor_baddbmm,
torch.einsum: torch_einsum,
torch.Tensor.repeat: torch_tensor_repeat,
torch.roll: torch_roll,
torch.flip: torch_flip,
torch.Tensor.flip: torch_tensor_flip,
torch.index_select: torch_index_select,
torch.Tensor.index_select: torch_tensor_index_select,
torch.nn.Conv1d: torch_nn_conv1d,
torch.nn.Conv2d: torch_nn_conv2d,
torch.squeeze: torch_squeeze,
torch.Tensor.squeeze: torch_tensor_squeeze,
torch.unsqueeze: torch_unsqueeze,
torch.Tensor.unsqueeze: torch_tensor_unsqueeze,
torch.unique_consecutive: torch_unique_consecutive,
torch.nn.functional.one_hot: torch_nn_functional_one_hot,
torch.nn.MSELoss: torch_nn_mseloss,
torch.nn.CrossEntropyLoss: torch_nn_crossentropyloss,
torch.nn.BCEWithLogitsLoss: torch_nn_bcewithlogitsloss,
operator.getitem: operator_getitem,
}
class HFProxy(Proxy):
"""
Proxy that uses metadata to handle data-dependent control-flow.
"""
def install_metadata(self, metadata):
self._metadata = metadata
@property
def shape(self):
return self.tracer.create_proxy("call_method", "size", (self,), {})
@property
def device(self):
# Hack so we can track when devices are used. During meta-tensor propagation,
# replace these values with a constant 'meta'
return MetaDeviceAttribute(self, "device")
def __len__(self):
if hasattr(self, "_metadata") and self._metadata is not None:
return len(self._metadata)
return super().__len__()
def __bool__(self):
if hasattr(self, "_metadata") and self._metadata is not None:
return self._metadata
return super().__bool__()
def __getattr__(self, k):
if k == "_metadata":
return self.__getattribute__(k)
# note: not added to the graph yet, if this is a method call
# we peephole optimize to the method invocation
return HFAttribute(self, k)
def __setitem__(self, indices, values):
return self.tracer.create_proxy("call_function", operator.setitem, (self, indices, values), {})
def __contains__(self, key):
if hasattr(self, "_metadata") and self._metadata is not None:
return key in self._metadata
return super().__contains__(key)
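# A concrete illustration of the metadata mechanism (a sketch): a plain
# torch.fx.Proxy cannot trace data-dependent branches such as
#     if input_ids is not None and len(input_ids) > 1: ...
# because __bool__/__len__ have no concrete value during tracing. HFProxy
# answers both from the metadata recorded off the dummy inputs instead.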
class HFAttribute(HFProxy):
def __init__(self, root, attr: str):
self.root = root
self.attr = attr
self.tracer = root.tracer
self._node = None
if hasattr(self.root, "_metadata"):
self.install_metadata(getattr(self.root._metadata, attr))
@property
def node(self):
# the node for attributes is added lazily, since most will just be method calls
# which do not rely on the getitem call
if self._node is None:
self._node = self.tracer.create_proxy("call_function", builtins.getattr, (self.root, self.attr), {}).node
return self._node
def __call__(self, *args, **kwargs):
return self.tracer.create_proxy("call_method", self.attr, (self.root,) + args, kwargs)
class MetaDeviceAttribute(HFAttribute):
pass
def _proxies_to_metas(v):
"""Returns the underlying metadata for HFProxies, and behaves like the identity for the others."""
if isinstance(v, MetaDeviceAttribute):
return "meta"
if isinstance(v, torch.fx.Proxy):
if not (isinstance(v, HFProxy) and hasattr(v, "_metadata")):
raise RuntimeError(f"No metadata was found for {v}")
return v._metadata
return v
def _gen_constructor_wrapper(target):
@functools.wraps(target)
def wrapper(*args, **kwargs):
proxy = None
def check_has_proxy(v):
if isinstance(v, Proxy):
nonlocal proxy
proxy = v
torch.fx.node.map_aggregate(args, check_has_proxy)
torch.fx.node.map_aggregate(kwargs, check_has_proxy)
if proxy is not None:
return proxy.tracer.create_proxy("call_function", target, args, kwargs)
else:
return target(*args, **kwargs)
return wrapper, target
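# Usage sketch for the wrapper above (this mirrors how HFTracer patches the
# constructors listed in _TORCH_METHODS_TO_PATCH; names are illustrative):
#
#     wrapped_arange, orig = _gen_constructor_wrapper(torch.arange)
#     wrapped_arange(10)         # no Proxy argument -> plain torch.arange(10)
#     wrapped_arange(seq_proxy)  # Proxy argument -> recorded as a graph node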
def _generate_random_int(low: int = 10, high: int = 20, forbidden_values: Optional[List[int]] = None):
if forbidden_values is None:
forbidden_values = []
value = random.randint(low, high)
while value in forbidden_values:
value = random.randint(low, high)
return value
class HFTracer(Tracer):
"""
Tracer that is able to symbolically trace models from the library. To do that, it uses the HFProxy instead of the
regular PyTorch torch.fx.Proxy.
"""
# Feature flag for proxying accesses to buffer values
proxy_buffer_attributes: bool = True
allow_insert_stateless_mods: bool = True
_TORCH_METHODS_TO_PATCH = [
"arange",
"zeros",
"ones",
"full",
"full_like",
"eye",
"empty",
"tensor",
"clamp",
"finfo",
]
def __init__(self, autowrap_modules=(math,), autowrap_functions=()):
super().__init__(autowrap_modules=autowrap_modules, autowrap_functions=autowrap_functions)
if not is_torch_fx_available():
torch_version = version.parse(importlib_metadata.version("torch"))
raise ImportError(
f"Found an incompatible version of torch. Found version {torch_version}, but only version "
f"{TORCH_FX_REQUIRED_VERSION} is supported."
)
def _generate_dummy_input(
self, model: PreTrainedModel, input_name: str, shape: List[int]
) -> Dict[str, torch.Tensor]:
"""Generates dummy input for model inference recording."""
# Retrieving the model class, either from the "class_for_deserialization" attribute if the model was restored
# from pickle, or from the "__class__" attribute in the general case.
model_class_name = getattr(model, "class_for_deserialization", model.__class__).__name__
device = model.device
inputs_dict = {}
if input_name in ["labels", "start_positions", "end_positions"]:
batch_size = shape[0]
if model_class_name in [
*get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES),
*get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES),
*get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES),
*get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
*get_values(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES),
]:
inputs_dict["labels"] = torch.zeros(batch_size, dtype=torch.long, device=device)
elif model_class_name in [
*get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES),
*get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES),
"XLNetForQuestionAnswering",
]:
inputs_dict["start_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device)
inputs_dict["end_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device)
elif model_class_name in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES):
if not hasattr(model.config, "problem_type") or model.config.problem_type is None:
raise ValueError(
"Could not retrieve the problem type for the sequence classification task, please set "
'model.config.problem_type to one of the following values: "regression", '
'"single_label_classification", or "multi_label_classification".'
)
if model.config.problem_type == "regression":
labels_shape = (batch_size, model.config.num_labels)
labels_dtype = torch.float32
elif model.config.problem_type == "single_label_classification":
labels_shape = (batch_size,)
labels_dtype = torch.long
elif model.config.problem_type == "multi_label_classification":
labels_shape = (batch_size, model.config.num_labels)
labels_dtype = torch.float32
else:
raise ValueError(
'Expected model.config.problem_type to be either: "regression", "single_label_classification"'
f', or "multi_label_classification", but "{model.config.problem_type}" was provided.'
)
inputs_dict["labels"] = torch.zeros(*labels_shape, dtype=labels_dtype, device=device)
elif model_class_name in [
*get_values(MODEL_FOR_PRETRAINING_MAPPING_NAMES),
*get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES),
*get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES),
*get_values(MODEL_FOR_MASKED_LM_MAPPING_NAMES),
*get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES),
*get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES),
"GPT2DoubleHeadsModel",
]:
inputs_dict["labels"] = torch.zeros(shape, dtype=torch.long, device=device)
elif model_class_name in [*get_values(MODEL_FOR_CTC_MAPPING_NAMES)]:
inputs_dict["labels"] = torch.zeros(shape, dtype=torch.float32, device=device)
else:
raise NotImplementedError(
f"Generating the dummy input named {input_name} for {model_class_name} is not supported yet."
)
elif "pixel_values" in input_name:
batch_size = shape[0]
image_size = getattr(model.config, "image_size", None)
if image_size is None:
if hasattr(model.config, "vision_config"):
image_size = model.config.vision_config.image_size
elif hasattr(model.config, "encoder"):
image_size = model.config.encoder.image_size
else:
image_size = (_generate_random_int(), _generate_random_int())
# If no num_channels is in the config, use some arbitrary value.
num_channels = getattr(model.config, "num_channels", 3)
if not isinstance(image_size, collections.abc.Iterable):
image_size = (image_size, image_size)
height, width = image_size
inputs_dict[input_name] = torch.zeros(
batch_size, num_channels, height, width, dtype=torch.float32, device=device
)
elif "bbox" in input_name:
inputs_dict[input_name] = torch.zeros(*shape, 4, dtype=torch.float, device=device)
elif "input_features" in input_name:
inputs_dict[input_name] = torch.zeros(
*shape, model.config.input_feat_per_channel, dtype=torch.float, device=device
)
elif "visual_feats" in input_name:
inputs_dict[input_name] = torch.zeros(
shape
+ [
model.config.visual_feat_dim,
],
dtype=torch.float,
device=device,
)
elif "visual_pos" in input_name:
inputs_dict[input_name] = torch.zeros(
shape
+ [
model.config.visual_pos_dim,
],
dtype=torch.float,
device=device,
)
elif "inputs" in input_name:
inputs_dict[input_name] = torch.zeros(*shape, dtype=torch.float, device=device)
elif "input_values" in input_name:
batch_size, _ = shape
# Generating big sequence length for audio inputs.
seq_length = _generate_random_int(low=10000, high=20000)
inputs_dict[input_name] = torch.zeros(batch_size, seq_length, dtype=torch.float, device=device)
elif "mask" in input_name or "ids" in input_name:
inputs_dict[input_name] = torch.zeros(shape, dtype=torch.long, device=device)
else:
shape_with_hidden_size = shape + [model.config.hidden_size]
inputs_dict[input_name] = torch.zeros(shape_with_hidden_size, dtype=torch.float, device=device)
return inputs_dict
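    # For instance, a call like _generate_dummy_input(model, "input_ids", [2, 7])
    # falls through to the "mask"/"ids" branch above and returns
    # {"input_ids": torch.zeros((2, 7), dtype=torch.long, device=device)}.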
def create_proxy(self, kind, target, args, kwargs, name=None, type_expr=None, proxy_factory_fn=None):
rv = super().create_proxy(kind, target, args, kwargs, name, type_expr, proxy_factory_fn)
if kind == "placeholder" and target in self.meta_args:
rv.install_metadata(self.meta_args[target])
return rv
if target in self.orig_fns:
# NOTE: tensor constructors in PyTorch define the `device` argument as
# *kwargs-only*. That is why this works. If you add methods to
# _TORCH_METHODS_TO_PATCH that do not define `device` as kwarg-only,
# this will break and you will likely see issues where we cannot infer
# the size of the output.
if "device" in kwargs:
kwargs["device"] = "meta"
try:
args_metas = torch.fx.node.map_aggregate(args, _proxies_to_metas)
kwargs_metas = torch.fx.node.map_aggregate(kwargs, _proxies_to_metas)
if kind == "call_function":
meta_target = _MANUAL_META_OVERRIDES.get(target, target)
meta_out = meta_target(*args_metas, **kwargs_metas)
if isinstance(meta_out, torch.Tensor):
meta_out = meta_out.to(device="meta")
elif kind == "call_method":
method = getattr(args_metas[0].__class__, target)
meta_target = _MANUAL_META_OVERRIDES.get(method, method)
meta_out = meta_target(*args_metas, **kwargs_metas)
elif kind == "call_module":
if not hasattr(self, "orig_forward"):
raise AttributeError(f"{self} does not have an attribute called orig_forward")
self._disable_module_getattr = True
try:
mod = self.root.get_submodule(target)
mod_type = type(mod)
if mod_type in _MANUAL_META_OVERRIDES:
meta_out = _MANUAL_META_OVERRIDES[mod_type](mod, *args_metas, **kwargs_metas)
else:
meta_out = self.orig_forward(*args_metas, **kwargs_metas)
finally:
self._disable_module_getattr = False
elif kind == "get_attr":
self._disable_module_getattr = True
try:
attr_itr = self.root
atoms = target.split(".")
for atom in atoms:
attr_itr = getattr(attr_itr, atom)
if isinstance(attr_itr, torch.Tensor):
meta_out = attr_itr.to(device="meta")
else:
meta_out = attr_itr
finally:
self._disable_module_getattr = False
else:
return rv
if not isinstance(rv, Proxy):
raise ValueError("Don't support composite output yet")
rv.install_metadata(meta_out)
except Exception as e:
if _IS_IN_DEBUG_MODE:
warnings.warn(f"Could not compute metadata for {kind} target {target}: {e}")
return rv
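# Illustrative note (editorial): the metadata installed above is computed with
# PyTorch "meta" tensors, which track shape and dtype without allocating
# storage, e.g.:
#     a = torch.zeros(2, 3, device="meta")
#     b = torch.zeros(3, 4, device="meta")
#     (a @ b).shape  # torch.Size([2, 4]), computed without any real data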
# Replaced by .getattr from PyTorch 1.13
def _module_getattr(self, attr, attr_val, parameter_proxy_cache):
if getattr(self, "_disable_module_getattr", False):
return attr_val
else:
def maybe_get_proxy_for_attr(attr_val, collection_to_search, parameter_proxy_cache):
for n, p in collection_to_search:
if attr_val is p:
if n not in parameter_proxy_cache:
kwargs = {}
if "proxy_factory_fn" in inspect.signature(self.create_proxy).parameters:
kwargs["proxy_factory_fn"] = (
None
if not self.param_shapes_constant
else lambda node: ParameterProxy(self, node, n, attr_val)
)
val_proxy = self.create_proxy("get_attr", n, (), {}, **kwargs) # type: ignore[arg-type]
parameter_proxy_cache[n] = val_proxy
return parameter_proxy_cache[n]
return None
if isinstance(attr_val, torch.nn.Parameter):
maybe_parameter_proxy = maybe_get_proxy_for_attr(
attr_val, self.root.named_parameters(), parameter_proxy_cache
)
if maybe_parameter_proxy is not None:
return maybe_parameter_proxy
if self.proxy_buffer_attributes and isinstance(attr_val, torch.Tensor):
maybe_buffer_proxy = maybe_get_proxy_for_attr(
attr_val, self.root.named_buffers(), parameter_proxy_cache
)
if maybe_buffer_proxy is not None:
return maybe_buffer_proxy
return attr_val
# Needed for PyTorch 1.13+
def getattr(self, attr: str, attr_val: Any, parameter_proxy_cache: Dict[str, Any]):
return self._module_getattr(attr, attr_val, parameter_proxy_cache)
def call_module(self, m, forward, args, kwargs):
self.orig_forward = forward
return super().call_module(m, forward, args, kwargs)
def proxy(self, node):
return HFProxy(node, self)
def trace(
self,
root: Union[torch.nn.Module, Callable[..., Any]],
concrete_args: Optional[Dict[str, Any]] = None,
dummy_inputs: Optional[Dict[str, Any]] = None,
complete_concrete_args_with_inputs_not_in_dummy_inputs: bool = True,
) -> Graph:
"""
Traces `root` and returns the corresponding FX `torch.fx.Graph` representation. `root` can either be a
`torch.nn.Module` instance or a Python callable. Note that after this call, `self.root` may be different from
the `root` passed in here. For example, when a free function is passed to `trace()`, we will create a
`torch.nn.Module` instance to use as the root and add embedded constants to.
Args:
root (`torch.nn.Module` or `Callable`):
Either a `torch.nn.Module` or a function to be traced through. If `root` is not a
[`~transformers.PreTrainedModel`], then `dummy_inputs` must be passed, otherwise tracing will fail.
concrete_args (`Dict[str, Any]`, *optional*):
Concrete arguments that should not be treated as Proxies.
dummy_inputs (`Dict[str, Any]`, *optional*):
The dummy inputs needed to handle data-dependent control-flow if `root` is not a
[`~transformers.PreTrainedModel`]. It can also be used when `root` is a
[`~transformers.PreTrainedModel`] to specify custom dummy inputs for a subset or all the model inputs.
complete_concrete_args_with_inputs_not_in_dummy_inputs (`bool`, *optional*, defaults to `True`):
If `True`, and `dummy_inputs` is specified, every argument that `root` can take that is not in
`dummy_inputs` and not in `concrete_args` will be added to `concrete_args`; otherwise this argument does nothing.
Returns:
`torch.fx.Graph`:
A FX `torch.fx.Graph` representing the semantics of the passed-in `root`.
"""
sig = inspect.signature(root.forward if isinstance(root, torch.nn.Module) else root)
if concrete_args is None:
concrete_args = {}
if dummy_inputs is not None and complete_concrete_args_with_inputs_not_in_dummy_inputs:
for param in sig.parameters.values():
if param.name in dummy_inputs:
continue
if param.default is inspect.Parameter.empty:
raise ValueError(f"You need to specify a default value for the parameter {param.name}.")
concrete_args.update(
{
p.name: p.default
for p in sig.parameters.values()
if (p.name not in dummy_inputs and p.name not in concrete_args)
}
)
input_names = sig.parameters.keys() - concrete_args.keys()
# Creating a random input shape to generate dummy inputs.
batch_size = _generate_random_int()
sequence_length = _generate_random_int()
shape = [batch_size, sequence_length]
if root.__class__.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES):
num_choices = _generate_random_int(low=2, high=5)
shape.insert(1, num_choices)
inputs = dict(dummy_inputs) if dummy_inputs is not None else {}
for input_name in input_names:
if input_name in inputs:
continue
# We enforce that root must either be a PreTrainedModel or deserialized from a serialized traced model to
# be able to use HFTracer._generate_dummy_input.
if isinstance(root, PreTrainedModel) or type(root).__qualname__.startswith("_deserialize_graph_module"):
inputs.update(self._generate_dummy_input(root, input_name, shape))
else:
raise RuntimeError(
f"Could not generate input named {input_name} for because root is not a"
" transformers.PreTrainedModel."
)
concrete_metas = {
input_name: input_.to("meta") if isinstance(input_, torch.Tensor) else input_
for input_name, input_ in inputs.items()
}
for param in sig.parameters.values():
if param.kind == inspect.Parameter.VAR_KEYWORD and param.name not in input_names:
concrete_metas[f"**{param.name}"] = {}
self.meta_args = concrete_metas
self.patched_torch_methods = {
target: _gen_constructor_wrapper(getattr(torch, target)) for target in self._TORCH_METHODS_TO_PATCH
}
self.orig_fns = set()
for name, (wrapper, orig) in self.patched_torch_methods.items():
setattr(torch, name, wrapper)
self.orig_fns.add(orig)
try:
self.graph = super().trace(root, concrete_args=concrete_args)
finally:
for name, (_, orig) in self.patched_torch_methods.items():
setattr(torch, name, orig)
# This is necessary because concrete args are added as input to the traced module since
# https://github.com/pytorch/pytorch/pull/55888.
for node in self.graph.nodes:
if node.op == "placeholder":
# Removing default values for inputs as the forward pass will fail with them.
if node.target in input_names:
node.args = ()
# Without this, torch.jit.script fails because the inputs type is Optional[torch.Tensor].
# It cannot infer on the attributes and methods the input should have, and fails.
node.type = torch.Tensor
# It is a concrete arg so it is not used and should be removed.
else:
to_visit = [node]
to_delete = collections.OrderedDict()
while to_visit:
n = to_visit.pop(0)
to_delete[n] = None
to_visit += list(n.users.keys())
for user in reversed(to_delete.keys()):
self.graph.erase_node(user)
# TODO: solve GraphModule creation.
# Without this, the return type annotation "Tuple" causes code execution to fail.
if node.op == "output":
node.type = None
return self.graph
def _stateless_mod_instanciation_depends_on_proxies(self, mod: nn.Module) -> bool:
"""
Whether the module was instantiated with Proxies. If that is the case, such a module cannot be a leaf module
because its attributes are input-dependent.
"""
return any(isinstance(attr, Proxy) for attr in mod.__dict__.values())
def _insert_module_as_submodule(self, mod: nn.Module) -> str:
"""
Helper method which tries to insert a module that was not declared as a submodule.
"""
# If one of the module attributes is a Proxy, it means that its instantiation is input-dependent.
# It is not possible to insert such modules, those should be traced through.
if self._stateless_mod_instanciation_depends_on_proxies(mod):
return ""
idx = 0
mod_name = mod.__class__.__name__.lower()
path = f"{mod_name}_{idx}"
already_inserted = False
while hasattr(self.root, path):
if getattr(self.root, path) is mod:
already_inserted = True
break
path = f"{mod_name}_{idx}"
idx += 1
# No need to add multiple instances of the same module.
if not already_inserted:
self.root.add_module(path, mod)
return path
def path_of_module(self, mod: nn.Module) -> str:
"""
Helper method to find the qualified name of `mod` in the Module hierarchy of `root`. For example, if `root` has
a submodule named `foo`, which has a submodule named `bar`, passing `bar` into this function will return the
string "foo.bar".
Args:
mod (`nn.Module`): The module to retrieve the qualified name for.
"""
try:
return super().path_of_module(mod)
except NameError as e:
if self.allow_insert_stateless_mods and len(list(mod.parameters())) == 0 and len(list(mod.buffers())) == 0:
path = self._insert_module_as_submodule(mod)
return path
raise e
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
return (not self._stateless_mod_instanciation_depends_on_proxies(m)) and super().is_leaf_module(
m, module_qualified_name
)
def get_concrete_args(model: nn.Module, input_names: List[str]):
sig = inspect.signature(model.forward)
if not (set(input_names) <= set(sig.parameters.keys())):
formatted_input_names = input_names[0] if len(input_names) == 1 else ", ".join(input_names)
formatted_allowed_input_names = ", ".join(sig.parameters.keys())
raise ValueError(
f"The model does not have input(s) named: {formatted_input_names}, expected a subset of the following:"
f" {formatted_allowed_input_names}"
)
return {p.name: p.default for p in sig.parameters.values() if p.name not in input_names}
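# Illustrative example (editorial, hypothetical signature): if model.forward is
#     def forward(self, input_ids=None, attention_mask=None, labels=None): ...
# then get_concrete_args(model, ["input_ids"]) returns
#     {"attention_mask": None, "labels": None}
# i.e. every parameter that is not a traced input is pinned to its default.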
def check_if_model_is_supported(model: PreTrainedModel):
if model.__class__.__name__ not in _SUPPORTED_MODELS:
supported_model_names = ", ".join(_SUPPORTED_MODELS)
raise NotImplementedError(
f"Model {model.__class__.__name__} is not supported yet, supported models: {supported_model_names}"
)
def symbolic_trace(
model: PreTrainedModel,
input_names: Optional[List[str]] = None,
disable_check: bool = False,
) -> GraphModule:
"""
Performs symbolic tracing on the model.
Args:
model ([`PreTrainedModel`]):
The model to trace.
input_names (`List[str]`, *optional*):
The names of the inputs of the traced model. If unset, `model.dummy_inputs.keys()` is used instead.
disable_check (`bool`, *optional*, defaults to `False`):
If `True`, no check is done before trying to trace the model. This is mostly useful for debugging purposes.
Returns:
`torch.fx.GraphModule`: A GraphModule constructed by recording operations seen while tracing the model.
Example:
```python
from transformers.utils.fx import symbolic_trace
traced_model = symbolic_trace(model, input_names=["input_ids", "attention_mask", "token_type_ids"])
```
"""
if input_names is None:
input_names = model.dummy_inputs.keys()
input_names = list(input_names)
concrete_args = get_concrete_args(model, input_names)
if not disable_check:
check_if_model_is_supported(model)
# Tracing.
tracer = HFTracer()
traced_graph = tracer.trace(model, concrete_args=concrete_args)
traced = torch.fx.GraphModule(model, traced_graph)
traced.config = model.config
# The model class must be stored as an attribute to allow model deserialization, which uses trace, and thus
# _generate_dummy_input, where the model class is needed.
traced.class_for_deserialization = model.__class__
traced.device = model.device
return traced
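# Illustrative usage sketch (editorial; assumes a supported model is available):
#     from transformers import BertModel
#     model = BertModel.from_pretrained("bert-base-uncased")
#     traced = symbolic_trace(model, input_names=["input_ids", "attention_mask"])
#     print(traced.graph)  # the recorded torch.fx graph
#     outputs = traced(input_ids=input_ids, attention_mask=attention_mask)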
|
274056675/springboot-openai-chatgpt | 19,821 | mng_web/src/views/authority/datascope.vue | <template>
<basic-container>
<avue-crud :option="option"
:table-loading="loading"
:data="data"
ref="crud"
v-model="form"
:permission="permissionList"
:before-open="beforeOpen"
@row-del="rowDel"
@row-update="rowUpdate"
@row-save="rowSave"
@search-change="searchChange"
@search-reset="searchReset"
@selection-change="selectionChange"
@current-change="currentChange"
@size-change="sizeChange"
@refresh-change="refreshChange"
@on-load="onLoad"
@tree-load="treeLoad">
<template slot-scope="{row}" slot="menu">
<el-button type="text"
icon="el-icon-setting"
size="small"
v-if="permission.data_scope_setting"
plain
style="border: 0;background-color: transparent !important;"
@click.stop="handleDataScope(row)">权限配置
</el-button>
</template>
<template slot-scope="{row}" slot="source">
<div style="text-align:center">
<i :class="row.source"/>
</div>
</template>
</avue-crud>
<el-drawer :title="`[${scopeMenuName}] 数据权限配置`" :visible.sync="drawerVisible" :direction="direction"
append-to-body
:before-close="handleDrawerClose" size="1000px">
<basic-container>
<avue-crud :option="optionScope"
:data="dataScope"
:page="pageScope"
v-model="formScope"
:table-loading="scopeLoading"
ref="crudScope"
@row-del="rowDelScope"
@row-update="rowUpdateScope"
@row-save="rowSaveScope"
:before-open="beforeOpenScope"
@search-change="searchChangeScope"
@search-reset="searchResetScope"
@selection-change="selectionChangeScope"
@current-change="currentChangeScope"
@size-change="sizeChangeScope"
@on-load="onLoadScope">
<template slot="menuLeft">
<el-button type="danger"
size="small"
icon="el-icon-delete"
plain
@click="handleDeleteScope">删 除
</el-button>
</template>
<template slot-scope="{row}"
slot="scopeType">
<el-tag>{{row.scopeTypeName}}</el-tag>
</template>
</avue-crud>
</basic-container>
</el-drawer>
</basic-container>
</template>
<script>
import {
add,
remove,
update,
getLazyMenuList,
getMenu
} from "@/api/system/menu";
import {
addDataScope,
removeDataScope,
updateDataScope,
getListDataScope,
getMenuDataScope
} from "@/api/system/scope";
import {mapGetters} from "vuex";
import iconList from "@/config/iconList";
import func from "@/util/func";
export default {
data() {
return {
form: {},
selectionList: [],
query: {},
loading: true,
parentId: 0,
page: {
pageSize: 10,
currentPage: 1,
total: 0
},
drawerVisible: false,
direction: 'rtl',
scopeMenuId: 0,
scopeMenuCode: '',
scopeMenuName: "菜单",
scopeLoading: false,
menu: true,
watchMode: true,
option: {
lazy: true,
tip: false,
simplePage: true,
searchShow: true,
searchMenuSpan: 6,
dialogWidth: "60%",
tree: true,
border: true,
index: true,
selection: true,
viewBtn: false,
editBtn: false,
addBtn: false,
delBtn: false,
menuWidth: 150,
dialogClickModal: false,
column: [
{
label: "菜单名称",
prop: "name",
search: true,
rules: [
{
required: true,
message: "请输入菜单名称",
trigger: "blur"
}
]
},
{
label: "路由地址",
prop: "path",
rules: [
{
required: true,
message: "请输入路由地址",
trigger: "blur"
}
]
},
{
label: "上级菜单",
prop: "parentId",
type: "tree",
dicUrl: "/api/blade-system/menu/tree",
hide: true,
props: {
label: "title"
},
rules: [
{
required: false,
message: "请选择上级菜单",
trigger: "click"
}
]
},
{
label: "菜单图标",
prop: "source",
type: "icon",
slot: true,
width: 80,
iconList: iconList,
rules: [
{
required: true,
message: "请输入菜单图标",
trigger: "click"
}
]
},
{
label: "菜单编号",
prop: "code",
search: true,
rules: [
{
required: true,
message: "请输入菜单编号",
trigger: "blur"
}
]
},
{
label: "菜单类型",
prop: "category",
type: "radio",
dicData: [
{
label: "菜单",
value: 1
},
{
label: "按钮",
value: 2
}
],
hide: true,
rules: [
{
required: true,
message: "请选择菜单类型",
trigger: "blur"
}
]
},
{
label: "菜单别名",
prop: "alias",
rules: [
{
required: true,
message: "请输入菜单别名",
trigger: "blur"
}
]
},
{
label: "按钮功能",
prop: "action",
type: "radio",
dicData: [
{
label: "工具栏",
value: 0
},
{
label: "操作栏",
value: 1
},
{
label: "工具操作栏",
value: 2
}
],
hide: true,
rules: [
{
required: true,
message: "请选择按钮功能",
trigger: "blur"
}
]
},
{
label: "菜单排序",
prop: "sort",
type: "number",
width: 80,
rules: [
{
required: true,
message: "请输入菜单排序",
trigger: "blur"
}
]
},
{
label: "新窗口",
prop: "isOpen",
type: "radio",
dicData: [
{
label: "否",
value: 0
},
{
label: "是",
value: 1
},
],
hide: true
},
{
label: "菜单备注",
prop: "remark",
type: "textarea",
span: 24,
minRows: 6,
hide: true
}
]
},
data: [],
formScope: {},
selectionListScope: [],
pageScope: {
pageSize: 10,
currentPage: 1,
total: 0
},
optionScope: {
tip: false,
searchShow: true,
searchMenuSpan: 6,
border: true,
index: true,
viewBtn: true,
selection: true,
menuWidth: 200,
dialogWidth: 900,
dialogClickModal: false,
column: [
{
label: "权限名称",
prop: "scopeName",
search: true,
value: "",
rules: [{
required: true,
message: "请输入数据权限名称",
trigger: "blur"
}]
},
{
label: "权限编号",
prop: "resourceCode",
search: true,
width: 100,
rules: [{
required: true,
message: "请输入数据权限编号",
trigger: "blur"
}]
},
{
label: "权限字段",
prop: "scopeColumn",
width: 130,
rules: [{
required: true,
message: "请输入数据权限编号",
trigger: "blur"
}]
},
{
label: "规则类型",
type: "select",
dicUrl: "/api/blade-system/dict/dictionary?code=data_scope_type",
props: {
label: "dictValue",
value: "dictKey"
},
dataType: "number",
slot: true,
width: 140,
prop: "scopeType",
rules: [{
required: true,
message: "请输入通知类型",
trigger: "blur"
}]
},
{
label: "可见字段",
prop: "scopeField",
span: 24,
hide: true,
value: "*",
rules: [{
required: true,
message: "请输入数据权限可见的字段",
trigger: "blur"
}],
},
{
label: "权限类名",
prop: "scopeClass",
span: 24,
hide: true,
rules: [{
required: true,
message: "请输入MybatisMapper对应方法的完整类名路径",
trigger: "blur"
}],
},
{
label: "规则值",
prop: "scopeValue",
span: 24,
minRows: 5,
type: "textarea",
display: true,
hide: true,
},
{
label: "备注",
prop: "remark",
span: 24,
hide: true,
},
]
},
dataScope: []
};
},
watch: {
'formScope.scopeType'() {
this.initScope();
}
},
computed: {
...mapGetters(["permission"]),
permissionList() {
return {
addBtn: this.vaildData(this.permission.menu_add, false),
viewBtn: this.vaildData(this.permission.menu_view, false),
delBtn: this.vaildData(this.permission.menu_delete, false),
editBtn: this.vaildData(this.permission.menu_edit, false)
};
},
ids() {
let ids = [];
this.selectionList.forEach(ele => {
ids.push(ele.id);
});
return ids.join(",");
},
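// Illustrative note (editorial): with selectionList = [{id: 1}, {id: 2}],
// `ids` yields "1,2"; `scopeIds` below builds the same comma-separated
// string from selectionListScope for the batch remove calls.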
scopeIds() {
let ids = [];
this.selectionListScope.forEach(ele => {
ids.push(ele.id);
});
return ids.join(",");
}
},
methods: {
initScope() {
const scopeType = func.toInt(this.formScope.scopeType);
const watchMode = this.watchMode;
let column = "-", name = "暂无";
if (scopeType === 1) {
column = "-";
name = "全部可见";
} else if (scopeType === 2) {
column = "create_user";
name = "本人可见";
} else if (scopeType === 3) {
column = "create_dept";
name = "所在机构可见";
} else if (scopeType === 4) {
column = "create_dept";
name = "所在机构可见及子级可见";
} else if (scopeType === 5) {
column = "";
name = "自定义";
}
this.$refs.crudScope.option.column.forEach(item => {
if (watchMode) {
if (item.prop === "scopeName") {
this.formScope.scopeName = `${this.scopeMenuName} [${name}]`;
}
if (item.prop === "resourceCode") {
this.formScope.resourceCode = this.scopeMenuCode;
}
if (item.prop === "scopeColumn") {
this.formScope.scopeColumn = column;
}
}
if (item.prop === "scopeValue") {
item.display = scopeType === 5;
}
});
},
// Menu management module
rowSave(row, done, loading) {
add(row).then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
done();
}, error => {
window.console.log(error);
loading();
});
},
rowUpdate(row, index, done, loading) {
update(row).then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
done();
}, error => {
window.console.log(error);
loading();
});
},
rowDel(row) {
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(row.id);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
});
},
searchReset() {
this.query = {};
this.parentId = 0;
this.onLoad(this.page);
},
searchChange(params, done) {
this.query = params;
this.parentId = '';
this.page.currentPage = 1;
this.onLoad(this.page, params);
done();
},
selectionChange(list) {
this.selectionList = list;
},
selectionClear() {
this.selectionList = [];
this.$refs.crud.toggleSelection();
},
handleDelete() {
if (this.selectionList.length === 0) {
this.$message.warning("请选择至少一条数据");
return;
}
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return remove(this.ids);
})
.then(() => {
this.onLoad(this.page);
this.$message({
type: "success",
message: "操作成功!"
});
this.$refs.crud.toggleSelection();
});
},
beforeOpen(done, type) {
if (["edit", "view"].includes(type)) {
getMenu(this.form.id).then(res => {
this.form = res.data.data;
});
}
done();
},
currentChange(currentPage) {
this.page.currentPage = currentPage;
},
sizeChange(pageSize) {
this.page.pageSize = pageSize;
},
refreshChange() {
this.onLoad(this.page, this.query);
},
onLoad(page, params = {}) {
this.loading = true;
getLazyMenuList(this.parentId, Object.assign(params, this.query)).then(res => {
this.data = res.data.data;
this.loading = false;
this.selectionClear();
});
},
treeLoad(tree, treeNode, resolve) {
const parentId = tree.id;
getLazyMenuList(parentId).then(res => {
resolve(res.data.data);
});
},
// Data scope (permission) module
handleDataScope(row) {
this.drawerVisible = true;
this.scopeMenuId = row.id;
this.scopeMenuCode = row.code;
this.scopeMenuName = row.name;
this.onLoadScope(this.pageScope)
},
handleDrawerClose(hide) {
hide();
},
rowSaveScope(row, done, loading) {
row = {
...row,
menuId: this.scopeMenuId,
};
addDataScope(row).then(() => {
this.onLoadScope(this.pageScope);
this.$message({
type: "success",
message: "操作成功!"
});
done();
}, error => {
window.console.log(error);
loading();
});
},
rowUpdateScope(row, index, done, loading) {
row = {
...row,
menuId: this.scopeMenuId,
};
updateDataScope(row).then(() => {
this.onLoadScope(this.pageScope);
this.$message({
type: "success",
message: "操作成功!"
});
done();
}, error => {
window.console.log(error);
loading();
});
},
rowDelScope(row) {
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return removeDataScope(row.id);
})
.then(() => {
this.onLoadScope(this.pageScope);
this.$message({
type: "success",
message: "操作成功!"
});
});
},
handleDeleteScope() {
if (this.selectionListScope.length === 0) {
this.$message.warning("请选择至少一条数据");
return;
}
this.$confirm("确定将选择数据删除?", {
confirmButtonText: "确定",
cancelButtonText: "取消",
type: "warning"
})
.then(() => {
return removeDataScope(this.scopeIds);
})
.then(() => {
this.onLoadScope(this.pageScope);
this.$message({
type: "success",
message: "操作成功!"
});
this.$refs.crudScope.toggleSelection();
});
},
beforeOpenScope(done, type) {
if (["add"].includes(type)) {
this.watchMode = true;
this.initScope();
}
if (["edit", "view"].includes(type)) {
this.watchMode = false;
getMenuDataScope(this.formScope.id).then(res => {
this.formScope = res.data.data;
});
}
done();
},
searchResetScope() {
this.onLoadScope(this.pageScope);
},
searchChangeScope(params, done) {
this.onLoadScope(this.pageScope, params);
done();
},
selectionChangeScope(list) {
this.selectionListScope = list;
},
currentChangeScope(currentPage) {
this.pageScope.currentPage = currentPage;
},
sizeChangeScope(pageSize) {
this.pageScope.pageSize = pageSize;
},
onLoadScope(page, params = {}) {
this.scopeLoading = true;
const values = {
...params,
menuId: this.scopeMenuId,
}
getListDataScope(page.currentPage, page.pageSize, Object.assign(values, this.query)).then(res => {
const data = res.data.data;
this.pageScope.total = data.total;
this.dataScope = data.records;
this.selectionListScope = [];
this.scopeLoading = false;
});
},
}
};
</script>
|
274056675/springboot-openai-chatgpt | 2,411 | mng_web/src/styles/theme/white.scss | .theme-white {
.el-menu--popup{
.el-menu-item{
background-color: #fff;
i,span{
color:#666;
}
&:hover{
i,span{
color:#333;
}
}
&.is-active {
background-color: #409EFF;
&:before {
content: '';
top: 0;
left: 0;
bottom: 0;
width: 4px;
background: #409eff;
position: absolute;
}
i,span{
color:#fff;
}
}
}
}
.avue-header,
.avue-logo,
.tags-container {
background-color: #409EFF;
}
.avue-sidebar--tip{
background-color:transparent;
color:#333;
}
.el-dropdown{
color:#fff;
}
.avue-logo_title{
font-weight: 400;
color:#fff;
}
.logo_title,
.avue-breadcrumb {
  color: #fff;
  i {
    color: #fff;
  }
}
.avue-top {
  .el-menu-item {
    i,
    span {
      color: #fff;
    }
    &:hover {
      i,
      span {
        color: #fff;
      }
    }
  }
}
.avue-sidebar{
box-shadow: 2px 0 6px rgba(0, 21, 41, 0.15);
background-color: #fff;
padding-top: 70px;
.el-menu-item,.el-submenu__title{
i,span{
color:#666
}
&:hover{
background: transparent;
i,span{
color:#333;
}
}
&.is-active {
background-color: #409EFF;
i,span{
color:#fff;
}
}
}
}
.top-search {
.el-input__inner{
color: #333;
}
input::-webkit-input-placeholder,
textarea::-webkit-input-placeholder {
/* WebKit browsers */
color: #fff;
}
input:-moz-placeholder,
textarea:-moz-placeholder {
/* Mozilla Firefox 4 to 18 */
color: #fff;
}
input::-moz-placeholder,
textarea::-moz-placeholder {
/* Mozilla Firefox 19+ */
color: #fff;
}
input:-ms-input-placeholder,
textarea:-ms-input-placeholder {
/* Internet Explorer 10+ */
color: #fff;
}
}
.top-bar__item {
i {
color: #fff;
}
}
} |
233zzh/TitanDataOperationSystem | 113,565 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/flot/examples/axes-time-zones/tz/asia | # <pre>
# This file is in the public domain, so clarified as of
# 2009-05-17 by Arthur David Olson.
# This data is by no means authoritative; if you think you know better,
# go ahead and edit the file (and please send any changes to
# tz@iana.org for general use in the future).
# From Paul Eggert (2006-03-22):
#
# A good source for time zone historical data outside the U.S. is
# Thomas G. Shanks and Rique Pottenger, The International Atlas (6th edition),
# San Diego: ACS Publications, Inc. (2003).
#
# Gwillim Law writes that a good source
# for recent time zone data is the International Air Transport
# Association's Standard Schedules Information Manual (IATA SSIM),
# published semiannually. Law sent in several helpful summaries
# of the IATA's data after 1990.
#
# Except where otherwise noted, Shanks & Pottenger is the source for
# entries through 1990, and IATA SSIM is the source for entries afterwards.
#
# Another source occasionally used is Edward W. Whitman, World Time Differences,
# Whitman Publishing Co, 2 Niagara Av, Ealing, London (undated), which
# I found in the UCLA library.
#
# A reliable and entertaining source about time zones is
# Derek Howse, Greenwich time and longitude, Philip Wilson Publishers (1997).
#
# I invented the abbreviations marked `*' in the following table;
# the rest are from earlier versions of this file, or from other sources.
# Corrections are welcome!
# std dst
# LMT Local Mean Time
# 2:00 EET EEST Eastern European Time
# 2:00 IST IDT Israel
# 3:00 AST ADT Arabia*
# 3:30 IRST IRDT Iran
# 4:00 GST Gulf*
# 5:30 IST India
# 7:00 ICT Indochina*
# 7:00 WIT west Indonesia
# 8:00 CIT central Indonesia
# 8:00 CST China
# 9:00 CJT Central Japanese Time (1896/1937)*
# 9:00 EIT east Indonesia
# 9:00 JST JDT Japan
# 9:00 KST KDT Korea
# 9:30 CST (Australian) Central Standard Time
#
# See the `europe' file for Russia and Turkey in Asia.
# From Guy Harris:
# Incorporates data for Singapore from Robert Elz' asia 1.1, as well as
# additional information from Tom Yap, Sun Microsystems Intercontinental
# Technical Support (including a page from the Official Airline Guide -
# Worldwide Edition). The names for time zones are guesses.
###############################################################################
# These rules are stolen from the `europe' file.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule EUAsia 1981 max - Mar lastSun 1:00u 1:00 S
Rule EUAsia 1979 1995 - Sep lastSun 1:00u 0 -
Rule EUAsia 1996 max - Oct lastSun 1:00u 0 -
Rule E-EurAsia 1981 max - Mar lastSun 0:00 1:00 S
Rule E-EurAsia 1979 1995 - Sep lastSun 0:00 0 -
Rule E-EurAsia 1996 max - Oct lastSun 0:00 0 -
Rule RussiaAsia 1981 1984 - Apr 1 0:00 1:00 S
Rule RussiaAsia 1981 1983 - Oct 1 0:00 0 -
Rule RussiaAsia 1984 1991 - Sep lastSun 2:00s 0 -
Rule RussiaAsia 1985 1991 - Mar lastSun 2:00s 1:00 S
Rule RussiaAsia 1992 only - Mar lastSat 23:00 1:00 S
Rule RussiaAsia 1992 only - Sep lastSat 23:00 0 -
Rule RussiaAsia 1993 max - Mar lastSun 2:00s 1:00 S
Rule RussiaAsia 1993 1995 - Sep lastSun 2:00s 0 -
Rule RussiaAsia 1996 max - Oct lastSun 2:00s 0 -
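# (Editorial note, not part of the original data: reading the first
# EUAsia line above, "Rule EUAsia 1981 max - Mar lastSun 1:00u 1:00 S"
# means that from 1981 onward, on the last Sunday of March at 01:00
# universal time ("u"), clocks move forward one hour (SAVE 1:00), and
# the letter "S" is substituted for "%s" in zone abbreviations.)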
# Afghanistan
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Kabul 4:36:48 - LMT 1890
4:00 - AFT 1945
4:30 - AFT
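# (Editorial note, not part of the original data: a Zone line gives the
# UTC offset, the rule set ("-" means no DST), the abbreviation format,
# and an optional UNTIL date at which the next continuation line takes
# over. Asia/Kabul above kept local mean time 4:36:48 until 1890, then
# 4:00 until 1945, and 4:30 thereafter.)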
# Armenia
# From Paul Eggert (2006-03-22):
# Shanks & Pottenger have Yerevan switching to 3:00 (with Russian DST)
# in spring 1991, then to 4:00 with no DST in fall 1995, then
# readopting Russian DST in 1997. Go with Shanks & Pottenger, even
# when they disagree with others. Edgar Der-Danieliantz
# reported (1996-05-04) that Yerevan probably wouldn't use DST
# in 1996, though it did use DST in 1995. IATA SSIM (1991/1998) reports that
# Armenia switched from 3:00 to 4:00 in 1998 and observed DST after 1991,
# but started switching at 3:00s in 1998.
# From Arthur David Olson (2011-06-15):
# While Russia abandoned DST in 2011, Armenia may choose to
# follow Russia's "old" rules.
# From Alexander Krivenyshev (2012-02-10):
# According to News Armenia, on Feb 9, 2012,
# http://newsarmenia.ru/society/20120209/42609695.html
#
# The Armenia National Assembly adopted final reading of Amendments to the
# Law "On procedure of calculation time on the territory of the Republic of
# Armenia" according to which Armenia [is] abolishing Daylight Saving Time.
# or
# (brief)
# http://www.worldtimezone.com/dst_news/dst_news_armenia03.html
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Yerevan 2:58:00 - LMT 1924 May 2
3:00 - YERT 1957 Mar # Yerevan Time
4:00 RussiaAsia YER%sT 1991 Mar 31 2:00s
3:00 1:00 YERST 1991 Sep 23 # independence
3:00 RussiaAsia AM%sT 1995 Sep 24 2:00s
4:00 - AMT 1997
4:00 RussiaAsia AM%sT 2012 Mar 25 2:00s
4:00 - AMT
# Azerbaijan
# From Rustam Aliyev of the Azerbaijan Internet Forum (2005-10-23):
# According to the resolution of Cabinet of Ministers, 1997
# Resolution available at: http://aif.az/docs/daylight_res.pdf
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Azer 1997 max - Mar lastSun 4:00 1:00 S
Rule Azer 1997 max - Oct lastSun 5:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Baku 3:19:24 - LMT 1924 May 2
3:00 - BAKT 1957 Mar # Baku Time
4:00 RussiaAsia BAK%sT 1991 Mar 31 2:00s
3:00 1:00 BAKST 1991 Aug 30 # independence
3:00 RussiaAsia AZ%sT 1992 Sep lastSat 23:00
4:00 - AZT 1996 # Azerbaijan time
4:00 EUAsia AZ%sT 1997
4:00 Azer AZ%sT
# Bahrain
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Bahrain 3:22:20 - LMT 1920 # Al Manamah
4:00 - GST 1972 Jun
3:00 - AST
# Bangladesh
# From Alexander Krivenyshev (2009-05-13):
# According to newspaper Asian Tribune (May 6, 2009) Bangladesh may introduce
# Daylight Saving Time from June 16 to Sept 30
#
# Bangladesh to introduce daylight saving time likely from June 16
# <a href="http://www.asiantribune.com/?q=node/17288">
# http://www.asiantribune.com/?q=node/17288
# </a>
# or
# <a href="http://www.worldtimezone.com/dst_news/dst_news_bangladesh02.html">
# http://www.worldtimezone.com/dst_news/dst_news_bangladesh02.html
# </a>
#
# "... Bangladesh government has decided to switch daylight saving time from
# June
# 16 till September 30 in a bid to ensure maximum use of daylight to cope with
# crippling power crisis. "
#
# The switch will remain in effect from June 16 to Sept 30 (2009) but if
# implemented the next year, it will come in force from April 1, 2010
# From Steffen Thorsen (2009-06-02):
# They have finally decided now, but changed the start date to midnight between
# the 19th and 20th, and they have not set the end date yet.
#
# Some sources:
# <a href="http://in.reuters.com/article/southAsiaNews/idINIndia-40017620090601">
# http://in.reuters.com/article/southAsiaNews/idINIndia-40017620090601
# </a>
# <a href="http://bdnews24.com/details.php?id=85889&cid=2">
# http://bdnews24.com/details.php?id=85889&cid=2
# </a>
#
# Our wrap-up:
# <a href="http://www.timeanddate.com/news/time/bangladesh-daylight-saving-2009.html">
# http://www.timeanddate.com/news/time/bangladesh-daylight-saving-2009.html
# </a>
# From A. N. M. Kamrus Saadat (2009-06-15):
# Finally we've got the official mail regarding DST start time where DST start
# time is mentioned as Jun 19 2009, 23:00 from BTRC (Bangladesh
# Telecommunication Regulatory Commission).
#
# No DST end date has been announced yet.
# From Alexander Krivenyshev (2009-09-25):
# Bangladesh won't go back to Standard Time from October 1, 2009,
# instead it will continue DST measure till the cabinet makes a fresh decision.
#
# Following report by same newspaper-"The Daily Star Friday":
# "DST change awaits cabinet decision-Clock won't go back by 1-hr from Oct 1"
# <a href="http://www.thedailystar.net/newDesign/news-details.php?nid=107021">
# http://www.thedailystar.net/newDesign/news-details.php?nid=107021
# </a>
# or
# <a href="http://www.worldtimezone.com/dst_news/dst_news_bangladesh04.html">
# http://www.worldtimezone.com/dst_news/dst_news_bangladesh04.html
# </a>
# From Steffen Thorsen (2009-10-13):
# IANS (Indo-Asian News Service) now reports:
# Bangladesh has decided that the clock advanced by an hour to make
# maximum use of daylight hours as an energy saving measure would
# "continue for an indefinite period."
#
# One of many places where it is published:
# <a href="http://www.thaindian.com/newsportal/business/bangladesh-to-continue-indefinitely-with-advanced-time_100259987.html">
# http://www.thaindian.com/newsportal/business/bangladesh-to-continue-indefinitely-with-advanced-time_100259987.html
# </a>
# From Alexander Krivenyshev (2009-12-24):
# According to Bangladesh newspaper "The Daily Star,"
# Bangladesh will change its clock back to Standard Time on Dec 31, 2009.
#
# Clock goes back 1-hr on Dec 31 night.
# <a href="http://www.thedailystar.net/newDesign/news-details.php?nid=119228">
# http://www.thedailystar.net/newDesign/news-details.php?nid=119228
# </a>
# and
# <a href="http://www.worldtimezone.com/dst_news/dst_news_bangladesh05.html">
# http://www.worldtimezone.com/dst_news/dst_news_bangladesh05.html
# </a>
#
# "...The government yesterday decided to put the clock back by one hour
# on December 31 midnight and the new time will continue until March 31,
# 2010 midnight. The decision came at a cabinet meeting at the Prime
# Minister's Office last night..."
# From Alexander Krivenyshev (2010-03-22):
# According to Bangladesh newspaper "The Daily Star,"
# Cabinet cancels Daylight Saving Time
# <a href="http://www.thedailystar.net/newDesign/latest_news.php?nid=22817">
# http://www.thedailystar.net/newDesign/latest_news.php?nid=22817
# </a>
# or
# <a href="http://www.worldtimezone.com/dst_news/dst_news_bangladesh06.html">
# http://www.worldtimezone.com/dst_news/dst_news_bangladesh06.html
# </a>
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Dhaka 2009 only - Jun 19 23:00 1:00 S
Rule Dhaka 2009 only - Dec 31 23:59 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Dhaka 6:01:40 - LMT 1890
5:53:20 - HMT 1941 Oct # Howrah Mean Time?
6:30 - BURT 1942 May 15 # Burma Time
5:30 - IST 1942 Sep
6:30 - BURT 1951 Sep 30
6:00 - DACT 1971 Mar 26 # Dacca Time
6:00 - BDT 2009
6:00 Dhaka BD%sT
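# (Editorial note, not part of the original data: the "%s" in BD%sT is
# replaced by the LETTER column of whichever Dhaka rule is in force, so
# the zone reads "BDST" while the 2009 DST rule applies and "BDT" when
# the LETTER is "-", i.e. standard time.)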
# Bhutan
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Thimphu 5:58:36 - LMT 1947 Aug 15 # or Thimbu
5:30 - IST 1987 Oct
6:00 - BTT # Bhutan Time
# British Indian Ocean Territory
# Whitman and the 1995 CIA time zone map say 5:00, but the
# 1997 and later maps say 6:00. Assume the switch occurred in 1996.
# We have no information as to when standard time was introduced;
# assume it occurred in 1907, the same year as Mauritius (which
# then contained the Chagos Archipelago).
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Chagos 4:49:40 - LMT 1907
5:00 - IOT 1996 # BIOT Time
6:00 - IOT
# Brunei
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Brunei 7:39:40 - LMT 1926 Mar # Bandar Seri Begawan
7:30 - BNT 1933
8:00 - BNT
# Burma / Myanmar
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Rangoon 6:24:40 - LMT 1880 # or Yangon
6:24:36 - RMT 1920 # Rangoon Mean Time?
6:30 - BURT 1942 May # Burma Time
9:00 - JST 1945 May 3
6:30 - MMT # Myanmar Time
# Cambodia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Phnom_Penh 6:59:40 - LMT 1906 Jun 9
7:06:20 - SMT 1911 Mar 11 0:01 # Saigon MT?
7:00 - ICT 1912 May
8:00 - ICT 1931 May
7:00 - ICT
# China
# From Guy Harris:
# People's Republic of China. Yes, they really have only one time zone.
# From Bob Devine (1988-01-28):
# No they don't. See TIME mag, 1986-02-17 p.52. Even though
# China is across 4 physical time zones, before Feb 1, 1986 only the
# Peking (Beijing) time zone was recognized. Since that date, China
# has two of 'em -- Peking's and Urumqi (named after the capital of
# the Xinjiang Uyghur Autonomous Region). I don't know about DST for it.
#
# . . .I just deleted the DST table and this editor makes it too
# painful to suck in another copy.. So, here is what I have for
# DST start/end dates for Peking's time zone (info from AP):
#
# 1986 May 4 - Sept 14
# 1987 mid-April - ??
# From U. S. Naval Observatory (1989-01-19):
# CHINA 8 H AHEAD OF UTC ALL OF CHINA, INCL TAIWAN
# CHINA 9 H AHEAD OF UTC APR 17 - SEP 10
# From Paul Eggert (2006-03-22):
# Shanks & Pottenger write that China (except for Hong Kong and Macau)
# has had a single time zone since 1980 May 1, observing summer DST
# from 1986 through 1991; this contradicts Devine's
# note about Time magazine, though apparently _something_ happened in 1986.
# Go with Shanks & Pottenger for now. I made up names for the other
# pre-1980 time zones.
# From Shanks & Pottenger:
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Shang 1940 only - Jun 3 0:00 1:00 D
Rule Shang 1940 1941 - Oct 1 0:00 0 S
Rule Shang 1941 only - Mar 16 0:00 1:00 D
Rule PRC 1986 only - May 4 0:00 1:00 D
Rule PRC 1986 1991 - Sep Sun>=11 0:00 0 S
Rule PRC 1987 1991 - Apr Sun>=10 0:00 1:00 D
# From Anthony Fok (2001-12-20):
# BTW, I did some research on-line and found some info regarding these five
# historic timezones from some Taiwan websites. And yes, there are official
# Chinese names for these locales (before 1949).
#
# From Jesper Norgaard Welen (2006-07-14):
# I have investigated the timezones around 1970 on the
# http://www.astro.com/atlas site [with provinces and county
# boundaries summarized below].... A few other exceptions were two
# counties on the Sichuan side of the Xizang-Sichuan border,
# counties Dege and Baiyu which lies on the Sichuan side and are
# therefore supposed to be GMT+7, Xizang region being GMT+6, but Dege
# county is GMT+8 according to astro.com while Baiyu county is GMT+6
# (could be true), for the moment I am assuming that those two
# counties are mistakes in the astro.com data.
# From Paul Eggert (2008-02-11):
# I just now checked Google News for western news sources that talk
# about China's single time zone, and couldn't find anything before 1986
# talking about China being in one time zone. (That article was: Jim
# Mann, "A clumsy embrace for another western custom: China on daylight
# time--sort of", Los Angeles Times, 1986-05-05. By the way, this
# article confirms the tz database's data claiming that China began
# observing daylight saving time in 1986.
#
# From Thomas S. Mullaney (2008-02-11):
# I think you're combining two subjects that need to treated
# separately: daylight savings (which, you're correct, wasn't
# implemented until the 1980s) and the unified time zone centered near
# Beijing (which was implemented in 1949). Briefly, there was also a
# "Lhasa Time" in Tibet and "Urumqi Time" in Xinjiang. The first was
# ceased, and the second eventually recognized (again, in the 1980s).
#
# From Paul Eggert (2008-06-30):
# There seems to be a good chance China switched to a single time zone in 1949
# rather than in 1980 as Shanks & Pottenger have it, but we don't have a
# reliable documentary source saying so yet, so for now we still go with
# Shanks & Pottenger.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
# Changbai Time ("Long-white Time", Long-white = Heilongjiang area)
# Heilongjiang (except Mohe county), Jilin
Zone Asia/Harbin 8:26:44 - LMT 1928 # or Haerbin
8:30 - CHAT 1932 Mar # Changbai Time
8:00 - CST 1940
9:00 - CHAT 1966 May
8:30 - CHAT 1980 May
8:00 PRC C%sT
# Zhongyuan Time ("Central plain Time")
# most of China
Zone Asia/Shanghai 8:05:52 - LMT 1928
8:00 Shang C%sT 1949
8:00 PRC C%sT
# Long-shu Time (probably due to Long and Shu being two names of that area)
# Guangxi, Guizhou, Hainan, Ningxia, Sichuan, Shaanxi, and Yunnan;
# most of Gansu; west Inner Mongolia; west Qinghai; and the Guangdong
# counties Deqing, Enping, Kaiping, Luoding, Taishan, Xinxing,
# Yangchun, Yangjiang, Yu'nan, and Yunfu.
Zone Asia/Chongqing 7:06:20 - LMT 1928 # or Chungking
7:00 - LONT 1980 May # Long-shu Time
8:00 PRC C%sT
# Xin-zang Time ("Xinjiang-Tibet Time")
# The Gansu counties Aksay, Anxi, Dunhuang, Subei; west Qinghai;
# the Guangdong counties Xuwen, Haikang, Suixi, Lianjiang,
# Zhanjiang, Wuchuan, Huazhou, Gaozhou, Maoming, Dianbai, and Xinyi;
# east Tibet, including Lhasa, Chamdo, Shigaise, Jimsar, Shawan and Hutubi;
# east Xinjiang, including Urumqi, Turpan, Karamay, Korla, Minfeng, Jinghe,
# Wusu, Qiemo, Xinyan, Wulanwusu, Jinghe, Yumin, Tacheng, Tuoli, Emin,
# Shihezi, Changji, Yanqi, Heshuo, Tuokexun, Tulufan, Shanshan, Hami,
# Fukang, Kuitun, Kumukuli, Miquan, Qitai, and Turfan.
Zone Asia/Urumqi 5:50:20 - LMT 1928 # or Urumchi
6:00 - URUT 1980 May # Urumqi Time
8:00 PRC C%sT
# Kunlun Time
# West Tibet, including Pulan, Aheqi, Shufu, Shule;
# West Xinjiang, including Aksu, Atushi, Yining, Hetian, Cele, Luopu, Nileke,
# Zhaosu, Tekesi, Gongliu, Chabuchaer, Huocheng, Bole, Pishan, Suiding,
# and Yarkand.
# From Luther Ma (2009-10-17):
# Almost all (>99.9%) ethnic Chinese (properly ethnic Han) living in
# Xinjiang use Chinese Standard Time. Some are aware of Xinjiang time,
# but have no need of it. All planes, trains, and schools function on
# what is called "Beijing time." When Han make an appointment in Chinese
# they implicitly use Beijing time.
#
# On the other hand, ethnic Uyghurs, who make up about half the
# population of Xinjiang, typically use "Xinjiang time" which is two
# hours behind Beijing time, or UTC +0600. The government of the Xinjiang
# Uyghur Autonomous Region, (XAUR, or just Xinjiang for short) as well as
# local governments such as the Urumqi city government use both times in
# publications, referring to what is popularly called Xinjiang time as
# "Urumqi time." When Uyghurs make an appointment in the Uyghur language
# they almost invariably use Xinjiang time.
#
# (Their ethnic Han compatriots would typically have no clue of its
# widespread use, however, because so extremely few of them are fluent in
# Uyghur, comparable to the number of Anglo-Americans fluent in Navajo.)
#
# (...As with the rest of China there was a brief interval ending in 1990
# or 1991 when summer time was in use. The confusion was severe, with
# the province not having dual times but four times in use at the same
# time. Some areas remained on standard Xinjiang time or Beijing time and
# others moving their clocks ahead.)
#
# ...an example of an official website using of Urumqi time.
#
# The first few lines of the Google translation of
# <a href="http://www.fjysgl.gov.cn/show.aspx?id=2379&cid=39">
# http://www.fjysgl.gov.cn/show.aspx?id=2379&cid=39
# </a>
# (retrieved 2009-10-13)
# > Urumqi fire seven people are missing the alleged losses of at least
# > 500 million yuan
# >
# > (Reporter Dong Liu) the day before 20:20 or so (Urumqi Time 18:20),
# > Urumqi City Department of International Plaza Luther Qiantang River
# > burst fire. As of yesterday, 18:30, Urumqi City Fire officers and men
# > have worked continuously for 22 hours...
# From Luther Ma (2009-11-19):
# With the risk of being redundant to previous answers these are the most common
# English "transliterations" (w/o using non-English symbols):
#
# 1. Wulumuqi...
# 2. Kashi...
# 3. Urumqi...
# 4. Kashgar...
# ...
# 5. It seems that Uyghurs in Urumqi has been using Xinjiang since at least the
# 1960's. I know of one Han, now over 50, who grew up in the surrounding
# countryside and used Xinjiang time as a child.
#
# 6. Likewise for Kashgar and the rest of south Xinjiang I don't know of any
# start date for Xinjiang time.
#
# Without having access to local historical records, nor the ability to legally
# publish them, I would go with October 1, 1949, when Xinjiang became the Uyghur
# Autonomous Region under the PRC. (Before that Uyghurs, of course, would also
# not be using Beijing time, but some local time.)
Zone Asia/Kashgar 5:03:56 - LMT 1928 # or Kashi or Kaxgar
5:30 - KAST 1940 # Kashgar Time
5:00 - KAST 1980 May
8:00 PRC C%sT
# From Lee Yiu Chung (2009-10-24):
# I found there are some mistakes for the...DST rule for Hong
# Kong. [According] to the DST record from Hong Kong Observatory (actually,
# it is not [an] observatory, but the official meteorological agency of HK,
# and also serves as the official timing agency), there are some missing
# and incorrect rules. Although the exact switch over time is missing, I
# think 3:30 is correct. The official DST record for Hong Kong can be
# obtained from
# <a href="http://www.hko.gov.hk/gts/time/Summertime.htm">
# http://www.hko.gov.hk/gts/time/Summertime.htm
# </a>.
# From Arthur David Olson (2009-10-28):
# Here are the dates given at
# <a href="http://www.hko.gov.hk/gts/time/Summertime.htm">
# http://www.hko.gov.hk/gts/time/Summertime.htm
# </a>
# as of 2009-10-28:
# Year Period
# 1941 1 Apr to 30 Sep
# 1942 Whole year
# 1943 Whole year
# 1944 Whole year
# 1945 Whole year
# 1946 20 Apr to 1 Dec
# 1947 13 Apr to 30 Dec
# 1948 2 May to 31 Oct
# 1949 3 Apr to 30 Oct
# 1950 2 Apr to 29 Oct
# 1951 1 Apr to 28 Oct
# 1952 6 Apr to 25 Oct
# 1953 5 Apr to 1 Nov
# 1954 21 Mar to 31 Oct
# 1955 20 Mar to 6 Nov
# 1956 18 Mar to 4 Nov
# 1957 24 Mar to 3 Nov
# 1958 23 Mar to 2 Nov
# 1959 22 Mar to 1 Nov
# 1960 20 Mar to 6 Nov
# 1961 19 Mar to 5 Nov
# 1962 18 Mar to 4 Nov
# 1963 24 Mar to 3 Nov
# 1964 22 Mar to 1 Nov
# 1965 18 Apr to 17 Oct
# 1966 17 Apr to 16 Oct
# 1967 16 Apr to 22 Oct
# 1968 21 Apr to 20 Oct
# 1969 20 Apr to 19 Oct
# 1970 19 Apr to 18 Oct
# 1971 18 Apr to 17 Oct
# 1972 16 Apr to 22 Oct
# 1973 22 Apr to 21 Oct
# 1973/74 30 Dec 73 to 20 Oct 74
# 1975 20 Apr to 19 Oct
# 1976 18 Apr to 17 Oct
# 1977 Nil
# 1978 Nil
# 1979 13 May to 21 Oct
# 1980 to Now Nil
# The page does not give start or end times of day.
# The page does not give a start date for 1942.
# The page does not give an end date for 1945.
# The Japanese occupation of Hong Kong began on 1941-12-25.
# The Japanese surrender of Hong Kong was signed 1945-09-15.
# For lack of anything better, use start of those days as the transition times.
# Hong Kong (Xianggang)
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule HK 1941 only - Apr 1 3:30 1:00 S
Rule HK 1941 only - Sep 30 3:30 0 -
Rule HK 1946 only - Apr 20 3:30 1:00 S
Rule HK 1946 only - Dec 1 3:30 0 -
Rule HK 1947 only - Apr 13 3:30 1:00 S
Rule HK 1947 only - Dec 30 3:30 0 -
Rule HK 1948 only - May 2 3:30 1:00 S
Rule HK 1948 1951 - Oct lastSun 3:30 0 -
Rule HK 1952 only - Oct 25 3:30 0 -
Rule HK 1949 1953 - Apr Sun>=1 3:30 1:00 S
Rule HK 1953 only - Nov 1 3:30 0 -
Rule HK 1954 1964 - Mar Sun>=18 3:30 1:00 S
Rule HK 1954 only - Oct 31 3:30 0 -
Rule HK 1955 1964 - Nov Sun>=1 3:30 0 -
Rule HK 1965 1976 - Apr Sun>=16 3:30 1:00 S
Rule HK 1965 1976 - Oct Sun>=16 3:30 0 -
Rule HK 1973 only - Dec 30 3:30 1:00 S
Rule HK 1979 only - May Sun>=8 3:30 1:00 S
Rule HK 1979 only - Oct Sun>=16 3:30 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Hong_Kong 7:36:36 - LMT 1904 Oct 30
8:00 HK HK%sT 1941 Dec 25
9:00 - JST 1945 Sep 15
8:00 HK HK%sT
###############################################################################
# Taiwan
# Shanks & Pottenger write that Taiwan observed DST during 1945, when it
# was still controlled by Japan. This is hard to believe, but we don't
# have any other information.
# From smallufo (2010-04-03):
# According to Taiwan's CWB,
# <a href="http://www.cwb.gov.tw/V6/astronomy/cdata/summert.htm">
# http://www.cwb.gov.tw/V6/astronomy/cdata/summert.htm
# </a>
# Taipei has DST in 1979 between July 1st and Sep 30.
# From Arthur David Olson (2010-04-07):
# Here's Google's translation of the table at the bottom of the "summert.htm" page:
# Decade Name Start and end date
# Republic of China 34 years to 40 years (AD 1945-1951 years) Summer Time May 1 to September 30
# 41 years of the Republic of China (AD 1952) Daylight Saving Time March 1 to October 31
# Republic of China 42 years to 43 years (AD 1953-1954 years) Daylight Saving Time April 1 to October 31
# In the 44 years to 45 years (AD 1955-1956 years) Daylight Saving Time April 1 to September 30
# Republic of China 46 years to 48 years (AD 1957-1959) Summer Time April 1 to September 30
# Republic of China 49 years to 50 years (AD 1960-1961) Summer Time June 1 to September 30
# Republic of China 51 years to 62 years (AD 1962-1973 years) Stop Summer Time
# Republic of China 63 years to 64 years (1974-1975 AD) Daylight Saving Time April 1 to September 30
# Republic of China 65 years to 67 years (1976-1978 AD) Stop Daylight Saving Time
# Republic of China 68 years (AD 1979) Daylight Saving Time July 1 to September 30
# Republic of China since 69 years (AD 1980) Stop Daylight Saving Time
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Taiwan 1945 1951 - May 1 0:00 1:00 D
Rule Taiwan 1945 1951 - Oct 1 0:00 0 S
Rule Taiwan 1952 only - Mar 1 0:00 1:00 D
Rule Taiwan 1952 1954 - Nov 1 0:00 0 S
Rule Taiwan 1953 1959 - Apr 1 0:00 1:00 D
Rule Taiwan 1955 1961 - Oct 1 0:00 0 S
Rule Taiwan 1960 1961 - Jun 1 0:00 1:00 D
Rule Taiwan 1974 1975 - Apr 1 0:00 1:00 D
Rule Taiwan 1974 1975 - Oct 1 0:00 0 S
Rule Taiwan 1979 only - Jun 30 0:00 1:00 D
Rule Taiwan 1979 only - Sep 30 0:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Taipei 8:06:00 - LMT 1896 # or Taibei or T'ai-pei
8:00 Taiwan C%sT
# Macau (Macao, Aomen)
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Macau 1961 1962 - Mar Sun>=16 3:30 1:00 S
Rule Macau 1961 1964 - Nov Sun>=1 3:30 0 -
Rule Macau 1963 only - Mar Sun>=16 0:00 1:00 S
Rule Macau 1964 only - Mar Sun>=16 3:30 1:00 S
Rule Macau 1965 only - Mar Sun>=16 0:00 1:00 S
Rule Macau 1965 only - Oct 31 0:00 0 -
Rule Macau 1966 1971 - Apr Sun>=16 3:30 1:00 S
Rule Macau 1966 1971 - Oct Sun>=16 3:30 0 -
Rule Macau 1972 1974 - Apr Sun>=15 0:00 1:00 S
Rule Macau 1972 1973 - Oct Sun>=15 0:00 0 -
Rule Macau 1974 1977 - Oct Sun>=15 3:30 0 -
Rule Macau 1975 1977 - Apr Sun>=15 3:30 1:00 S
Rule Macau 1978 1980 - Apr Sun>=15 0:00 1:00 S
Rule Macau 1978 1980 - Oct Sun>=15 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Macau 7:34:20 - LMT 1912
8:00 Macau MO%sT 1999 Dec 20 # return to China
8:00 PRC C%sT
###############################################################################
# Cyprus
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Cyprus 1975 only - Apr 13 0:00 1:00 S
Rule Cyprus 1975 only - Oct 12 0:00 0 -
Rule Cyprus 1976 only - May 15 0:00 1:00 S
Rule Cyprus 1976 only - Oct 11 0:00 0 -
Rule Cyprus 1977 1980 - Apr Sun>=1 0:00 1:00 S
Rule Cyprus 1977 only - Sep 25 0:00 0 -
Rule Cyprus 1978 only - Oct 2 0:00 0 -
Rule Cyprus 1979 1997 - Sep lastSun 0:00 0 -
Rule Cyprus 1981 1998 - Mar lastSun 0:00 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Nicosia 2:13:28 - LMT 1921 Nov 14
2:00 Cyprus EE%sT 1998 Sep
2:00 EUAsia EE%sT
# IATA SSIM (1998-09) has Cyprus using EU rules for the first time.
# Classically, Cyprus belongs to Asia; e.g. see Herodotus, Histories, I.72.
# However, for various reasons many users expect to find it under Europe.
Link Asia/Nicosia Europe/Nicosia
# Georgia
# From Paul Eggert (1994-11-19):
# Today's _Economist_ (p 60) reports that Georgia moved its clocks forward
# an hour recently, due to a law proposed by Zurab Murvanidze,
# an MP who went on a hunger strike for 11 days to force discussion about it!
# We have no details, but we'll guess they didn't move the clocks back in fall.
#
# From Mathew Englander, quoting AP (1996-10-23 13:05-04):
# Instead of putting back clocks at the end of October, Georgia
# will stay on daylight savings time this winter to save energy,
# President Eduard Shevardnadze decreed Wednesday.
#
# From the BBC via Joseph S. Myers (2004-06-27):
#
# Georgia moved closer to Western Europe on Sunday... The former Soviet
# republic has changed its time zone back to that of Moscow. As a result it
# is now just four hours ahead of Greenwich Mean Time, rather than five hours
# ahead. The switch was decreed by the pro-Western president of Georgia,
# Mikhail Saakashvili, who said the change was partly prompted by the process
# of integration into Europe.
# From Teimuraz Abashidze (2005-11-07):
# Government of Georgia ... decided to NOT CHANGE daylight savings time on
# [Oct.] 30, as it was done before during last more than 10 years.
# Currently, we are in fact GMT +4:00, as before 30 October it was GMT
# +3:00.... The problem is, there is NO FORMAL LAW or governmental document
# about it. As far as I can find, I was told, that there is no document,
# because we just DIDN'T ISSUE document about switching to winter time....
# I don't know what can be done, especially knowing that some years ago our
# DST rules where changed THREE TIMES during one month.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Tbilisi 2:59:16 - LMT 1880
2:59:16 - TBMT 1924 May 2 # Tbilisi Mean Time
3:00 - TBIT 1957 Mar # Tbilisi Time
4:00 RussiaAsia TBI%sT 1991 Mar 31 2:00s
3:00 1:00 TBIST 1991 Apr 9 # independence
3:00 RussiaAsia GE%sT 1992 # Georgia Time
3:00 E-EurAsia GE%sT 1994 Sep lastSun
4:00 E-EurAsia GE%sT 1996 Oct lastSun
4:00 1:00 GEST 1997 Mar lastSun
4:00 E-EurAsia GE%sT 2004 Jun 27
3:00 RussiaAsia GE%sT 2005 Mar lastSun 2:00
4:00 - GET
# East Timor
# See Indonesia for the 1945 transition.
# From Joao Carrascalao, brother of the former governor of East Timor, in
# <a href="http://etan.org/et99c/december/26-31/30ETMAY.htm">
# East Timor may be late for its millennium
# </a> (1999-12-26/31):
# Portugal tried to change the time forward in 1974 because the sun
# rises too early but the suggestion raised a lot of problems with the
# Timorese and I still don't think it would work today because it
# conflicts with their way of life.
# From Paul Eggert (2000-12-04):
# We don't have any record of the above attempt.
# Most likely our records are incomplete, but we have no better data.
# <a href="http://www.hri.org/news/world/undh/last/00-08-16.undh.html">
# From Manoel de Almeida e Silva, Deputy Spokesman for the UN Secretary-General
# (2000-08-16)</a>:
# The Cabinet of the East Timor Transition Administration decided
# today to advance East Timor's time by one hour. The time change,
# which will be permanent, with no seasonal adjustment, will happen at
# midnight on Saturday, September 16.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Dili 8:22:20 - LMT 1912
8:00 - TLT 1942 Feb 21 23:00 # E Timor Time
9:00 - JST 1945 Sep 23
9:00 - TLT 1976 May 3
8:00 - CIT 2000 Sep 17 00:00
9:00 - TLT
# India
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Kolkata 5:53:28 - LMT 1880 # Kolkata
5:53:20 - HMT 1941 Oct # Howrah Mean Time?
6:30 - BURT 1942 May 15 # Burma Time
5:30 - IST 1942 Sep
5:30 1:00 IST 1945 Oct 15
5:30 - IST
# The following are like Asia/Kolkata:
# Andaman Is
# Lakshadweep (Laccadive, Minicoy and Amindivi Is)
# Nicobar Is
# Indonesia
#
# From Gwillim Law (2001-05-28), overriding Shanks & Pottenger:
# <http://www.sumatera-inc.com/go_to_invest/about_indonesia.asp#standtime>
# says that Indonesia's time zones changed on 1988-01-01. Looking at some
# time zone maps, I think that must refer to Western Borneo (Kalimantan Barat
# and Kalimantan Tengah) switching from UTC+8 to UTC+7.
#
# From Paul Eggert (2007-03-10):
# Here is another correction to Shanks & Pottenger.
# JohnTWB writes that Japanese forces did not surrender control in
# Indonesia until 1945-09-01 00:00 at the earliest (in Jakarta) and
# other formal surrender ceremonies were September 9, 11, and 13, plus
# September 12 for the regional surrender to Mountbatten in Singapore.
# These would be the earliest possible times for a change.
# Regimes horaires pour le monde entier, by Henri Le Corre, (Editions
# Traditionnelles, 1987, Paris) says that Java and Madura switched
# from JST to UTC+07:30 on 1945-09-23, and gives 1944-09-01 for Jayapura
# (Hollandia). For now, assume all Indonesian locations other than Jayapura
# switched on 1945-09-23.
#
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Jakarta 7:07:12 - LMT 1867 Aug 10
# Shanks & Pottenger say the next transition was at 1924 Jan 1 0:13,
# but this must be a typo.
7:07:12 - JMT 1923 Dec 31 23:47:12 # Jakarta
7:20 - JAVT 1932 Nov # Java Time
7:30 - WIT 1942 Mar 23
9:00 - JST 1945 Sep 23
7:30 - WIT 1948 May
8:00 - WIT 1950 May
7:30 - WIT 1964
7:00 - WIT
Zone Asia/Pontianak 7:17:20 - LMT 1908 May
7:17:20 - PMT 1932 Nov # Pontianak MT
7:30 - WIT 1942 Jan 29
9:00 - JST 1945 Sep 23
7:30 - WIT 1948 May
8:00 - WIT 1950 May
7:30 - WIT 1964
8:00 - CIT 1988 Jan 1
7:00 - WIT
Zone Asia/Makassar 7:57:36 - LMT 1920
7:57:36 - MMT 1932 Nov # Macassar MT
8:00 - CIT 1942 Feb 9
9:00 - JST 1945 Sep 23
8:00 - CIT
Zone Asia/Jayapura 9:22:48 - LMT 1932 Nov
9:00 - EIT 1944 Sep 1
9:30 - CST 1964
9:00 - EIT
# Iran
# From Roozbeh Pournader (2003-03-15):
# This is an English translation of what I just found (originally in Persian).
# The Gregorian dates in brackets are mine:
#
# Official Newspaper No. 13548-1370/6/25 [1991-09-16]
# No. 16760/T233 H 1370/6/10 [1991-09-01]
#
# The Rule About Change of the Official Time of the Country
#
# The Board of Ministers, in the meeting dated 1370/5/23 [1991-08-14],
# based on the suggestion number 2221/D dated 1370/4/22 [1991-07-13]
# of the Country's Organization for Official and Employment Affairs,
# and referring to the law for equating the working hours of workers
# and officers in the whole country dated 1359/4/23 [1980-07-14], and
# for synchronizing the official times of the country, agreed that:
#
# The official time of the country should move forward one hour
# at the 24[:00] hours of the first day of Farvardin and should return
# to its previous state at the 24[:00] hours of the 30th day of
# Shahrivar.
#
# First Deputy to the President - Hassan Habibi
#
# From personal experience, that agrees with what has been followed
# for at least the last 5 years. Before that, for a few years, the
# date used was the first Thursday night of Farvardin and the last
# Thursday night of Shahrivar, but I can't give exact dates....
# I have also changed the abbreviations to what is considered correct
# here in Iran, IRST for regular time and IRDT for daylight saving time.
#
# From Roozbeh Pournader (2005-04-05):
# The text of the Iranian law, in effect since 1925, clearly mentions
# that the true solar year is the measure, and there is no arithmetic
# leap year calculation involved. There has never been any serious
# plan to change that law....
#
# From Paul Eggert (2006-03-22):
# Go with Shanks & Pottenger before Sept. 1991, and with Pournader thereafter.
# I used Ed Reingold's cal-persia in GNU Emacs 21.2 to check Persian dates,
# stopping after 2037 when 32-bit time_t's overflow.
# That cal-persia used Birashk's approximation, which disagrees with the solar
# calendar predictions for the year 2025, so I corrected those dates by hand.
#
# From Oscar van Vlijmen (2005-03-30), writing about future
# discrepancies between cal-persia and the Iranian calendar:
# For 2091 solar-longitude-after yields 2091-03-20 08:40:07.7 UT for
# the vernal equinox and that gets so close to 12:00 some local
# Iranian time that the definition of the correct location needs to be
# known exactly, amongst other factors. 2157 is even closer:
# 2157-03-20 08:37:15.5 UT. But the Gregorian year 2025 should give
# no interpretation problem whatsoever. By the way, another instant
# in the near future where there will be a discrepancy between
# arithmetical and astronomical Iranian calendars will be in 2058:
# vernal equinox on 2058-03-20 09:03:05.9 UT. The Java version of
# Reingold's/Dershowitz' calculator gives correctly the Gregorian date
# 2058-03-21 for 1 Farvardin 1437 (astronomical).
#
# From Steffen Thorsen (2006-03-22):
# Several of my users have reported that Iran will not observe DST anymore:
# http://www.irna.ir/en/news/view/line-17/0603193812164948.htm
#
# From Reuters (2007-09-16), with a heads-up from Jesper Norgaard Welen:
# ... the Guardian Council ... approved a law on Sunday to re-introduce
# daylight saving time ...
# http://uk.reuters.com/article/oilRpt/idUKBLA65048420070916
#
# From Roozbeh Pournader (2007-11-05):
# This is quoted from Official Gazette of the Islamic Republic of
# Iran, Volume 63, Number 18242, dated Tuesday 1386/6/24
# [2007-10-16]. I am doing the best translation I can:...
# The official time of the country will be moved forward for one hour
# on the 24 hours of the first day of the month of Farvardin and will
# be changed back to its previous state on the 24 hours of the
# thirtieth day of Shahrivar.
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Iran 1978 1980 - Mar 21 0:00 1:00 D
Rule Iran 1978 only - Oct 21 0:00 0 S
Rule Iran 1979 only - Sep 19 0:00 0 S
Rule Iran 1980 only - Sep 23 0:00 0 S
Rule Iran 1991 only - May 3 0:00 1:00 D
Rule Iran 1992 1995 - Mar 22 0:00 1:00 D
Rule Iran 1991 1995 - Sep 22 0:00 0 S
Rule Iran 1996 only - Mar 21 0:00 1:00 D
Rule Iran 1996 only - Sep 21 0:00 0 S
Rule Iran 1997 1999 - Mar 22 0:00 1:00 D
Rule Iran 1997 1999 - Sep 22 0:00 0 S
Rule Iran 2000 only - Mar 21 0:00 1:00 D
Rule Iran 2000 only - Sep 21 0:00 0 S
Rule Iran 2001 2003 - Mar 22 0:00 1:00 D
Rule Iran 2001 2003 - Sep 22 0:00 0 S
Rule Iran 2004 only - Mar 21 0:00 1:00 D
Rule Iran 2004 only - Sep 21 0:00 0 S
Rule Iran 2005 only - Mar 22 0:00 1:00 D
Rule Iran 2005 only - Sep 22 0:00 0 S
Rule Iran 2008 only - Mar 21 0:00 1:00 D
Rule Iran 2008 only - Sep 21 0:00 0 S
Rule Iran 2009 2011 - Mar 22 0:00 1:00 D
Rule Iran 2009 2011 - Sep 22 0:00 0 S
Rule Iran 2012 only - Mar 21 0:00 1:00 D
Rule Iran 2012 only - Sep 21 0:00 0 S
Rule Iran 2013 2015 - Mar 22 0:00 1:00 D
Rule Iran 2013 2015 - Sep 22 0:00 0 S
Rule Iran 2016 only - Mar 21 0:00 1:00 D
Rule Iran 2016 only - Sep 21 0:00 0 S
Rule Iran 2017 2019 - Mar 22 0:00 1:00 D
Rule Iran 2017 2019 - Sep 22 0:00 0 S
Rule Iran 2020 only - Mar 21 0:00 1:00 D
Rule Iran 2020 only - Sep 21 0:00 0 S
Rule Iran 2021 2023 - Mar 22 0:00 1:00 D
Rule Iran 2021 2023 - Sep 22 0:00 0 S
Rule Iran 2024 only - Mar 21 0:00 1:00 D
Rule Iran 2024 only - Sep 21 0:00 0 S
Rule Iran 2025 2027 - Mar 22 0:00 1:00 D
Rule Iran 2025 2027 - Sep 22 0:00 0 S
Rule Iran 2028 2029 - Mar 21 0:00 1:00 D
Rule Iran 2028 2029 - Sep 21 0:00 0 S
Rule Iran 2030 2031 - Mar 22 0:00 1:00 D
Rule Iran 2030 2031 - Sep 22 0:00 0 S
Rule Iran 2032 2033 - Mar 21 0:00 1:00 D
Rule Iran 2032 2033 - Sep 21 0:00 0 S
Rule Iran 2034 2035 - Mar 22 0:00 1:00 D
Rule Iran 2034 2035 - Sep 22 0:00 0 S
Rule Iran 2036 2037 - Mar 21 0:00 1:00 D
Rule Iran 2036 2037 - Sep 21 0:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Tehran 3:25:44 - LMT 1916
3:25:44 - TMT 1946 # Tehran Mean Time
3:30 - IRST 1977 Nov
4:00 Iran IR%sT 1979
3:30 Iran IR%sT
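# As a minimal Python sketch (not part of the zic data), here is one way to
# see the rules above in effect; it assumes a system tzdata whose Iranian
# history matches them for past years, even though later editions may revise
# the predictions:
#
#    from datetime import datetime
#    from zoneinfo import ZoneInfo  # Python 3.9+
#    tehran = ZoneInfo("Asia/Tehran")
#    d = datetime(2020, 6, 1, tzinfo=tehran)
#    print(d.utcoffset(), d.dst())  # 4:30:00 1:00:00 -- DST per the 2020 rule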
# Iraq
#
# From Jonathan Lennox (2000-06-12):
# An article in this week's Economist ("Inside the Saddam-free zone", p. 50 in
# the U.S. edition) on the Iraqi Kurds contains a paragraph:
# "The three northern provinces ... switched their clocks this spring and
# are an hour ahead of Baghdad."
#
# But Rives McDow (2000-06-18) quotes a contact in Iraqi-Kurdistan as follows:
# In the past, some Kurdish nationalists, as a protest to the Iraqi
# Government, did not adhere to daylight saving time. They referred
# to daylight saving as Saddam time. But, as of today, the time zone
# in Iraqi-Kurdistan is on standard time with Baghdad, Iraq.
#
# So we'll ignore the Economist's claim.
# From Steffen Thorsen (2008-03-10):
# The cabinet in Iraq abolished DST last week, according to the following
# news sources (in Arabic):
# <a href="http://www.aljeeran.net/wesima_articles/news-20080305-98602.html">
# http://www.aljeeran.net/wesima_articles/news-20080305-98602.html
# </a>
# <a href="http://www.aswataliraq.info/look/article.tpl?id=2047&IdLanguage=17&IdPublication=4&NrArticle=71743&NrIssue=1&NrSection=10">
# http://www.aswataliraq.info/look/article.tpl?id=2047&IdLanguage=17&IdPublication=4&NrArticle=71743&NrIssue=1&NrSection=10
# </a>
#
# We have published a short article in English about the change:
# <a href="http://www.timeanddate.com/news/time/iraq-dumps-daylight-saving.html">
# http://www.timeanddate.com/news/time/iraq-dumps-daylight-saving.html
# </a>
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Iraq 1982 only - May 1 0:00 1:00 D
Rule Iraq 1982 1984 - Oct 1 0:00 0 S
Rule Iraq 1983 only - Mar 31 0:00 1:00 D
Rule Iraq 1984 1985 - Apr 1 0:00 1:00 D
Rule Iraq 1985 1990 - Sep lastSun 1:00s 0 S
Rule Iraq 1986 1990 - Mar lastSun 1:00s 1:00 D
# IATA SSIM (1991/1996) says Apr 1 12:01am UTC; guess the `:01' is a typo.
# Shanks & Pottenger say Iraq did not observe DST 1992/1997; ignore this.
#
Rule Iraq 1991 2007 - Apr 1 3:00s 1:00 D
Rule Iraq 1991 2007 - Oct 1 3:00s 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Baghdad 2:57:40 - LMT 1890
2:57:36 - BMT 1918 # Baghdad Mean Time?
3:00 - AST 1982 May
3:00 Iraq A%sT
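# As an aside, each Rule line above is ten whitespace-separated fields
# matching the "Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S" header, and a
# trailing "s" on the AT field (as in "3:00s") means local standard time.
# A minimal Python sketch of that split (not part of the zic data):
#
#    line = "Rule Iraq 1991 2007 - Apr 1 3:00s 1:00 D"
#    keys = ["NAME", "FROM", "TO", "TYPE", "IN", "ON", "AT", "SAVE", "LETTER/S"]
#    print(dict(zip(keys, line.split()[1:])))  # drop the leading "Rule" keyword
#    # {'NAME': 'Iraq', ..., 'AT': '3:00s', 'SAVE': '1:00', 'LETTER/S': 'D'}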
###############################################################################
# Israel
# From Ephraim Silverberg (2001-01-11):
#
# I coined "IST/IDT" circa 1988. Until then there were three
# different abbreviations in use:
#
# JST Jerusalem Standard Time [Danny Braniss, Hebrew University]
# IZT Israel Zonal (sic) Time [Prof. Haim Papo, Technion]
# EEST Eastern Europe Standard Time [used by almost everyone else]
#
# Since timezones should be called by country and not capital cities,
# I ruled out JST. As Israel is in Asia Minor and not Eastern Europe,
# EEST was equally unacceptable. Since "zonal" was not compatible with
# any other timezone abbreviation, I felt that 'IST' was the way to go
# and, indeed, it has received almost universal acceptance in timezone
# settings in Israeli computers.
#
# In any case, I am happy to share timezone abbreviations with India,
# high on my favorite-country list (and not only because my wife's
# family is from India).
# From Shanks & Pottenger:
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Zion 1940 only - Jun 1 0:00 1:00 D
Rule Zion 1942 1944 - Nov 1 0:00 0 S
Rule Zion 1943 only - Apr 1 2:00 1:00 D
Rule Zion 1944 only - Apr 1 0:00 1:00 D
Rule Zion 1945 only - Apr 16 0:00 1:00 D
Rule Zion 1945 only - Nov 1 2:00 0 S
Rule Zion 1946 only - Apr 16 2:00 1:00 D
Rule Zion 1946 only - Nov 1 0:00 0 S
Rule Zion 1948 only - May 23 0:00 2:00 DD
Rule Zion 1948 only - Sep 1 0:00 1:00 D
Rule Zion 1948 1949 - Nov 1 2:00 0 S
Rule Zion 1949 only - May 1 0:00 1:00 D
Rule Zion 1950 only - Apr 16 0:00 1:00 D
Rule Zion 1950 only - Sep 15 3:00 0 S
Rule Zion 1951 only - Apr 1 0:00 1:00 D
Rule Zion 1951 only - Nov 11 3:00 0 S
Rule Zion 1952 only - Apr 20 2:00 1:00 D
Rule Zion 1952 only - Oct 19 3:00 0 S
Rule Zion 1953 only - Apr 12 2:00 1:00 D
Rule Zion 1953 only - Sep 13 3:00 0 S
Rule Zion 1954 only - Jun 13 0:00 1:00 D
Rule Zion 1954 only - Sep 12 0:00 0 S
Rule Zion 1955 only - Jun 11 2:00 1:00 D
Rule Zion 1955 only - Sep 11 0:00 0 S
Rule Zion 1956 only - Jun 3 0:00 1:00 D
Rule Zion 1956 only - Sep 30 3:00 0 S
Rule Zion 1957 only - Apr 29 2:00 1:00 D
Rule Zion 1957 only - Sep 22 0:00 0 S
Rule Zion 1974 only - Jul 7 0:00 1:00 D
Rule Zion 1974 only - Oct 13 0:00 0 S
Rule Zion 1975 only - Apr 20 0:00 1:00 D
Rule Zion 1975 only - Aug 31 0:00 0 S
Rule Zion 1985 only - Apr 14 0:00 1:00 D
Rule Zion 1985 only - Sep 15 0:00 0 S
Rule Zion 1986 only - May 18 0:00 1:00 D
Rule Zion 1986 only - Sep 7 0:00 0 S
Rule Zion 1987 only - Apr 15 0:00 1:00 D
Rule Zion 1987 only - Sep 13 0:00 0 S
Rule Zion 1988 only - Apr 9 0:00 1:00 D
Rule Zion 1988 only - Sep 3 0:00 0 S
# From Ephraim Silverberg
# (1997-03-04, 1998-03-16, 1998-12-28, 2000-01-17, 2000-07-25, 2004-12-22,
# and 2005-02-17):
# According to the Office of the Secretary General of the Ministry of
# Interior, there is NO set rule for Daylight-Savings/Standard time changes.
# One thing is entrenched in law, however: that there must be at least 150
# days of daylight savings time annually. From 1993-1998, the change to
# daylight savings time was on a Friday morning from midnight IST to
# 1 a.m IDT; up until 1998, the change back to standard time was on a
# Saturday night from midnight daylight savings time to 11 p.m. standard
# time. 1996 is an exception to this rule where the change back to standard
# time took place on Sunday night instead of Saturday night to avoid
# conflicts with the Jewish New Year. In 1999, the change to
# daylight savings time was still on a Friday morning but from
# 2 a.m. IST to 3 a.m. IDT; furthermore, the change back to standard time
# was also on a Friday morning from 2 a.m. IDT to 1 a.m. IST for
# 1999 only. In the year 2000, the change to daylight savings time was
# similar to 1999, but although the change back will be on a Friday, it
# will take place from 1 a.m. IDT to midnight IST. Starting in 2001, all
# changes to/from will take place at 1 a.m. old time, but now there is no
# rule as to what day of the week it will take place in as the start date
# (except in 2003) is the night after the Passover Seder (i.e. the eve
# of the 16th of Nisan in the lunar Hebrew calendar) and the end date
# (except in 2002) is three nights before Yom Kippur [Day of Atonement]
# (the eve of the 7th of Tishrei in the lunar Hebrew calendar).
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Zion 1989 only - Apr 30 0:00 1:00 D
Rule Zion 1989 only - Sep 3 0:00 0 S
Rule Zion 1990 only - Mar 25 0:00 1:00 D
Rule Zion 1990 only - Aug 26 0:00 0 S
Rule Zion 1991 only - Mar 24 0:00 1:00 D
Rule Zion 1991 only - Sep 1 0:00 0 S
Rule Zion 1992 only - Mar 29 0:00 1:00 D
Rule Zion 1992 only - Sep 6 0:00 0 S
Rule Zion 1993 only - Apr 2 0:00 1:00 D
Rule Zion 1993 only - Sep 5 0:00 0 S
# The dates for 1994-1995 were obtained from Office of the Spokeswoman for the
# Ministry of Interior, Jerusalem, Israel. The spokeswoman can be reached by
# calling the office directly at 972-2-6701447 or 972-2-6701448.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Zion 1994 only - Apr 1 0:00 1:00 D
Rule Zion 1994 only - Aug 28 0:00 0 S
Rule Zion 1995 only - Mar 31 0:00 1:00 D
Rule Zion 1995 only - Sep 3 0:00 0 S
# The dates for 1996 were determined by the Minister of Interior of the
# time, Haim Ramon. The official announcement regarding 1996-1998
# (with the dates for 1997-1998 no longer being relevant) can be viewed at:
#
# ftp://ftp.cs.huji.ac.il/pub/tz/announcements/1996-1998.ramon.ps.gz
#
# The dates for 1997-1998 were altered by his successor, Rabbi Eli Suissa.
#
# The official announcements for the years 1997-1999 can be viewed at:
#
# ftp://ftp.cs.huji.ac.il/pub/tz/announcements/YYYY.ps.gz
#
# where YYYY is the relevant year.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Zion 1996 only - Mar 15 0:00 1:00 D
Rule Zion 1996 only - Sep 16 0:00 0 S
Rule Zion 1997 only - Mar 21 0:00 1:00 D
Rule Zion 1997 only - Sep 14 0:00 0 S
Rule Zion 1998 only - Mar 20 0:00 1:00 D
Rule Zion 1998 only - Sep 6 0:00 0 S
Rule Zion 1999 only - Apr 2 2:00 1:00 D
Rule Zion 1999 only - Sep 3 2:00 0 S
# The Knesset Interior Committee has changed the dates for 2000 for
# the third time in just over a year and have set new dates for the
# years 2001-2004 as well.
#
# The official announcement for the start date of 2000 can be viewed at:
#
# ftp://ftp.cs.huji.ac.il/pub/tz/announcements/2000-start.ps.gz
#
# The official announcement for the end date of 2000 and the dates
# for the years 2001-2004 can be viewed at:
#
# ftp://ftp.cs.huji.ac.il/pub/tz/announcements/2000-2004.ps.gz
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Zion 2000 only - Apr 14 2:00 1:00 D
Rule Zion 2000 only - Oct 6 1:00 0 S
Rule Zion 2001 only - Apr 9 1:00 1:00 D
Rule Zion 2001 only - Sep 24 1:00 0 S
Rule Zion 2002 only - Mar 29 1:00 1:00 D
Rule Zion 2002 only - Oct 7 1:00 0 S
Rule Zion 2003 only - Mar 28 1:00 1:00 D
Rule Zion 2003 only - Oct 3 1:00 0 S
Rule Zion 2004 only - Apr 7 1:00 1:00 D
Rule Zion 2004 only - Sep 22 1:00 0 S
# The proposed law agreed upon by the Knesset Interior Committee on
# 2005-02-14 is that, for 2005 and beyond, DST starts at 02:00 the
# last Friday before April 2nd (i.e. the last Friday in March or April
# 1st itself if it falls on a Friday) and ends at 02:00 on the Saturday
# night _before_ the fast of Yom Kippur.
#
# Those who can read Hebrew can view the announcement at:
#
# ftp://ftp.cs.huji.ac.il/pub/tz/announcements/2005+beyond.ps
# From Paul Eggert (2012-10-26):
# I used Ephraim Silverberg's dst-israel.el program
# <ftp://ftp.cs.huji.ac.il/pub/tz/software/dst-israel.el> (2005-02-20)
# along with Ed Reingold's cal-hebrew in GNU Emacs 21.4,
# to generate the transitions from 2005 through 2012.
# (I replaced "lastFri" with "Fri>=26" by hand.)
# The spring transitions all correspond to the following Rule:
#
# Rule Zion 2005 2012 - Mar Fri>=26 2:00 1:00 D
#
# but older zic implementations (e.g., Solaris 8) do not support
# "Fri>=26" to mean April 1 in years like 2005, so for now we list the
# springtime transitions explicitly.
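# "Fri>=26" denotes the first Friday on or after March 26, which can land in
# April; a minimal Python check of that arithmetic (not part of the zic data):
#
#    import datetime
#    def first_on_or_after(year, month, day, weekday):  # Mon=0 .. Sun=6
#        d = datetime.date(year, month, day)
#        while d.weekday() != weekday:
#            d += datetime.timedelta(days=1)
#        return d
#    print(first_on_or_after(2005, 3, 26, 4))  # 2005-04-01: spills into April
#    print(first_on_or_after(2012, 3, 26, 4))  # 2012-03-30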
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Zion 2005 only - Apr 1 2:00 1:00 D
Rule Zion 2005 only - Oct 9 2:00 0 S
Rule Zion 2006 2010 - Mar Fri>=26 2:00 1:00 D
Rule Zion 2006 only - Oct 1 2:00 0 S
Rule Zion 2007 only - Sep 16 2:00 0 S
Rule Zion 2008 only - Oct 5 2:00 0 S
Rule Zion 2009 only - Sep 27 2:00 0 S
Rule Zion 2010 only - Sep 12 2:00 0 S
Rule Zion 2011 only - Apr 1 2:00 1:00 D
Rule Zion 2011 only - Oct 2 2:00 0 S
Rule Zion 2012 only - Mar Fri>=26 2:00 1:00 D
Rule Zion 2012 only - Sep 23 2:00 0 S
# From Ephraim Silverberg (2012-10-18):
# Yesterday, the Interior Ministry Committee, after more than a year
# past, approved sending the proposed June 2011 changes to the Time
# Decree Law back to the Knesset for second and third (final) votes
# before the upcoming elections on Jan. 22, 2013. Hence, although the
# changes are not yet law, they are expected to be so before February 2013.
#
# As of 2013, DST starts at 02:00 on the Friday before the last Sunday in March.
# DST ends at 02:00 on the first Sunday after October 1, unless it occurs on the
# second day of the Jewish Rosh Hashana holiday, in which case DST ends a day
# later (i.e. at 02:00 the first Monday after October 2).
# [Rosh Hashana holidays are factored in until 2100.]
# From Ephraim Silverberg (2012-11-05):
# The Knesset passed today (in second and final readings) the amendment to the
# Time Decree Law making the changes ... law.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Zion 2013 max - Mar Fri>=23 2:00 1:00 D
Rule Zion 2013 2026 - Oct Sun>=2 2:00 0 S
Rule Zion 2027 only - Oct Mon>=3 2:00 0 S
Rule Zion 2028 max - Oct Sun>=2 2:00 0 S
# The following rules are commented out for now, as they break older
# versions of zic that support only signed 32-bit timestamps, i.e.,
# through 2038-01-19 03:14:07 UTC.
#Rule Zion 2028 2053 - Oct Sun>=2 2:00 0 S
#Rule Zion 2054 only - Oct Mon>=3 2:00 0 S
#Rule Zion 2055 2080 - Oct Sun>=2 2:00 0 S
#Rule Zion 2081 only - Oct Mon>=3 2:00 0 S
#Rule Zion 2082 max - Oct Sun>=2 2:00 0 S
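# For reference, the signed 32-bit limit mentioned above is easy to verify
# in Python (a sketch, not part of the zic data):
#
#    from datetime import datetime, timezone
#    print(datetime.fromtimestamp(2**31 - 1, tz=timezone.utc))
#    # 2038-01-19 03:14:07+00:00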
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Jerusalem 2:20:56 - LMT 1880
2:20:40 - JMT 1918 # Jerusalem Mean Time?
2:00 Zion I%sT
###############################################################################
# Japan
# `9:00' and `JST' is from Guy Harris.
# From Paul Eggert (1995-03-06):
# Today's _Asahi Evening News_ (page 4) reports that Japan had
# daylight saving between 1948 and 1951, but ``the system was discontinued
# because the public believed it would lead to longer working hours.''
# From Mayumi Negishi in the 2005-08-10 Japan Times
# <http://www.japantimes.co.jp/cgi-bin/getarticle.pl5?nn20050810f2.htm>:
# Occupation authorities imposed daylight-saving time on Japan on
# [1948-05-01].... But lack of prior debate and the execution of
# daylight-saving time just three days after the bill was passed generated
# deep hatred of the concept.... The Diet unceremoniously passed a bill to
# dump the unpopular system in October 1951, less than a month after the San
# Francisco Peace Treaty was signed. (A government poll in 1951 showed 53%
# of the Japanese wanted to scrap daylight-saving time, as opposed to 30% who
# wanted to keep it.)
# From Paul Eggert (2006-03-22):
# Shanks & Pottenger write that DST in Japan during those years was as follows:
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Japan 1948 only - May Sun>=1 2:00 1:00 D
Rule Japan 1948 1951 - Sep Sat>=8 2:00 0 S
Rule Japan 1949 only - Apr Sun>=1 2:00 1:00 D
Rule Japan 1950 1951 - May Sun>=1 2:00 1:00 D
# but the only locations using it (for birth certificates, presumably, since
# their audience is astrologers) were US military bases. For now, assume
# that for most purposes daylight-saving time was observed; otherwise, what
# would have been the point of the 1951 poll?
# From Hideyuki Suzuki (1998-11-09):
# 'Tokyo' usually stands for the former location of Tokyo Astronomical
# Observatory: E 139 44' 40".90 (9h 18m 58s.727), N 35 39' 16".0.
# This data is from 'Rika Nenpyou (Chronological Scientific Tables) 1996'
# edited by National Astronomical Observatory of Japan....
# JST (Japan Standard Time) has been used since 1888-01-01 00:00 (JST).
# The law is enacted on 1886-07-07.
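# The 9:18:59 LMT offset used below is just that longitude converted at one
# degree = 4 minutes of time; a minimal Python check (not part of the zic
# data):
#
#    lon = 139 + 44/60 + 40.90/3600       # E 139 44' 40".90
#    h, rem = divmod(lon * 240, 3600)     # 240 seconds of time per degree
#    m, s = divmod(rem, 60)
#    print("%dh %dm %.3fs" % (h, m, s))   # 9h 18m 58.727s, as Suzuki says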
# From Hideyuki Suzuki (1998-11-16):
# The ordinance No. 51 (1886) established "standard time" in Japan,
# which stands for the time on E 135 degree.
# In the ordinance No. 167 (1895), "standard time" was renamed to "central
# standard time". And the same ordinance also established "western standard
# time", which stands for the time on E 120 degree.... But "western standard
# time" was abolished in the ordinance No. 529 (1937). In the ordinance No.
# 167, there is no mention regarding for what place western standard time is
# standard....
#
# I wrote "ordinance" above, but I don't know how to translate.
# In Japanese it's "chokurei", which means ordinance from emperor.
# Shanks & Pottenger claim JST in use since 1896, and that a few
# places (e.g. Ishigaki) use +0800; go with Suzuki. Guess that all
# ordinances took effect on Jan 1.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Tokyo 9:18:59 - LMT 1887 Dec 31 15:00u
9:00 - JST 1896
9:00 - CJT 1938
9:00 Japan J%sT
# Since 1938, all Japanese possessions have been like Asia/Tokyo.
# Jordan
#
# From <a href="http://star.arabia.com/990701/JO9.html">
# Jordan Week (1999-07-01) </a> via Steffen Thorsen (1999-09-09):
# Clocks in Jordan were forwarded one hour on Wednesday at midnight,
# in accordance with the government's decision to implement summer time
# all year round.
#
# From <a href="http://star.arabia.com/990930/JO9.html">
# Jordan Week (1999-09-30) </a> via Steffen Thorsen (1999-11-09):
# Winter time starts today Thursday, 30 September. Clocks will be turned back
# by one hour. This is the latest government decision and it's final!
# The decision was taken because of the increase in working hours in
# government's departments from six to seven hours.
#
# From Paul Eggert (2005-11-22):
# Starting 2003 transitions are from Steffen Thorsen's web site timeanddate.com.
#
# From Steffen Thorsen (2005-11-23):
# For Jordan I have received multiple independent user reports every year
# about DST end dates, as the end-rule is different every year.
#
# From Steffen Thorsen (2006-10-01), after a heads-up from Hilal Malawi:
# http://www.petranews.gov.jo/nepras/2006/Sep/05/4000.htm
# "Jordan will switch to winter time on Friday, October 27".
#
# From Phil Pizzey (2009-04-02):
# ...I think I may have spotted an error in the timezone data for
# Jordan.
# The current (2009d) asia file shows Jordan going to daylight
# saving
# time on the last Thursday in March.
#
# Rule Jordan 2000 max - Mar lastThu 0:00s 1:00 S
#
# However timeanddate.com, which I usually find reliable, shows Jordan
# going to daylight saving time on the last Friday in March since 2002.
# Please see
# <a href="http://www.timeanddate.com/worldclock/timezone.html?n=11">
# http://www.timeanddate.com/worldclock/timezone.html?n=11
# </a>
# From Steffen Thorsen (2009-04-02):
# This single one might be good enough, (2009-03-24, Arabic):
# <a href="http://petra.gov.jo/Artical.aspx?Lng=2&Section=8&Artical=95279">
# http://petra.gov.jo/Artical.aspx?Lng=2&Section=8&Artical=95279
# </a>
#
# Google's translation:
#
# > The Council of Ministers decided in 2002 to adopt the principle of timely
# > submission of the summer at 60 minutes as of midnight on the last Thursday
# > of the month of March of each year.
#
# So - this means the midnight between Thursday and Friday since 2002.
# From Arthur David Olson (2009-04-06):
# We still have Jordan switching to DST on Thursdays in 2000 and 2001.
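# In zic notation that instant is written "Mar lastThu 24:00" (see the 2002
# rule below): 24:00 on the last Thursday is the same instant as 00:00 on
# the following Friday.  A minimal Python check (not part of the zic data):
#
#    import datetime
#    def last_weekday(year, month, weekday):  # Mon=0 .. Sun=6
#        if month == 12:
#            d = datetime.date(year, 12, 31)
#        else:
#            d = datetime.date(year, month + 1, 1) - datetime.timedelta(days=1)
#        while d.weekday() != weekday:
#            d -= datetime.timedelta(days=1)
#        return d
#    thu = last_weekday(2009, 3, 3)  # 2009-03-26, the last Thursday of March
#    print(datetime.datetime.combine(thu, datetime.time())
#          + datetime.timedelta(hours=24))
#    # 2009-03-27 00:00:00 -- the Friday that timeanddate.com reports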
# From Steffen Thorsen (2012-10-25):
# Yesterday the government in Jordan announced that they will not
# switch back to standard time this winter, so they will stay on DST
# until about the same time next year (at least).
# http://www.petra.gov.jo/Public_News/Nws_NewsDetails.aspx?NewsID=88950
#
# From Paul Eggert (2012-10-25):
# For now, assume this is just a one-year measure. If it becomes
# permanent, we should move Jordan from EET to AST effective tomorrow.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Jordan 1973 only - Jun 6 0:00 1:00 S
Rule Jordan 1973 1975 - Oct 1 0:00 0 -
Rule Jordan 1974 1977 - May 1 0:00 1:00 S
Rule Jordan 1976 only - Nov 1 0:00 0 -
Rule Jordan 1977 only - Oct 1 0:00 0 -
Rule Jordan 1978 only - Apr 30 0:00 1:00 S
Rule Jordan 1978 only - Sep 30 0:00 0 -
Rule Jordan 1985 only - Apr 1 0:00 1:00 S
Rule Jordan 1985 only - Oct 1 0:00 0 -
Rule Jordan 1986 1988 - Apr Fri>=1 0:00 1:00 S
Rule Jordan 1986 1990 - Oct Fri>=1 0:00 0 -
Rule Jordan 1989 only - May 8 0:00 1:00 S
Rule Jordan 1990 only - Apr 27 0:00 1:00 S
Rule Jordan 1991 only - Apr 17 0:00 1:00 S
Rule Jordan 1991 only - Sep 27 0:00 0 -
Rule Jordan 1992 only - Apr 10 0:00 1:00 S
Rule Jordan 1992 1993 - Oct Fri>=1 0:00 0 -
Rule Jordan 1993 1998 - Apr Fri>=1 0:00 1:00 S
Rule Jordan 1994 only - Sep Fri>=15 0:00 0 -
Rule Jordan 1995 1998 - Sep Fri>=15 0:00s 0 -
Rule Jordan 1999 only - Jul 1 0:00s 1:00 S
Rule Jordan 1999 2002 - Sep lastFri 0:00s 0 -
Rule Jordan 2000 2001 - Mar lastThu 0:00s 1:00 S
Rule Jordan 2002 max - Mar lastThu 24:00 1:00 S
Rule Jordan 2003 only - Oct 24 0:00s 0 -
Rule Jordan 2004 only - Oct 15 0:00s 0 -
Rule Jordan 2005 only - Sep lastFri 0:00s 0 -
Rule Jordan 2006 2011 - Oct lastFri 0:00s 0 -
Rule Jordan 2013 max - Oct lastFri 0:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Amman 2:23:44 - LMT 1931
2:00 Jordan EE%sT
# Kazakhstan
# From Paul Eggert (1996-11-22):
# Andrew Evtichov (1996-04-13) writes that Kazakhstan
# stayed in sync with Moscow after 1990, and that Aqtobe (formerly Aktyubinsk)
# and Aqtau (formerly Shevchenko) are the largest cities in their zones.
# Guess that Aqtau and Aqtobe diverged in 1995, since that's the first time
# IATA SSIM mentions a third time zone in Kazakhstan.
# From Paul Eggert (2006-03-22):
# German Iofis, ELSI, Almaty (2001-10-09) reports that Kazakhstan uses
# RussiaAsia rules, instead of switching at 00:00 as the IATA has it.
# Go with Shanks & Pottenger, who have them always using RussiaAsia rules.
# Also go with the following claims of Shanks & Pottenger:
#
# - Kazakhstan did not observe DST in 1991.
# - Qyzylorda switched from +5:00 to +6:00 on 1992-01-19 02:00.
# - Oral switched from +5:00 to +4:00 in spring 1989.
# <a href="http://www.kazsociety.org.uk/news/2005/03/30.htm">
# From Kazakhstan Embassy's News Bulletin #11 (2005-03-21):
# </a>
# The Government of Kazakhstan passed a resolution March 15 abolishing
# daylight saving time citing lack of economic benefits and health
# complications coupled with a decrease in productivity.
#
# From Branislav Kojic (in Astana) via Gwillim Law (2005-06-28):
# ... what happened was that the former Kazakhstan Eastern time zone
# was "blended" with the Central zone. Therefore, Kazakhstan now has
# two time zones, and difference between them is one hour. The zone
# closer to UTC is the former Western zone (probably still called the
# same), encompassing four provinces in the west: Aqtobe, Atyrau,
# Mangghystau, and West Kazakhstan. The other zone encompasses
# everything else.... I guess that would make Kazakhstan time zones
# de jure UTC+5 and UTC+6 respectively.
#
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
#
# Almaty (formerly Alma-Ata), representing most locations in Kazakhstan
Zone Asia/Almaty 5:07:48 - LMT 1924 May 2 # or Alma-Ata
5:00 - ALMT 1930 Jun 21 # Alma-Ata Time
6:00 RussiaAsia ALM%sT 1991
6:00 - ALMT 1992
6:00 RussiaAsia ALM%sT 2005 Mar 15
6:00 - ALMT
# Qyzylorda (aka Kyzylorda, Kizilorda, Kzyl-Orda, etc.)
Zone Asia/Qyzylorda 4:21:52 - LMT 1924 May 2
4:00 - KIZT 1930 Jun 21 # Kizilorda Time
5:00 - KIZT 1981 Apr 1
5:00 1:00 KIZST 1981 Oct 1
6:00 - KIZT 1982 Apr 1
5:00 RussiaAsia KIZ%sT 1991
5:00 - KIZT 1991 Dec 16 # independence
5:00 - QYZT 1992 Jan 19 2:00
6:00 RussiaAsia QYZ%sT 2005 Mar 15
6:00 - QYZT
# Aqtobe (aka Aktobe, formerly Akt'ubinsk)
Zone Asia/Aqtobe 3:48:40 - LMT 1924 May 2
4:00 - AKTT 1930 Jun 21 # Aktyubinsk Time
5:00 - AKTT 1981 Apr 1
5:00 1:00 AKTST 1981 Oct 1
6:00 - AKTT 1982 Apr 1
5:00 RussiaAsia AKT%sT 1991
5:00 - AKTT 1991 Dec 16 # independence
5:00 RussiaAsia AQT%sT 2005 Mar 15 # Aqtobe Time
5:00 - AQTT
# Mangghystau
# Aqtau was not founded until 1963, but it represents an inhabited region,
# so include time stamps before 1963.
Zone Asia/Aqtau 3:21:04 - LMT 1924 May 2
4:00 - FORT 1930 Jun 21 # Fort Shevchenko T
5:00 - FORT 1963
5:00 - SHET 1981 Oct 1 # Shevchenko Time
6:00 - SHET 1982 Apr 1
5:00 RussiaAsia SHE%sT 1991
5:00 - SHET 1991 Dec 16 # independence
5:00 RussiaAsia AQT%sT 1995 Mar lastSun 2:00 # Aqtau Time
4:00 RussiaAsia AQT%sT 2005 Mar 15
5:00 - AQTT
# West Kazakhstan
Zone Asia/Oral 3:25:24 - LMT 1924 May 2 # or Ural'sk
4:00 - URAT 1930 Jun 21 # Ural'sk time
5:00 - URAT 1981 Apr 1
5:00 1:00 URAST 1981 Oct 1
6:00 - URAT 1982 Apr 1
5:00 RussiaAsia URA%sT 1989 Mar 26 2:00
4:00 RussiaAsia URA%sT 1991
4:00 - URAT 1991 Dec 16 # independence
4:00 RussiaAsia ORA%sT 2005 Mar 15 # Oral Time
5:00 - ORAT
# Kyrgyzstan (Kirgizstan)
# Transitions through 1991 are from Shanks & Pottenger.
# From Paul Eggert (2005-08-15):
# According to an article dated today in the Kyrgyzstan Development Gateway
# <http://eng.gateway.kg/cgi-bin/page.pl?id=1&story_name=doc9979.shtml>
# Kyrgyzstan is canceling the daylight saving time system. I take the article
# to mean that they will leave their clocks at 6 hours ahead of UTC.
# From Malik Abdugaliev (2005-09-21):
# Our government cancels daylight saving time 6th of August 2005.
# From 2005-08-12 our GMT-offset is +6, w/o any daylight saving.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Kyrgyz 1992 1996 - Apr Sun>=7 0:00s 1:00 S
Rule Kyrgyz 1992 1996 - Sep lastSun 0:00 0 -
Rule Kyrgyz 1997 2005 - Mar lastSun 2:30 1:00 S
Rule Kyrgyz 1997 2004 - Oct lastSun 2:30 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Bishkek 4:58:24 - LMT 1924 May 2
5:00 - FRUT 1930 Jun 21 # Frunze Time
6:00 RussiaAsia FRU%sT 1991 Mar 31 2:00s
5:00 1:00 FRUST 1991 Aug 31 2:00 # independence
5:00 Kyrgyz KG%sT 2005 Aug 12 # Kyrgyzstan Time
6:00 - KGT
###############################################################################
# Korea (North and South)
# From Annie I. Bang (2006-07-10) in
# <http://www.koreaherald.co.kr/SITE/data/html_dir/2006/07/10/200607100012.asp>:
# The Ministry of Commerce, Industry and Energy has already
# commissioned a research project [to reintroduce DST] and has said
# the system may begin as early as 2008.... Korea ran a daylight
# saving program from 1949-61 but stopped it during the 1950-53 Korean War.
# From Shanks & Pottenger:
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule ROK 1960 only - May 15 0:00 1:00 D
Rule ROK 1960 only - Sep 13 0:00 0 S
Rule ROK 1987 1988 - May Sun>=8 0:00 1:00 D
Rule ROK 1987 1988 - Oct Sun>=8 0:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Seoul 8:27:52 - LMT 1890
8:30 - KST 1904 Dec
9:00 - KST 1928
8:30 - KST 1932
9:00 - KST 1954 Mar 21
8:00 ROK K%sT 1961 Aug 10
8:30 - KST 1968 Oct
9:00 ROK K%sT
Zone Asia/Pyongyang 8:23:00 - LMT 1890
8:30 - KST 1904 Dec
9:00 - KST 1928
8:30 - KST 1932
9:00 - KST 1954 Mar 21
8:00 - KST 1961 Aug 10
9:00 - KST
###############################################################################
# Kuwait
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
# From the Arab Times (2007-03-14):
# The Civil Service Commission (CSC) has approved a proposal forwarded
# by MP Ahmad Baqer on implementing the daylight saving time (DST) in
# Kuwait starting from April until the end of Sept this year, reports Al-Anba.
# <http://www.arabtimesonline.com/arabtimes/kuwait/Viewdet.asp?ID=9950>.
# From Paul Eggert (2007-03-29):
# We don't know the details, or whether the approval means it'll happen,
# so for now we assume no DST.
Zone Asia/Kuwait 3:11:56 - LMT 1950
3:00 - AST
# Laos
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Vientiane 6:50:24 - LMT 1906 Jun 9 # or Viangchan
7:06:20 - SMT 1911 Mar 11 0:01 # Saigon MT?
7:00 - ICT 1912 May
8:00 - ICT 1931 May
7:00 - ICT
# Lebanon
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Lebanon 1920 only - Mar 28 0:00 1:00 S
Rule Lebanon 1920 only - Oct 25 0:00 0 -
Rule Lebanon 1921 only - Apr 3 0:00 1:00 S
Rule Lebanon 1921 only - Oct 3 0:00 0 -
Rule Lebanon 1922 only - Mar 26 0:00 1:00 S
Rule Lebanon 1922 only - Oct 8 0:00 0 -
Rule Lebanon 1923 only - Apr 22 0:00 1:00 S
Rule Lebanon 1923 only - Sep 16 0:00 0 -
Rule Lebanon 1957 1961 - May 1 0:00 1:00 S
Rule Lebanon 1957 1961 - Oct 1 0:00 0 -
Rule Lebanon 1972 only - Jun 22 0:00 1:00 S
Rule Lebanon 1972 1977 - Oct 1 0:00 0 -
Rule Lebanon 1973 1977 - May 1 0:00 1:00 S
Rule Lebanon 1978 only - Apr 30 0:00 1:00 S
Rule Lebanon 1978 only - Sep 30 0:00 0 -
Rule Lebanon 1984 1987 - May 1 0:00 1:00 S
Rule Lebanon 1984 1991 - Oct 16 0:00 0 -
Rule Lebanon 1988 only - Jun 1 0:00 1:00 S
Rule Lebanon 1989 only - May 10 0:00 1:00 S
Rule Lebanon 1990 1992 - May 1 0:00 1:00 S
Rule Lebanon 1992 only - Oct 4 0:00 0 -
Rule Lebanon 1993 max - Mar lastSun 0:00 1:00 S
Rule Lebanon 1993 1998 - Sep lastSun 0:00 0 -
Rule Lebanon 1999 max - Oct lastSun 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Beirut 2:22:00 - LMT 1880
2:00 Lebanon EE%sT
# Malaysia
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule NBorneo 1935 1941 - Sep 14 0:00 0:20 TS # one-Third Summer
Rule NBorneo 1935 1941 - Dec 14 0:00 0 -
#
# peninsular Malaysia
# The data here are taken from Mok Ly Yng (2003-10-30)
# <http://www.math.nus.edu.sg/aslaksen/teaching/timezone.html>.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Kuala_Lumpur 6:46:46 - LMT 1901 Jan 1
6:55:25 - SMT 1905 Jun 1 # Singapore M.T.
7:00 - MALT 1933 Jan 1 # Malaya Time
7:00 0:20 MALST 1936 Jan 1
7:20 - MALT 1941 Sep 1
7:30 - MALT 1942 Feb 16
9:00 - JST 1945 Sep 12
7:30 - MALT 1982 Jan 1
8:00 - MYT # Malaysia Time
# Sabah & Sarawak
# From Paul Eggert (2006-03-22):
# The data here are mostly from Shanks & Pottenger, but the 1942, 1945 and 1982
# transition dates are from Mok Ly Yng.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Kuching 7:21:20 - LMT 1926 Mar
7:30 - BORT 1933 # Borneo Time
8:00 NBorneo BOR%sT 1942 Feb 16
9:00 - JST 1945 Sep 12
8:00 - BORT 1982 Jan 1
8:00 - MYT
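# The 0:20 SAVE above is one of the few fractional-hour DST offsets in the
# database; a minimal Python check (not part of the zic data; it assumes a
# system tzdata that keeps this pre-1970 history for Asia/Kuching):
#
#    from datetime import datetime
#    from zoneinfo import ZoneInfo  # Python 3.9+
#    kuching = ZoneInfo("Asia/Kuching")
#    print(datetime(1936, 10, 1, tzinfo=kuching).utcoffset())  # 8:20:00
#    print(datetime(1936, 10, 1, tzinfo=kuching).dst())        # 0:20:00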
# Maldives
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Maldives 4:54:00 - LMT 1880 # Male
4:54:00 - MMT 1960 # Male Mean Time
5:00 - MVT # Maldives Time
# Mongolia
# Shanks & Pottenger say that Mongolia has three time zones, but
# usno1995 and the CIA map Standard Time Zones of the World (2005-03)
# both say that it has just one.
# From Oscar van Vlijmen (1999-12-11):
# <a href="http://www.mongoliatourism.gov.mn/general.htm">
# General Information Mongolia
# </a> (1999-09)
# "Time: Mongolia has two time zones. Three westernmost provinces of
# Bayan-Ulgii, Uvs, and Hovd are one hour earlier than the capital city, and
# the rest of the country follows the Ulaanbaatar time, which is UTC/GMT plus
# eight hours."
# From Rives McDow (1999-12-13):
# Mongolia discontinued the use of daylight savings time in 1999; 1998
# being the last year it was implemented. The dates of implementation I am
# unsure of, but most probably it was similar to Russia, except for the time
# of implementation may have been different....
# Some maps in the past have indicated that there was an additional time
# zone in the eastern part of Mongolia, including the provinces of Dornod,
# Suhbaatar, and possibly Khentij.
# From Paul Eggert (1999-12-15):
# Naming and spelling is tricky in Mongolia.
# We'll use Hovd (also spelled Chovd and Khovd) to represent the west zone;
# the capital of the Hovd province is sometimes called Hovd, sometimes Dund-Us,
# and sometimes Jirgalanta (with variant spellings), but the name Hovd
# is good enough for our purposes.
# From Rives McDow (2001-05-13):
# In addition to Mongolia starting daylight savings as reported earlier
# (adopted DST on 2001-04-27 02:00 local time, ending 2001-09-28),
# there are three time zones.
#
# Provinces [at 7:00]: Bayan-ulgii, Uvs, Khovd, Zavkhan, Govi-Altai
# Provinces [at 8:00]: Khovsgol, Bulgan, Arkhangai, Khentii, Tov,
# Bayankhongor, Ovorkhangai, Dundgovi, Dornogovi, Omnogovi
# Provinces [at 9:00]: Dornod, Sukhbaatar
#
# [The province of Selenge is omitted from the above lists.]
# From Ganbold Ts., Ulaanbaatar (2004-04-17):
# Daylight saving occurs at 02:00 local time last Saturday of March.
# It will change back to normal at 02:00 local time last Saturday of
# September.... As I remember this rule was changed in 2001.
#
# From Paul Eggert (2004-04-17):
# For now, assume Rives McDow's informant got confused about Friday vs
# Saturday, and that his 2001 dates should have 1 added to them.
# From Paul Eggert (2005-07-26):
# We have wildly conflicting information about Mongolia's time zones.
# Bill Bonnet (2005-05-19) reports that the US Embassy in Ulaanbaatar says
# there is only one time zone and that DST is observed, citing Microsoft
# Windows XP as the source. Risto Nykanen (2005-05-16) reports that
# travelmongolia.org says there are two time zones (UTC+7, UTC+8) with no DST.
# Oscar van Vlijmen (2005-05-20) reports that the Mongolian Embassy in
# Washington, DC says there are two time zones, with DST observed.
# He also found
# <http://ubpost.mongolnews.mn/index.php?subaction=showcomments&id=1111634894&archive=&start_from=&ucat=1&>
# which also says that there is DST, and which has a comment by "Toddius"
# (2005-03-31 06:05 +0700) saying "Mongolia actually has 3.5 time zones.
# The West (OLGII) is +7 GMT, most of the country is ULAT is +8 GMT
# and some Eastern provinces are +9 GMT but Sukhbaatar Aimag is SUHK +8.5 GMT.
# The SUKH timezone is new this year, it is one of the few things the
# parliament passed during the tumultuous winter session."
# For now, let's ignore this information, until we have more confirmation.
# From Ganbold Ts. (2007-02-26):
# Parliament of Mongolia has just changed the daylight-saving rule in February.
# They decided not to adopt daylight-saving time....
# http://www.mongolnews.mn/index.php?module=unuudur&sec=view&id=15742
# From Deborah Goldsmith (2008-03-30):
# We received a bug report claiming that the tz database UTC offset for
# Asia/Choibalsan (GMT+09:00) is incorrect, and that it should be GMT
# +08:00 instead. Different sources appear to disagree with the tz
# database on this, e.g.:
#
# <a href="http://www.timeanddate.com/worldclock/city.html?n=1026">
# http://www.timeanddate.com/worldclock/city.html?n=1026
# </a>
# <a href="http://www.worldtimeserver.com/current_time_in_MN.aspx">
# http://www.worldtimeserver.com/current_time_in_MN.aspx
# </a>
#
# both say GMT+08:00.
# From Steffen Thorsen (2008-03-31):
# eznis airways, which operates several domestic flights, has a flight
# schedule here:
# <a href="http://www.eznis.com/Container.jsp?id=112">
# http://www.eznis.com/Container.jsp?id=112
# </a>
# (click the English flag for English)
#
# There it appears that flights between Choibalsan and Ulaanbatar arrive
# about 1:35 - 1:50 hours later in local clock time, no matter the
# direction, while Ulaanbaatar-Khvod takes 2 hours in the Eastern
# direction and 3:35 back, which indicates that Ulaanbatar and Khvod are
# in different time zones (like we know about), while Choibalsan and
# Ulaanbatar are in the same time zone (correction needed).
# From Arthur David Olson (2008-05-19):
# Assume that Choibalsan is indeed offset by 8:00.
# XXX--in the absence of better information, assume that transition
# was at the start of 2008-03-31 (the day of Steffen Thorsen's report);
# this is almost surely wrong.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Mongol 1983 1984 - Apr 1 0:00 1:00 S
Rule Mongol 1983 only - Oct 1 0:00 0 -
# Shanks & Pottenger and IATA SSIM say 1990s switches occurred at 00:00,
# but McDow says the 2001 switches occurred at 02:00. Also, IATA SSIM
# (1996-09) says 1996-10-25. Go with Shanks & Pottenger through 1998.
#
# Shanks & Pottenger say that the Sept. 1984 through Sept. 1990 switches
# in Choibalsan (more precisely, in Dornod and Sukhbaatar) took place
# at 02:00 standard time, not at 00:00 local time as in the rest of
# the country. That would be odd, and possibly is a result of their
# correction of 02:00 (in the previous edition) not being done correctly
# in the latest edition; so ignore it for now.
Rule Mongol 1985 1998 - Mar lastSun 0:00 1:00 S
Rule Mongol 1984 1998 - Sep lastSun 0:00 0 -
# IATA SSIM (1999-09) says Mongolia no longer observes DST.
Rule Mongol 2001 only - Apr lastSat 2:00 1:00 S
Rule Mongol 2001 2006 - Sep lastSat 2:00 0 -
Rule Mongol 2002 2006 - Mar lastSat 2:00 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
# Hovd, a.k.a. Chovd, Dund-Us, Dzhargalant, Khovd, Jirgalanta
Zone Asia/Hovd 6:06:36 - LMT 1905 Aug
6:00 - HOVT 1978 # Hovd Time
7:00 Mongol HOV%sT
# Ulaanbaatar, a.k.a. Ulan Bataar, Ulan Bator, Urga
Zone Asia/Ulaanbaatar 7:07:32 - LMT 1905 Aug
7:00 - ULAT 1978 # Ulaanbaatar Time
8:00 Mongol ULA%sT
# Choibalsan, a.k.a. Bajan Tuemen, Bajan Tumen, Chojbalsan,
# Choybalsan, Sanbejse, Tchoibalsan
Zone Asia/Choibalsan 7:38:00 - LMT 1905 Aug
7:00 - ULAT 1978
8:00 - ULAT 1983 Apr
9:00 Mongol CHO%sT 2008 Mar 31 # Choibalsan Time
8:00 Mongol CHO%sT
# Nepal
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Kathmandu 5:41:16 - LMT 1920
5:30 - IST 1986
5:45 - NPT # Nepal Time
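# Nepal's 5:45 is one of the few 45-minute offsets in use; a minimal Python
# check (not part of the zic data; it assumes a system tzdata):
#
#    from datetime import datetime
#    from zoneinfo import ZoneInfo  # Python 3.9+
#    print(datetime(2012, 1, 1, tzinfo=ZoneInfo("Asia/Kathmandu")).utcoffset())
#    # 5:45:00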
# Oman
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Muscat 3:54:20 - LMT 1920
4:00 - GST
# Pakistan
# From Rives McDow (2002-03-13):
# I have been advised that Pakistan has decided to adopt dst on a
# TRIAL basis for one year, starting 00:01 local time on April 7, 2002
# and ending at 00:01 local time October 6, 2002. This is what I was
# told, but I believe that the actual time of change may be 00:00; the
# 00:01 was to make it clear which day it was on.
# From Paul Eggert (2002-03-15):
# Jesper Norgaard found this URL:
# http://www.pak.gov.pk/public/news/app/app06_dec.htm
# (dated 2001-12-06) which says that the Cabinet adopted a scheme "to
# advance the clocks by one hour on the night between the first
# Saturday and Sunday of April and revert to the original position on
# 15th October each year". This agrees with McDow's 04-07 at 00:00,
# but disagrees about the October transition, and makes it sound like
# it's not on a trial basis. Also, the "between the first Saturday
# and Sunday of April" phrase, if taken literally, means that the
# transition takes place at 00:00 on the first Sunday on or after 04-02.
# From Paul Eggert (2003-02-09):
# DAWN <http://www.dawn.com/2002/10/06/top13.htm> reported on 2002-10-05
# that 2002 DST ended that day at midnight. Go with McDow for now.
# From Steffen Thorsen (2003-03-14):
# According to http://www.dawn.com/2003/03/07/top15.htm
# there will be no DST in Pakistan this year:
#
# ISLAMABAD, March 6: Information and Media Development Minister Sheikh
# Rashid Ahmed on Thursday said the cabinet had reversed a previous
# decision to advance clocks by one hour in summer and put them back by
# one hour in winter with the aim of saving light hours and energy.
#
# The minister told a news conference that the experiment had rather
# shown 8 per cent higher consumption of electricity.
# From Alex Krivenyshev (2008-05-15):
#
# Here is an article that Pakistan plan to introduce Daylight Saving Time
# on June 1, 2008 for 3 months.
#
# "... The federal cabinet on Wednesday announced a new conservation plan to help
# reduce load shedding by approving the closure of commercial centres at 9pm and
# moving clocks forward by one hour for the next three months.
# ...."
#
# <a href="http://www.worldtimezone.net/dst_news/dst_news_pakistan01.html">
# http://www.worldtimezone.net/dst_news/dst_news_pakistan01.html
# </a>
# OR
# <a href="http://www.dailytimes.com.pk/default.asp?page=2008%5C05%5C15%5Cstory_15-5-2008_pg1_4">
# http://www.dailytimes.com.pk/default.asp?page=2008%5C05%5C15%5Cstory_15-5-2008_pg1_4
# </a>
# From Arthur David Olson (2008-05-19):
# XXX--midnight transitions are a guess; 2008 only is a guess.
# From Alexander Krivenyshev (2008-08-28):
# Pakistan government has decided to keep the watches one-hour advanced
# for another 2 months--plan to return to Standard Time on October 31
# instead of August 31.
#
# <a href="http://www.worldtimezone.com/dst_news/dst_news_pakistan02.html">
# http://www.worldtimezone.com/dst_news/dst_news_pakistan02.html
# </a>
# OR
# <a href="http://dailymailnews.com/200808/28/news/dmbrn03.html">
# http://dailymailnews.com/200808/28/news/dmbrn03.html
# </a>
# From Alexander Krivenyshev (2009-04-08):
# Based on previous media reports that "... proposed plan to
# advance clocks by one hour from May 1 will cause disturbance
# to the working schedules rather than bringing discipline in
# official working."
# <a href="http://www.thenews.com.pk/daily_detail.asp?id=171280">
# http://www.thenews.com.pk/daily_detail.asp?id=171280
# </a>
#
# recent news that instead of May 2009 - Pakistan plan to
# introduce DST from April 15, 2009
#
# FYI: Associated Press Of Pakistan
# April 08, 2009
# Cabinet okays proposal to advance clocks by one hour from April 15
# <a href="http://www.app.com.pk/en_/index.php?option=com_content&task=view&id=73043&Itemid=1">
# http://www.app.com.pk/en_/index.php?option=com_content&task=view&id=73043&Itemid=1
# </a>
#
# or
#
# <a href="http://www.worldtimezone.com/dst_news/dst_news_pakistan05.html">
# http://www.worldtimezone.com/dst_news/dst_news_pakistan05.html
# </a>
#
# ....
# The Federal Cabinet on Wednesday approved the proposal to
# advance clocks in the country by one hour from April 15 to
# conserve energy"
# From Steffen Thorsen (2009-09-17):
# "The News International," Pakistan reports that: "The Federal
# Government has decided to restore the previous time by moving the
# clocks backward by one hour from October 1. A formal announcement to
# this effect will be made after the Prime Minister grants approval in
# this regard."
# <a href="http://www.thenews.com.pk/updates.asp?id=87168">
# http://www.thenews.com.pk/updates.asp?id=87168
# </a>
# From Alexander Krivenyshev (2009-09-28):
# According to Associated Press Of Pakistan, it is confirmed that
# Pakistan clocks across the country would be turned back by an hour from October
# 1, 2009.
#
# "Clocks to go back one hour from 1 Oct"
# <a href="http://www.app.com.pk/en_/index.php?option=com_content&task=view&id=86715&Itemid=2">
# http://www.app.com.pk/en_/index.php?option=com_content&task=view&id=86715&Itemid=2
# </a>
# or
# <a href="http://www.worldtimezone.com/dst_news/dst_news_pakistan07.htm">
# http://www.worldtimezone.com/dst_news/dst_news_pakistan07.htm
# </a>
# From Steffen Thorsen (2009-09-29):
# Alexander Krivenyshev wrote:
# > According to Associated Press Of Pakistan, it is confirmed that
# > Pakistan clocks across the country would be turned back by an hour from October
# > 1, 2009.
#
# Now they seem to have changed their mind, November 1 is the new date:
# <a href="http://www.thenews.com.pk/top_story_detail.asp?Id=24742">
# http://www.thenews.com.pk/top_story_detail.asp?Id=24742
# </a>
# "The country's clocks will be reversed by one hour on November 1.
# Officials of Federal Ministry for Interior told this to Geo News on
# Monday."
#
# And more importantly, it seems that these dates will be kept every year:
# "It has now been decided that clocks will be wound forward by one hour
# on April 15 and reversed by an hour on November 1 every year without
# obtaining prior approval, the officials added."
#
# We have confirmed this year's end date with both with the Ministry of
# Water and Power and the Pakistan Electric Power Company:
# <a href="http://www.timeanddate.com/news/time/pakistan-ends-dst09.html">
# http://www.timeanddate.com/news/time/pakistan-ends-dst09.html
# </a>
# From Christoph Goehre (2009-10-01):
# [T]he German Consulate General in Karachi reported me today that Pakistan
# will go back to standard time on 1st of November.
# From Steffen Thorsen (2010-03-26):
# Steffen Thorsen wrote:
# > On Thursday (2010-03-25) it was announced that DST would start in
# > Pakistan on 2010-04-01.
# >
# > Then today, the president said that they might have to revert the
# > decision if it is not supported by the parliament. So at the time
# > being, it seems unclear if DST will be actually observed or not - but
# > April 1 could be a more likely date than April 15.
# Now, it seems that the decision to not observe DST is final:
#
# "Govt Withdraws Plan To Advance Clocks"
# <a href="http://www.apakistannews.com/govt-withdraws-plan-to-advance-clocks-172041">
# http://www.apakistannews.com/govt-withdraws-plan-to-advance-clocks-172041
# </a>
#
# "People laud PM's announcement to end DST"
# <a href="http://www.app.com.pk/en_/index.php?option=com_content&task=view&id=99374&Itemid=2">
# http://www.app.com.pk/en_/index.php?option=com_content&task=view&id=99374&Itemid=2
# </a>
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Pakistan 2002 only - Apr Sun>=2 0:01 1:00 S
Rule Pakistan 2002 only - Oct Sun>=2 0:01 0 -
Rule Pakistan 2008 only - Jun 1 0:00 1:00 S
Rule Pakistan 2008 only - Nov 1 0:00 0 -
Rule Pakistan 2009 only - Apr 15 0:00 1:00 S
Rule Pakistan 2009 only - Nov 1 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Karachi 4:28:12 - LMT 1907
5:30 - IST 1942 Sep
5:30 1:00 IST 1945 Oct 15
5:30 - IST 1951 Sep 30
5:00 - KART 1971 Mar 26 # Karachi Time
5:00 Pakistan PK%sT # Pakistan Time
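# A minimal Python check of the 2008 trial DST above (not part of the zic
# data; it assumes a system tzdata that carries this history):
#
#    from datetime import datetime
#    from zoneinfo import ZoneInfo  # Python 3.9+
#    karachi = ZoneInfo("Asia/Karachi")
#    print(datetime(2008, 7, 1, tzinfo=karachi).utcoffset())  # 6:00:00
#    print(datetime(2009, 1, 1, tzinfo=karachi).utcoffset())  # 5:00:00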
# Palestine
# From Amos Shapir (1998-02-15):
#
# From 1917 until 1948-05-15, all of Palestine, including the parts now
# known as the Gaza Strip and the West Bank, was under British rule.
# Therefore the rules given for Israel for that period, apply there too...
#
# The Gaza Strip was under Egyptian rule between 1948-05-15 until 1967-06-05
# (except a short occupation by Israel from 1956-11 till 1957-03, but no
# time zone was affected then). It was never formally annexed to Egypt,
# though.
#
# The rest of Palestine was under Jordanian rule at that time, formally
# annexed in 1950 as the West Bank (and the word "Trans" was dropped from
# the country's previous name of "the Hashemite Kingdom of the
# Trans-Jordan"). So the rules for Jordan for that time apply. Major
# towns in that area are Nablus (Shchem), El-Halil (Hebron), Ramallah, and
# East Jerusalem.
#
# Both areas were occupied by Israel in June 1967, but not annexed (except
# for East Jerusalem). They were on Israel time since then; there might
# have been a Military Governor's order about time zones, but I'm not aware
# of any (such orders may have been issued semi-annually whenever summer
# time was in effect, but maybe the legal aspect of time was just neglected).
#
# The Palestinian Authority was established in 1993, and got hold of most
# towns in the West Bank and Gaza by 1995. I know that in order to
# demonstrate...independence, they have been switching to
# summer time and back on a different schedule than Israel's, but I don't
# know when this was started, or what algorithm is used (most likely the
# Jordanian one).
#
# To summarize, the table should probably look something like that:
#
# Area \ when | 1918-1947 | 1948-1967 | 1967-1995 | 1996-
# ------------+-----------+-----------+-----------+-----------
# Israel | Zion | Zion | Zion | Zion
# West bank | Zion | Jordan | Zion | Jordan
# Gaza | Zion | Egypt | Zion | Jordan
#
# I guess more info may be available from the PA's web page (if/when they
# have one).
# From Paul Eggert (2006-03-22):
# Shanks & Pottenger write that Gaza did not observe DST until 1957, but go
# with Shapir and assume that it observed DST from 1940 through 1947,
# and that it used Jordanian rules starting in 1996.
# We don't yet need a separate entry for the West Bank, since
# the only differences between it and Gaza that we know about
# occurred before our cutoff date of 1970.
# However, as we get more information, we may need to add entries
# for parts of the West Bank as they transitioned from Israel's rules
# to Palestine's rules.
# From IINS News Service - Israel - 1998-03-23 10:38:07 Israel time,
# forwarded by Ephraim Silverberg:
#
# Despite the fact that Israel changed over to daylight savings time
# last week, the PLO Authority (PA) has decided not to turn its clocks
# one-hour forward at this time. As a sign of independence from Israeli rule,
# the PA has decided to implement DST in April.
# From Paul Eggert (1999-09-20):
# Daoud Kuttab writes in
# <a href="http://www.jpost.com/com/Archive/22.Apr.1999/Opinion/Article-2.html">
# Holiday havoc
# </a> (Jerusalem Post, 1999-04-22) that
# the Palestinian National Authority changed to DST on 1999-04-15.
# I vaguely recall that they switch back in October (sorry, forgot the source).
# For now, let's assume that the spring switch was at 24:00,
# and that they switch at 0:00 on the 3rd Fridays of April and October.
# From Paul Eggert (2005-11-22):
# Starting 2004 transitions are from Steffen Thorsen's web site timeanddate.com.
# From Steffen Thorsen (2005-11-23):
# A user from Gaza reported that Gaza made the change early because of
# the Ramadan. Next year Ramadan will be even earlier, so I think
# there is a good chance next year's end date will be around two weeks
# earlier--the same goes for Jordan.
# From Steffen Thorsen (2006-08-17):
# I was informed by a user in Bethlehem that in Bethlehem it started the
# same day as Israel, and after checking with other users in the area, I
# was informed that they started DST one day after Israel. I was not
# able to find any authoritative sources at the time, nor details if
# Gaza changed as well, but presumed Gaza to follow the same rules as
# the West Bank.
# From Steffen Thorsen (2006-09-26):
# according to the Palestine News Network (2006-09-19):
# http://english.pnn.ps/index.php?option=com_content&task=view&id=596&Itemid=5
# > The Council of Ministers announced that this year its winter schedule
# > will begin early, as of midnight Thursday. It is also time to turn
# > back the clocks for winter. Friday will begin an hour late this week.
# I guess it is likely that next year's date will be moved as well,
# because of the Ramadan.
# From Jesper Norgaard Welen (2007-09-18):
# According to Steffen Thorsen's web site the Gaza Strip and the rest of the
# Palestinian territories left DST early, on the 13th of September at 2:00.
# From Paul Eggert (2007-09-20):
# My understanding is that Gaza and the West Bank disagree even over when
# the weekend is (Thursday+Friday versus Friday+Saturday), so I'd be a bit
# surprised if they agreed about DST. But for now, assume they agree.
# For lack of better information, predict that future changes will be
# the 2nd Thursday of September at 02:00.
# From Alexander Krivenyshev (2008-08-28):
# Here is an article, that Mideast running on different clocks at Ramadan.
#
# Gaza Strip (as Egypt) ended DST at midnight Thursday (Aug 28, 2008), while
# the West Bank will end Daylight Saving Time at midnight Sunday (Aug 31, 2008).
#
# <a href="http://www.guardian.co.uk/world/feedarticle/7759001">
# http://www.guardian.co.uk/world/feedarticle/7759001
# </a>
# <a href="http://www.abcnews.go.com/International/wireStory?id=5676087">
# http://www.abcnews.go.com/International/wireStory?id=5676087
# </a>
# or
# <a href="http://www.worldtimezone.com/dst_news/dst_news_gazastrip01.html">
# http://www.worldtimezone.com/dst_news/dst_news_gazastrip01.html
# </a>
# From Alexander Krivenyshev (2009-03-26):
# According to the Palestine News Network (arabic.pnn.ps), Palestinian
# government decided to start Daylight Time on Thursday night March
# 26 and continue until the night of 27 September 2009.
#
# (in Arabic)
# <a href="http://arabic.pnn.ps/index.php?option=com_content&task=view&id=50850">
# http://arabic.pnn.ps/index.php?option=com_content&task=view&id=50850
# </a>
#
# or
# (English translation)
# <a href="http://www.worldtimezone.com/dst_news/dst_news_westbank01.html">
# http://www.worldtimezone.com/dst_news/dst_news_westbank01.html
# </a>
# From Steffen Thorsen (2009-08-31):
# Palestine's Council of Ministers announced that they will revert back to
# winter time on Friday, 2009-09-04.
#
# One news source:
# <a href="http://www.safa.ps/ara/?action=showdetail&seid=4158">
# http://www.safa.ps/ara/?action=showdetail&seid=4158
# </a>
# (Palestinian press agency, Arabic),
# Google translate: "Decided that the Palestinian government in Ramallah
# headed by Salam Fayyad, the start of work in time for the winter of
# 2009, starting on Friday approved the fourth delay Sept. clock sixty
# minutes per hour as of Friday morning."
#
# We are not sure if Gaza will do the same; last year they had a different
# end date. We will keep this page updated:
# <a href="http://www.timeanddate.com/news/time/westbank-gaza-dst-2009.html">
# http://www.timeanddate.com/news/time/westbank-gaza-dst-2009.html
# </a>
# From Alexander Krivenyshev (2009-09-02):
# Seems that Gaza Strip will go back to Winter Time same date as West Bank.
#
# According to Palestinian Ministry Of Interior, West Bank and Gaza Strip plan
# to change time back to Standard time on September 4, 2009.
#
# "Winter time unite the West Bank and Gaza"
# (from Palestinian National Authority):
# <a href="http://www.moi.gov.ps/en/?page=633167343250594025&nid=11505
# http://www.moi.gov.ps/en/?page=633167343250594025&nid=11505
# </a>
# or
# <a href="http://www.worldtimezone.com/dst_news/dst_news_gazastrip02.html>
# http://www.worldtimezone.com/dst_news/dst_news_gazastrip02.html
# </a>
# From Alexander Krivenyshev (2010-03-19):
# According to Voice of Palestine DST will last for 191 days, from March
# 26, 2010 till "the last Sunday before the tenth day of Tishri
# (October), each year" (October 03, 2010?)
#
# <a href="http://palvoice.org/forums/showthread.php?t=245697">
# http://palvoice.org/forums/showthread.php?t=245697
# </a>
# (in Arabic)
# or
# <a href="http://www.worldtimezone.com/dst_news/dst_news_westbank03.html">
# http://www.worldtimezone.com/dst_news/dst_news_westbank03.html
# </a>
# From Steffen Thorsen (2010-03-24):
# ...Ma'an News Agency reports that Hamas cabinet has decided it will
# start one day later, at 12:01am. Not sure if they really mean 12:01am or
# noon though:
#
# <a href="http://www.maannews.net/eng/ViewDetails.aspx?ID=271178">
# http://www.maannews.net/eng/ViewDetails.aspx?ID=271178
# </a>
# (Ma'an News Agency)
# "At 12:01am Friday, clocks in Israel and the West Bank will change to
# 1:01am, while Gaza clocks will change at 12:01am Saturday morning."
# From Steffen Thorsen (2010-08-11):
# According to several sources, including
# <a href="http://www.maannews.net/eng/ViewDetails.aspx?ID=306795">
# http://www.maannews.net/eng/ViewDetails.aspx?ID=306795
# </a>
# the clocks were set back one hour at 2010-08-11 00:00:00 local time in
# Gaza and the West Bank.
# Some more background info:
# <a href="http://www.timeanddate.com/news/time/westbank-gaza-end-dst-2010.html">
# http://www.timeanddate.com/news/time/westbank-gaza-end-dst-2010.html
# </a>
# From Steffen Thorsen (2011-08-26):
# Gaza and the West Bank did go back to standard time in the beginning of
# August, and will now enter daylight saving time again on 2011-08-30
# 00:00 (so two periods of DST in 2011). The pause was because of
# Ramadan.
#
# <a href="http://www.maannews.net/eng/ViewDetails.aspx?ID=416217">
# http://www.maannews.net/eng/ViewDetails.aspx?ID=416217
# </a>
# Additional info:
# <a href="http://www.timeanddate.com/news/time/palestine-dst-2011.html">
# http://www.timeanddate.com/news/time/palestine-dst-2011.html
# </a>
# From Alexander Krivenyshev (2011-08-27):
# According to the article in The Jerusalem Post:
# "...Earlier this month, the Palestinian government in the West Bank decided to
# move to standard time for 30 days, during Ramadan. The Palestinians in the
# Gaza Strip accepted the change and also moved their clocks one hour back.
# The Hamas government said on Saturday that it won't observe summertime after
# the Muslim feast of Id al-Fitr, which begins on Tuesday..."
# ...
# <a href="http://www.jpost.com/MiddleEast/Article.aspx?id=235650">
# http://www.jpost.com/MiddleEast/Article.aspx?id=235650
# </a>
# or
# <a href="http://www.worldtimezone.com/dst_news/dst_news_gazastrip05.html">
# http://www.worldtimezone.com/dst_news/dst_news_gazastrip05.html
# </a>
# The rules for Egypt are stolen from the `africa' file.
# From Steffen Thorsen (2011-09-30):
# West Bank did end Daylight Saving Time this morning/midnight (2011-09-30
# 00:00).
# So West Bank and Gaza now have the same time again.
#
# Many sources, including:
# <a href="http://www.maannews.net/eng/ViewDetails.aspx?ID=424808">
# http://www.maannews.net/eng/ViewDetails.aspx?ID=424808
# </a>
# From Steffen Thorsen (2012-03-26):
# Palestinian news sources tell that both Gaza and West Bank will start DST
# on Friday (Thursday midnight, 2012-03-29 24:00).
# Some of many sources in Arabic:
# <a href="http://www.samanews.com/index.php?act=Show&id=122638">
# http://www.samanews.com/index.php?act=Show&id=122638
# </a>
#
# <a href="http://safa.ps/details/news/74352/%D8%A8%D8%AF%D8%A1-%D8%A7%D9%84%D8%AA%D9%88%D9%82%D9%8A%D8%AA-%D8%A7%D9%84%D8%B5%D9%8A%D9%81%D9%8A-%D8%A8%D8%A7%D9%84%D8%B6%D9%81%D8%A9-%D9%88%D8%BA%D8%B2%D8%A9-%D9%84%D9%8A%D9%84%D8%A9-%D8%A7%D9%84%D8%AC%D9%85%D8%B9%D8%A9.html">
# http://safa.ps/details/news/74352/%D8%A8%D8%AF%D8%A1-%D8%A7%D9%84%D8%AA%D9%88%D9%82%D9%8A%D8%AA-%D8%A7%D9%84%D8%B5%D9%8A%D9%81%D9%8A-%D8%A8%D8%A7%D9%84%D8%B6%D9%81%D8%A9-%D9%88%D8%BA%D8%B2%D8%A9-%D9%84%D9%8A%D9%84%D8%A9-%D8%A7%D9%84%D8%AC%D9%85%D8%B9%D8%A9.html
# </a>
#
# Our brief summary:
# <a href="http://www.timeanddate.com/news/time/gaza-west-bank-dst-2012.html">
# http://www.timeanddate.com/news/time/gaza-west-bank-dst-2012.html
# </a>
# From Arthur David Olson (2012-03-27):
# The timeanddate article for 2012 says that "the end date has not yet been
# announced" and that "Last year, both...paused daylight saving time during...
# Ramadan. It is not yet known [for] 2012."
# For now, assume both switch back on the last Friday in September. XXX
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule EgyptAsia 1957 only - May 10 0:00 1:00 S
Rule EgyptAsia 1957 1958 - Oct 1 0:00 0 -
Rule EgyptAsia 1958 only - May 1 0:00 1:00 S
Rule EgyptAsia 1959 1967 - May 1 1:00 1:00 S
Rule EgyptAsia 1959 1965 - Sep 30 3:00 0 -
Rule EgyptAsia 1966 only - Oct 1 3:00 0 -
Rule Palestine 1999 2005 - Apr Fri>=15 0:00 1:00 S
Rule Palestine 1999 2003 - Oct Fri>=15 0:00 0 -
Rule Palestine 2004 only - Oct 1 1:00 0 -
Rule Palestine 2005 only - Oct 4 2:00 0 -
Rule Palestine 2006 2008 - Apr 1 0:00 1:00 S
Rule Palestine 2006 only - Sep 22 0:00 0 -
Rule Palestine 2007 only - Sep Thu>=8 2:00 0 -
Rule Palestine 2008 only - Aug lastFri 0:00 0 -
Rule Palestine 2009 only - Mar lastFri 0:00 1:00 S
Rule Palestine 2009 only - Sep Fri>=1 2:00 0 -
Rule Palestine 2010 only - Mar lastSat 0:01 1:00 S
Rule Palestine 2010 only - Aug 11 0:00 0 -
# From Arthur David Olson (2011-09-20):
# 2011 transitions per http://www.timeanddate.com as of 2011-09-20.
# From Paul Eggert (2012-10-12):
# 2012 transitions per http://www.timeanddate.com as of 2012-10-12.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Gaza 2:17:52 - LMT 1900 Oct
2:00 Zion EET 1948 May 15
2:00 EgyptAsia EE%sT 1967 Jun 5
2:00 Zion I%sT 1996
2:00 Jordan EE%sT 1999
2:00 Palestine EE%sT 2011 Apr 2 12:01
2:00 1:00 EEST 2011 Aug 1
2:00 - EET 2012 Mar 30
2:00 1:00 EEST 2012 Sep 21 1:00
2:00 - EET
Zone Asia/Hebron 2:20:23 - LMT 1900 Oct
2:00 Zion EET 1948 May 15
2:00 EgyptAsia EE%sT 1967 Jun 5
2:00 Zion I%sT 1996
2:00 Jordan EE%sT 1999
2:00 Palestine EE%sT 2008 Aug
2:00 1:00 EEST 2008 Sep
2:00 Palestine EE%sT 2011 Apr 1 12:01
2:00 1:00 EEST 2011 Aug 1
2:00 - EET 2011 Aug 30
2:00 1:00 EEST 2011 Sep 30 3:00
2:00 - EET 2012 Mar 30
2:00 1:00 EEST 2012 Sep 21 1:00
2:00 - EET
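# As a sanity check on the Gaza/Hebron divergence encoded above, one can
# query a compiled tz database from Python. This is an illustrative sketch
# only (it assumes Python 3.9+ with the zoneinfo module and an installed
# tzdata consistent with the tables above), not part of the tz distribution:
#
#     from datetime import datetime
#     from zoneinfo import ZoneInfo
#
#     # Mid-September 2011: Hebron (West Bank) was still on DST (EEST, +03:00)
#     # while Gaza had already returned to standard time (EET, +02:00).
#     for name in ("Asia/Gaza", "Asia/Hebron"):
#         dt = datetime(2011, 9, 15, 12, 0, tzinfo=ZoneInfo(name))
#         print(name, dt.utcoffset(), dt.tzname())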
# Paracel Is
# no information
# Philippines
# On 1844-08-16, Narciso Claveria, governor-general of the
# Philippines, issued a proclamation announcing that 1844-12-30 was to
# be immediately followed by 1845-01-01. Robert H. van Gent has a
# transcript of the decree in <http://www.phys.uu.nl/~vgent/idl/idl.htm>.
# The rest of the data are from Shanks & Pottenger.
# From Paul Eggert (2006-04-25):
# Tomorrow's Manila Standard reports that the Philippines Department of
# Trade and Industry is considering adopting DST this June when the
# rainy season begins. See
# <http://www.manilastandardtoday.com/?page=politics02_april26_2006>.
# For now, we'll ignore this, since it's not definite and we lack details.
#
# From Jesper Norgaard Welen (2006-04-26):
# ... claims that Philippines had DST last time in 1990:
# http://story.philippinetimes.com/p.x/ct/9/id/145be20cc6b121c0/cid/3e5bbccc730d258c/
# [a story dated 2006-04-25 by Cris Larano of Dow Jones Newswires,
# but no details]
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Phil 1936 only - Nov 1 0:00 1:00 S
Rule Phil 1937 only - Feb 1 0:00 0 -
Rule Phil 1954 only - Apr 12 0:00 1:00 S
Rule Phil 1954 only - Jul 1 0:00 0 -
Rule Phil 1978 only - Mar 22 0:00 1:00 S
Rule Phil 1978 only - Sep 21 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Manila -15:56:00 - LMT 1844 Dec 31
8:04:00 - LMT 1899 May 11
8:00 Phil PH%sT 1942 May
9:00 - JST 1944 Nov
8:00 Phil PH%sT
# Qatar
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Qatar 3:26:08 - LMT 1920 # Al Dawhah / Doha
4:00 - GST 1972 Jun
3:00 - AST
# Saudi Arabia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Riyadh 3:06:52 - LMT 1950
3:00 - AST
# Singapore
# The data here are taken from Mok Ly Yng (2003-10-30)
# <http://www.math.nus.edu.sg/aslaksen/teaching/timezone.html>.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Singapore 6:55:25 - LMT 1901 Jan 1
6:55:25 - SMT 1905 Jun 1 # Singapore M.T.
7:00 - MALT 1933 Jan 1 # Malaya Time
7:00 0:20 MALST 1936 Jan 1
7:20 - MALT 1941 Sep 1
7:30 - MALT 1942 Feb 16
9:00 - JST 1945 Sep 12
7:30 - MALT 1965 Aug 9 # independence
7:30 - SGT 1982 Jan 1 # Singapore Time
8:00 - SGT
# Spratly Is
# no information
# Sri Lanka
# From Paul Eggert (1996-09-03):
# "Sri Lanka advances clock by an hour to avoid blackout"
# (www.virtual-pc.com/lankaweb/news/items/240596-2.html, 1996-05-24,
# no longer available as of 1999-08-17)
# reported ``the country's standard time will be put forward by one hour at
# midnight Friday (1830 GMT) `in the light of the present power crisis'.''
#
# From Dharmasiri Senanayake, Sri Lanka Media Minister (1996-10-24), as quoted
# by Shamindra in
# <a href="news:54rka5$m5h@mtinsc01-mgt.ops.worldnet.att.net">
# Daily News - Hot News Section (1996-10-26)
# </a>:
# With effect from 12.30 a.m. on 26th October 1996
# Sri Lanka will be six (06) hours ahead of GMT.
# From Jesper Norgaard Welen (2006-04-14), quoting Sri Lanka News Online
# <http://news.sinhalaya.com/wmview.php?ArtID=11002> (2006-04-13):
# 0030 hrs on April 15, 2006 (midnight of April 14, 2006 +30 minutes)
# at present, become 2400 hours of April 14, 2006 (midnight of April 14, 2006).
# From Peter Apps and Ranga Sirila of Reuters (2006-04-12) in:
# <http://today.reuters.co.uk/news/newsArticle.aspx?type=scienceNews&storyID=2006-04-12T172228Z_01_COL295762_RTRIDST_0_SCIENCE-SRILANKA-TIME-DC.XML>
# [The Tamil Tigers] never accepted the original 1996 time change and simply
# kept their clocks set five and a half hours ahead of Greenwich Mean
# Time (GMT), in line with neighbor India.
# From Paul Eggert (2006-04-18):
# People who live in regions under Tamil control can use [TZ='Asia/Kolkata'],
# as that zone has agreed with the Tamil areas since our cutoff date of 1970.
# From K Sethu (2006-04-25):
# I think the abbreviation LKT originated from the world of computers at
# the time of or subsequent to the time zone changes by SL Government
# twice in 1996 and probably SL Government or its standardization
# agencies never declared an abbreviation as a national standard.
#
# I recollect, before the recent change, the government announcements
# mentioning it as simply changing Sri Lanka Standard Time or Sri Lanka
# Time and no mention was made about the abbreviation.
#
# If we look at Sri Lanka Department of Government's "Official News
# Website of Sri Lanka" ... http://www.news.lk/ we can see that they
# use SLT as abbreviation in time stamp at the beginning of each news
# item....
#
# Within Sri Lanka I think LKT is well known among computer users and
# administrators. In my opinion SLT may not be a good choice because the
# nation's largest telecom / internet operator, Sri Lanka Telecom, is well
# known by that abbreviation - simply as SLT (their IP domains are
# slt.lk and sltnet.lk).
#
# But if indeed our government has adopted SLT as standard abbreviation
# (that we have not known so far) then it is better that it be used for
# all computers.
# From Paul Eggert (2006-04-25):
# One possibility is that we wait for a bit for the dust to settle down
# and then see what people actually say in practice.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Colombo 5:19:24 - LMT 1880
5:19:32 - MMT 1906 # Moratuwa Mean Time
5:30 - IST 1942 Jan 5
5:30 0:30 IHST 1942 Sep
5:30 1:00 IST 1945 Oct 16 2:00
5:30 - IST 1996 May 25 0:00
6:30 - LKT 1996 Oct 26 0:30
6:00 - LKT 2006 Apr 15 0:30
5:30 - IST
# Syria
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Syria 1920 1923 - Apr Sun>=15 2:00 1:00 S
Rule Syria 1920 1923 - Oct Sun>=1 2:00 0 -
Rule Syria 1962 only - Apr 29 2:00 1:00 S
Rule Syria 1962 only - Oct 1 2:00 0 -
Rule Syria 1963 1965 - May 1 2:00 1:00 S
Rule Syria 1963 only - Sep 30 2:00 0 -
Rule Syria 1964 only - Oct 1 2:00 0 -
Rule Syria 1965 only - Sep 30 2:00 0 -
Rule Syria 1966 only - Apr 24 2:00 1:00 S
Rule Syria 1966 1976 - Oct 1 2:00 0 -
Rule Syria 1967 1978 - May 1 2:00 1:00 S
Rule Syria 1977 1978 - Sep 1 2:00 0 -
Rule Syria 1983 1984 - Apr 9 2:00 1:00 S
Rule Syria 1983 1984 - Oct 1 2:00 0 -
Rule Syria 1986 only - Feb 16 2:00 1:00 S
Rule Syria 1986 only - Oct 9 2:00 0 -
Rule Syria 1987 only - Mar 1 2:00 1:00 S
Rule Syria 1987 1988 - Oct 31 2:00 0 -
Rule Syria 1988 only - Mar 15 2:00 1:00 S
Rule Syria 1989 only - Mar 31 2:00 1:00 S
Rule Syria 1989 only - Oct 1 2:00 0 -
Rule Syria 1990 only - Apr 1 2:00 1:00 S
Rule Syria 1990 only - Sep 30 2:00 0 -
Rule Syria 1991 only - Apr 1 0:00 1:00 S
Rule Syria 1991 1992 - Oct 1 0:00 0 -
Rule Syria 1992 only - Apr 8 0:00 1:00 S
Rule Syria 1993 only - Mar 26 0:00 1:00 S
Rule Syria 1993 only - Sep 25 0:00 0 -
# IATA SSIM (1998-02) says 1998-04-02;
# (1998-09) says 1999-03-29 and 1999-09-29; (1999-02) says 1999-04-02,
# 2000-04-02, and 2001-04-02; (1999-09) says 2000-03-31 and 2001-03-31;
# (2006) says 2006-03-31 and 2006-09-22;
# for now ignore all these claims and go with Shanks & Pottenger,
# except for the 2006-09-22 claim (which seems right for Ramadan).
Rule Syria 1994 1996 - Apr 1 0:00 1:00 S
Rule Syria 1994 2005 - Oct 1 0:00 0 -
Rule Syria 1997 1998 - Mar lastMon 0:00 1:00 S
Rule Syria 1999 2006 - Apr 1 0:00 1:00 S
# From Stephen Colebourne (2006-09-18):
# According to IATA data, Syria will change DST on 21st September [21:00 UTC]
# this year [only].... This is probably related to Ramadan, like Egypt.
Rule Syria 2006 only - Sep 22 0:00 0 -
# From Paul Eggert (2007-03-29):
# Today the AP reported "Syria will switch to summertime at midnight Thursday."
# http://www.iht.com/articles/ap/2007/03/29/africa/ME-GEN-Syria-Time-Change.php
Rule Syria 2007 only - Mar lastFri 0:00 1:00 S
# From Jesper Norgard (2007-10-27):
# The sister center ICARDA of my work CIMMYT is confirming that Syria DST will
# not take place 1st November at 0:00 o'clock but 1st November at 24:00, or
# rather midnight between Thursday and Friday. This does make more sense than
# having it between Wednesday and Thursday (two workdays in Syria) since the
# weekend in Syria is not Saturday and Sunday, but Friday and Saturday. So now
# it is implemented at midnight of the last workday before the weekend...
#
# From Steffen Thorsen (2007-10-27):
# Jesper Norgaard Welen wrote:
#
# > "Winter local time in Syria will be observed at midnight of Thursday 1
# > November 2007, and the clock will be put back 1 hour."
#
# I found confirmation on this in this gov.sy-article (Arabic):
# http://wehda.alwehda.gov.sy/_print_veiw.asp?FileName=12521710520070926111247
#
# which using Google's translate tools says:
# Council of Ministers also approved the commencement of work on
# identifying the winter time as of Friday, 2/11/2007 where the 60th
# minute delay at midnight Thursday 1/11/2007.
Rule Syria 2007 only - Nov Fri>=1 0:00 0 -
# From Stephen Colebourne (2008-03-17):
# For everyone's info, I saw an IATA time zone change for [Syria] for
# this month (March 2008) in the last day or so...This is the data IATA
# are now using:
# Country Time Standard --- DST Start --- --- DST End --- DST
# Name Zone Variation Time Date Time Date
# Variation
# Syrian Arab
# Republic SY +0200 2200 03APR08 2100 30SEP08 +0300
# 2200 02APR09 2100 30SEP09 +0300
# 2200 01APR10 2100 30SEP10 +0300
# From Arthur David Olson (2008-03-17):
# Here's a link to English-language coverage by the Syrian Arab News
# Agency (SANA)...
# <a href="http://www.sana.sy/eng/21/2008/03/11/165173.htm">
# http://www.sana.sy/eng/21/2008/03/11/165173.htm
# </a>...which reads (in part) "The Cabinet approved the suggestion of the
# Ministry of Electricity to begin daylight savings time on Friday April
# 4th, advancing clocks one hour ahead on midnight of Thursday April 3rd."
# Since Syria is two hours east of UTC, the 2200 and 2100 transition times
# shown above match up with midnight in Syria.
# From Arthur David Olson (2008-03-18):
# My best guess at a Syrian rule is "the Friday nearest April 1";
# coding that involves either using a "Mar Fri>=29" construct that old time zone
# compilers can't handle or having multiple Rules (a la Israel).
# For now, use "Apr Fri>=1", and go with IATA on a uniform Sep 30 end.
# From Steffen Thorsen (2008-10-07):
# Syria has now officially decided to end DST on 2008-11-01 this year,
# according to the following article in the Syrian Arab News Agency (SANA).
#
# The article is in Arabic, and seems to tell that they will go back to
# winter time on 2008-11-01 at 00:00 local daylight time (delaying/setting
# clocks back 60 minutes).
#
# <a href="http://sana.sy/ara/2/2008/10/07/195459.htm">
# http://sana.sy/ara/2/2008/10/07/195459.htm
# </a>
# From Steffen Thorsen (2009-03-19):
# Syria will start DST on 2009-03-27 00:00 this year according to many sources,
# two examples:
#
# <a href="http://www.sana.sy/eng/21/2009/03/17/217563.htm">
# http://www.sana.sy/eng/21/2009/03/17/217563.htm
# </a>
# (English, Syrian Arab News Agency)
# <a href="http://thawra.alwehda.gov.sy/_View_news2.asp?FileName=94459258720090318012209">
# http://thawra.alwehda.gov.sy/_View_news2.asp?FileName=94459258720090318012209
# </a>
# (Arabic, gov-site)
#
# We have not found any sources saying anything about when DST ends this year.
#
# Our summary
# <a href="http://www.timeanddate.com/news/time/syria-dst-starts-march-27-2009.html">
# http://www.timeanddate.com/news/time/syria-dst-starts-march-27-2009.html
# </a>
# From Steffen Thorsen (2009-10-27):
# The Syrian Arab News Network on 2009-09-29 reported that Syria will
# revert back to winter (standard) time on midnight between Thursday
# 2009-10-29 and Friday 2009-10-30:
# <a href="http://www.sana.sy/ara/2/2009/09/29/247012.htm">
# http://www.sana.sy/ara/2/2009/09/29/247012.htm (Arabic)
# </a>
# From Arthur David Olson (2009-10-28):
# We'll see if future DST switching times turn out to be end of the last
# Thursday of the month or the start of the last Friday of the month or
# something else. For now, use the start of the last Friday.
# From Steffen Thorsen (2010-03-17):
# The "Syrian News Station" reported on 2010-03-16 that the Council of
# Ministers has decided that Syria will start DST on midnight Thursday
# 2010-04-01: (midnight between Thursday and Friday):
# <a href="http://sns.sy/sns/?path=news/read/11421">
# http://sns.sy/sns/?path=news/read/11421 (Arabic)
# </a>
# From Steffen Thorsen (2012-03-26):
# Today, Syria's government announced that they will start DST early on Friday
# (00:00). This is a bit earlier than the past two years.
#
# From Syrian Arab News Agency, in Arabic:
# <a href="http://www.sana.sy/ara/2/2012/03/26/408215.htm">
# http://www.sana.sy/ara/2/2012/03/26/408215.htm
# </a>
#
# Our brief summary:
# <a href="http://www.timeanddate.com/news/time/syria-dst-2012.html">
# http://www.timeanddate.com/news/time/syria-dst-2012.html
# </a>
# From Arthur David Olson (2012-03-27):
# Assume last Friday in March going forward XXX.
Rule Syria 2008 only - Apr Fri>=1 0:00 1:00 S
Rule Syria 2008 only - Nov 1 0:00 0 -
Rule Syria 2009 only - Mar lastFri 0:00 1:00 S
Rule Syria 2010 2011 - Apr Fri>=1 0:00 1:00 S
Rule Syria 2012 max - Mar lastFri 0:00 1:00 S
Rule Syria 2009 max - Oct lastFri 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Damascus 2:25:12 - LMT 1920 # Dimashq
2:00 Syria EE%sT
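# The ON fields used above ("Apr Fri>=1", "Mar lastFri", "Oct lastFri") are
# day-of-week searches rather than fixed dates. An illustrative Python sketch
# of the "lastFri" search follows (not how the zic compiler itself is
# implemented):
#
#     import calendar
#     import datetime
#
#     def last_weekday(year, month, weekday):
#         # Last date in the month that falls on `weekday` (Monday=0).
#         last_day = calendar.monthrange(year, month)[1]
#         d = datetime.date(year, month, last_day)
#         while d.weekday() != weekday:
#             d -= datetime.timedelta(days=1)
#         return d
#
#     last_weekday(2012, 3, calendar.FRIDAY)  # -> 2012-03-30, "Mar lastFri"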
# Tajikistan
# From Shanks & Pottenger.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Dushanbe 4:35:12 - LMT 1924 May 2
5:00 - DUST 1930 Jun 21 # Dushanbe Time
6:00 RussiaAsia DUS%sT 1991 Mar 31 2:00s
5:00 1:00 DUSST 1991 Sep 9 2:00s
5:00 - TJT # Tajikistan Time
# Thailand
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Bangkok 6:42:04 - LMT 1880
6:42:04 - BMT 1920 Apr # Bangkok Mean Time
7:00 - ICT
# Turkmenistan
# From Shanks & Pottenger.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Ashgabat 3:53:32 - LMT 1924 May 2 # or Ashkhabad
4:00 - ASHT 1930 Jun 21 # Ashkhabad Time
5:00 RussiaAsia ASH%sT 1991 Mar 31 2:00
4:00 RussiaAsia ASH%sT 1991 Oct 27 # independence
4:00 RussiaAsia TM%sT 1992 Jan 19 2:00
5:00 - TMT
# United Arab Emirates
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Dubai 3:41:12 - LMT 1920
4:00 - GST
# Uzbekistan
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Samarkand 4:27:12 - LMT 1924 May 2
4:00 - SAMT 1930 Jun 21 # Samarkand Time
5:00 - SAMT 1981 Apr 1
5:00 1:00 SAMST 1981 Oct 1
6:00 - TAST 1982 Apr 1 # Tashkent Time
5:00 RussiaAsia SAM%sT 1991 Sep 1 # independence
5:00 RussiaAsia UZ%sT 1992
5:00 - UZT
Zone Asia/Tashkent 4:37:12 - LMT 1924 May 2
5:00 - TAST 1930 Jun 21 # Tashkent Time
6:00 RussiaAsia TAS%sT 1991 Mar 31 2:00
5:00 RussiaAsia TAS%sT 1991 Sep 1 # independence
5:00 RussiaAsia UZ%sT 1992
5:00 - UZT
# Vietnam
# From Arthur David Olson (2008-03-18):
# The English-language name of Vietnam's most populous city is "Ho Chi Minh City";
# we use Ho_Chi_Minh below to avoid a name of more than 14 characters.
# From Shanks & Pottenger:
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Ho_Chi_Minh 7:06:40 - LMT 1906 Jun 9
7:06:20 - SMT 1911 Mar 11 0:01 # Saigon MT?
7:00 - ICT 1912 May
8:00 - ICT 1931 May
7:00 - ICT
# Yemen
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Aden 3:00:48 - LMT 1950
3:00 - AST
|
27182812/ChatGLM-LLaMA-chinese-insturct | 16,228 | src/transformers/utils/generic.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generic utilities
"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
"""
Descriptor that mimics @property but caches output in member variable.
From tensorflow_datasets
Built-in in functools from Python 3.8.
"""
def __get__(self, obj, objtype=None):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute")
attr = "__cached_" + self.fget.__name__
cached = getattr(obj, attr, None)
if cached is None:
cached = self.fget(obj)
setattr(obj, attr, cached)
return cached
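# Illustrative usage of `cached_property` (a hypothetical example, not part
# of this module's API):
#
#     class Dataset:
#         @cached_property
#         def size(self):
#             print("computed once")
#             return 42
#
#     d = Dataset()
#     d.size  # prints "computed once", returns 42
#     d.size  # returns the cached 42; fget is not called again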
def is_tensor(x):
"""
Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jaxlib.xla_extension.DeviceArray` or `np.ndarray`.
"""
if is_torch_fx_proxy(x):
return True
if is_torch_available():
import torch
if isinstance(x, torch.Tensor):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(x, tf.Tensor):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(x, (jnp.ndarray, Tracer)):
return True
return isinstance(x, np.ndarray)
def _is_numpy(x):
return isinstance(x, np.ndarray)
def is_numpy_array(x):
"""
Tests if `x` is a numpy array or not.
"""
return _is_numpy(x)
def _is_torch(x):
import torch
return isinstance(x, torch.Tensor)
def is_torch_tensor(x):
"""
Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed.
"""
return False if not is_torch_available() else _is_torch(x)
def _is_torch_device(x):
import torch
return isinstance(x, torch.device)
def is_torch_device(x):
"""
Tests if `x` is a torch device or not. Safe to call even if torch is not installed.
"""
return False if not is_torch_available() else _is_torch_device(x)
def _is_torch_dtype(x):
import torch
if isinstance(x, str):
if hasattr(torch, x):
x = getattr(torch, x)
else:
return False
return isinstance(x, torch.dtype)
def is_torch_dtype(x):
"""
Tests if `x` is a torch dtype or not. Safe to call even if torch is not installed.
"""
return False if not is_torch_available() else _is_torch_dtype(x)
def _is_tensorflow(x):
import tensorflow as tf
return isinstance(x, tf.Tensor)
def is_tf_tensor(x):
"""
Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed.
"""
return False if not is_tf_available() else _is_tensorflow(x)
def _is_jax(x):
import jax.numpy as jnp # noqa: F811
return isinstance(x, jnp.ndarray)
def is_jax_tensor(x):
"""
Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed.
"""
return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
"""
Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.
"""
if isinstance(obj, (dict, UserDict)):
return {k: to_py_obj(v) for k, v in obj.items()}
elif isinstance(obj, (list, tuple)):
return [to_py_obj(o) for o in obj]
elif is_tf_tensor(obj):
return obj.numpy().tolist()
elif is_torch_tensor(obj):
return obj.detach().cpu().tolist()
elif is_jax_tensor(obj):
return np.asarray(obj).tolist()
elif isinstance(obj, (np.ndarray, np.number)): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def to_numpy(obj):
"""
Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.
"""
if isinstance(obj, (dict, UserDict)):
return {k: to_numpy(v) for k, v in obj.items()}
elif isinstance(obj, (list, tuple)):
return np.array(obj)
elif is_tf_tensor(obj):
return obj.numpy()
elif is_torch_tensor(obj):
return obj.detach().cpu().numpy()
elif is_jax_tensor(obj):
return np.asarray(obj)
else:
return obj
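# Example of the two converters above (illustrative; uses plain Python and
# NumPy inputs, so no framework tensors are required):
#
#     to_py_obj({"ids": np.array([[1, 2], [3, 4]])})
#     # -> {"ids": [[1, 2], [3, 4]]}   (nested plain lists)
#
#     to_numpy([1.0, 2.0, 3.0])
#     # -> array([1., 2., 3.])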
class ModelOutput(OrderedDict):
"""
Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a
tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular
python dictionary.
<Tip warning={true}>
You can't unpack a `ModelOutput` directly. Use the [`~utils.ModelOutput.to_tuple`] method to convert it to a tuple
before.
</Tip>
"""
def __post_init__(self):
class_fields = fields(self)
# Safety and consistency checks
if not len(class_fields):
raise ValueError(f"{self.__class__.__name__} has no fields.")
if not all(field.default is None for field in class_fields[1:]):
raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")
first_field = getattr(self, class_fields[0].name)
other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
if other_fields_are_none and not is_tensor(first_field):
if isinstance(first_field, dict):
iterator = first_field.items()
first_field_iterator = True
else:
try:
iterator = iter(first_field)
first_field_iterator = True
except TypeError:
first_field_iterator = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(iterator):
if (
not isinstance(element, (list, tuple))
or not len(element) == 2
or not isinstance(element[0], str)
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
self[class_fields[0].name] = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
)
break
setattr(self, element[0], element[1])
if element[1] is not None:
self[element[0]] = element[1]
elif first_field is not None:
self[class_fields[0].name] = first_field
else:
for field in class_fields:
v = getattr(self, field.name)
if v is not None:
self[field.name] = v
def __delitem__(self, *args, **kwargs):
raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
def setdefault(self, *args, **kwargs):
raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
def pop(self, *args, **kwargs):
raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
def update(self, *args, **kwargs):
raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
def __getitem__(self, k):
if isinstance(k, str):
inner_dict = {k: v for (k, v) in self.items()}
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__(self, name, value):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(name, value)
super().__setattr__(name, value)
def __setitem__(self, key, value):
# Will raise a KeyException if needed
super().__setitem__(key, value)
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(key, value)
def to_tuple(self) -> Tuple[Any]:
"""
Convert self to a tuple containing all the attributes/keys that are not `None`.
"""
return tuple(self[k] for k in self.keys())
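# Illustrative `ModelOutput` subclass (hypothetical, for demonstration only):
#
#     from dataclasses import dataclass
#     from typing import Optional
#
#     @dataclass
#     class ToyOutput(ModelOutput):
#         logits: Optional[np.ndarray] = None
#         hidden_states: Optional[np.ndarray] = None
#
#     out = ToyOutput(logits=np.zeros((1, 2)))
#     out["logits"] is out.logits  # True: dict-style and attribute access agree
#     out.to_tuple()               # -> (logits,); the `None` field is skipped
#     out[0]                       # integer indexing goes through to_tuple()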
class ExplicitEnum(str, Enum):
"""
Enum with more explicit error message for missing values.
"""
@classmethod
def _missing_(cls, value):
raise ValueError(
f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
)
class PaddingStrategy(ExplicitEnum):
"""
Possible values for the `padding` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in an
IDE.
"""
LONGEST = "longest"
MAX_LENGTH = "max_length"
DO_NOT_PAD = "do_not_pad"
class TensorType(ExplicitEnum):
"""
Possible values for the `return_tensors` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for
tab-completion in an IDE.
"""
PYTORCH = "pt"
TENSORFLOW = "tf"
NUMPY = "np"
JAX = "jax"
class ContextManagers:
"""
Wrapper for `contextlib.ExitStack` which enters a collection of context managers. Adaptation of `ContextManagers`
in the `fastcore` library.
"""
def __init__(self, context_managers: List[ContextManager]):
self.context_managers = context_managers
self.stack = ExitStack()
def __enter__(self):
for context_manager in self.context_managers:
self.stack.enter_context(context_manager)
def __exit__(self, *args, **kwargs):
self.stack.__exit__(*args, **kwargs)
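# Illustrative usage (hypothetical): enter several context managers at once
# and have them all exited together.
#
#     managers = [tempfile.TemporaryDirectory() for _ in range(2)]
#     with ContextManagers(managers):
#         ...  # every manager has been entered here
#     # all managers are exited when the block is left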
def can_return_loss(model_class):
"""
Check if a given model can return loss.
Args:
model_class (`type`): The class of the model.
"""
base_classes = str(inspect.getmro(model_class))
if "keras.engine.training.Model" in base_classes:
signature = inspect.signature(model_class.call) # TensorFlow models
elif "torch.nn.modules.module.Module" in base_classes:
signature = inspect.signature(model_class.forward) # PyTorch models
else:
signature = inspect.signature(model_class.__call__) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def find_labels(model_class):
"""
Find the labels used by a given model.
Args:
model_class (`type`): The class of the model.
"""
model_name = model_class.__name__
base_classes = str(inspect.getmro(model_class))
if "keras.engine.training.Model" in base_classes:
signature = inspect.signature(model_class.call) # TensorFlow models
elif "torch.nn.modules.module.Module" in base_classes:
signature = inspect.signature(model_class.forward) # PyTorch models
else:
signature = inspect.signature(model_class.__call__) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
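# Example (illustrative; assumes PyTorch and the relevant model classes are
# importable):
#
#     from transformers import BertForSequenceClassification, BertForQuestionAnswering
#     find_labels(BertForSequenceClassification)  # -> ["labels"]
#     find_labels(BertForQuestionAnswering)       # -> ["start_positions", "end_positions"]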
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
"""Flatten a nested dict into a single level dict."""
def _flatten_dict(d, parent_key="", delimiter="."):
for k, v in d.items():
key = str(parent_key) + delimiter + str(k) if parent_key else k
if v and isinstance(v, MutableMapping):
yield from flatten_dict(v, key, delimiter=delimiter).items()
else:
yield key, v
return dict(_flatten_dict(d, parent_key, delimiter))
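# Example:
#
#     flatten_dict({"optimizer": {"lr": 1e-3, "betas": [0.9, 0.999]}})
#     # -> {"optimizer.lr": 0.001, "optimizer.betas": [0.9, 0.999]}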
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
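# Illustrative usage: callers that sometimes need a scratch directory can
# write the same code for both cases (the "./outputs" path is hypothetical):
#
#     with working_or_temp_dir("./outputs", use_temp_dir=True) as work_dir:
#         ...  # `work_dir` is a fresh temporary directory, removed on exit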
def transpose(array, axes=None):
"""
Framework-agnostic version of `numpy.transpose` that will work on torch/TensorFlow/Jax tensors as well as NumPy
arrays.
"""
if is_numpy_array(array):
return np.transpose(array, axes=axes)
elif is_torch_tensor(array):
return array.T if axes is None else array.permute(*axes)
elif is_tf_tensor(array):
import tensorflow as tf
return tf.transpose(array, perm=axes)
elif is_jax_tensor(array):
return jnp.transpose(array, axes=axes)
else:
raise ValueError(f"Type not supported for transpose: {type(array)}.")
def reshape(array, newshape):
"""
Framework-agnostic version of `numpy.reshape` that will work on torch/TensorFlow/Jax tensors as well as NumPy
arrays.
"""
if is_numpy_array(array):
return np.reshape(array, newshape)
elif is_torch_tensor(array):
return array.reshape(*newshape)
elif is_tf_tensor(array):
import tensorflow as tf
return tf.reshape(array, newshape)
elif is_jax_tensor(array):
return jnp.reshape(array, newshape)
else:
raise ValueError(f"Type not supported for reshape: {type(array)}.")
def squeeze(array, axis=None):
"""
Framework-agnostic version of `numpy.squeeze` that will work on torch/TensorFlow/Jax tensors as well as NumPy
arrays.
"""
if is_numpy_array(array):
return np.squeeze(array, axis=axis)
elif is_torch_tensor(array):
return array.squeeze() if axis is None else array.squeeze(dim=axis)
elif is_tf_tensor(array):
import tensorflow as tf
return tf.squeeze(array, axis=axis)
elif is_jax_tensor(array):
return jnp.squeeze(array, axis=axis)
else:
raise ValueError(f"Type not supported for squeeze: {type(array)}.")
def expand_dims(array, axis):
"""
Framework-agnostic version of `numpy.expand_dims` that will work on torch/TensorFlow/Jax tensors as well as NumPy
arrays.
"""
if is_numpy_array(array):
return np.expand_dims(array, axis)
elif is_torch_tensor(array):
return array.unsqueeze(dim=axis)
elif is_tf_tensor(array):
import tensorflow as tf
return tf.expand_dims(array, axis=axis)
elif is_jax_tensor(array):
return jnp.expand_dims(array, axis=axis)
else:
raise ValueError(f"Type not supported for expand_dims: {type(array)}.")
def tensor_size(array):
"""
Framework-agnostic version of `numpy.size` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.
"""
if is_numpy_array(array):
return np.size(array)
elif is_torch_tensor(array):
return array.numel()
elif is_tf_tensor(array):
import tensorflow as tf
return tf.size(array)
elif is_jax_tensor(array):
return array.size
else:
raise ValueError(f"Type not supported for expand_dims: {type(array)}.")
|
274056675/springboot-openai-chatgpt | 1,215 | mng_web/src/styles/theme/d2.scss | .theme-d2 {
.avue-logo{
color: #409EFF;
background-color: #ebf1f6;
box-shadow: none;
.avue-logo_title{
font-size: 22px;
font-weight: 400;
}
}
.avue-top{
background-color: #ebf1f6;
box-shadow: none;
}
.avue-main{
padding: 0 5px;
}
.avue-tags{
margin-left: 6px;
padding: 0;
border: 1px solid #e4e7ed;
border-radius: 3px;
background-color: #ebf1f6;
box-shadow: none;
.el-tabs__item{
border-left: 1px solid #cfd7e5 !important;
margin: 0 !important;
background-color: rgba(0,0,0,.03) !important;
color: #606266 !important;
font-size: 14px !important;
font-weight: 500 !important;
&:first-child{
border-left: none !important;
}
}
.is-active{
border-bottom:1px solid #fff !important;
background-color: #fff !important;
color: #409EFF !important;
}
}
.avue-sidebar{
background-color: #ebf1f6;
box-shadow: none;
.el-menu-item,.el-submenu__title{
i,span{
color:#606266
}
&:hover,&.is-active{
background: hsla(0,0%,100%,.5);
i,span{
color: #409EFF;
}
}
}
}
} |
274056675/springboot-openai-chatgpt | 1,694 | mng_web/src/styles/theme/iview.scss | .theme-iview {
.avue-logo{
background: #001529;
box-shadow: none;
text-align: center;
.avue-logo_title{
div{
border-top-left-radius: 5px;
border-top-right-radius: 5px;
border-bottom-left-radius: 3px;
border-bottom-right-radius: 3px;
font-size: 22px;
color:#fff;
font-weight: 500;
margin: 10px auto;
width: 180px;
height: 45px;
background-color: #409EFF;
}
}
}
.avue-tags{
padding: 3px 5px 5px 0;
background: #f0f0f0;
box-shadow: inset 0 0 3px 2px hsla(0,0%,39.2%,.1);
.is-active{
&:before{
background: #409EFF !important;
}
}
.el-tabs__item{
padding: 0 15px !important;
position: relative;
height: 32px !important;
line-height:32px !important;
border: 1px solid #e8eaec!important;
color: #515a6e!important;
background: #fff!important;
border-radius: 3px;
&:before{
content:'';
display: inline-block;
width: 12px;
height: 12px;
margin-right:10px;
border-radius: 50%;
background: #e8eaec;
}
}
}
.avue-sidebar{
background: #001529;
.el-menu-item{
&.is-active {
background-color: #000c17;
&:before {
display: none;
}
i,span{
color:#409EFF;
}
}
}
.el-submenu{
.el-menu-item{
&.is-active {
background-color: #409EFF;
&:before {
display: none;
}
i,span{
color:#fff;
}
}
}
}
}
} |
274056675/springboot-openai-chatgpt | 2,029 | mng_web/src/styles/theme/star.scss | .theme-star {
.avue-contail {
background-image: url("/img/bg/star-squashed.jpg");
background-size: 100% 100%;
}
.avue-logo{
color: #fff;
}
.avue-header,
.avue-logo,
.tags-container {
background-color: transparent;
}
.el-card,.error-page {
opacity: .9;
}
.avue-tabs {
padding: 0 20px ;
}
.avue-tags{
background-color: transparent;
border-top: none;
}
.avue-top{
.avue-breadcrumb{
color:#fff;
}
.el-menu-item{
i,span{
color:#fff;
}
&.is-active{
background-color: rgba(0,0,0,.4)
}
}
.el-dropdown{
color:#fff;
}
}
.avue-sidebar{
box-shadow: 2px 0 6px rgba(0, 21, 41, 0.15);
background-color:transparent;
.el-menu-item,.el-submenu__title{
i,span{
color:#fff
}
&:hover{
background: transparent;
i,span{
color:#409EFF;
}
}
&.is-active {
background-color: rgba(0,0,0,.4);
i,span{
color:#fff;
}
}
}
}
.top-search {
.el-input__inner{
color: #333;
}
input::-webkit-input-placeholder,
textarea::-webkit-input-placeholder {
/* WebKit browsers */
color: #fff;
}
input:-moz-placeholder,
textarea:-moz-placeholder {
/* Mozilla Firefox 4 to 18 */
color: #fff;
}
input::-moz-placeholder,
textarea::-moz-placeholder {
/* Mozilla Firefox 19+ */
color: #fff;
}
input:-ms-input-placeholder,
textarea:-ms-input-placeholder {
/* Internet Explorer 10+ */
color: #fff;
}
}
.top-bar__item {
i {
color: #fff;
}
}
} |
233zzh/TitanDataOperationSystem | 136,129 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/flot/examples/axes-time-zones/tz/northamerica | # <pre>
# This file is in the public domain, so clarified as of
# 2009-05-17 by Arthur David Olson.
# also includes Central America and the Caribbean
# This data is by no means authoritative; if you think you know better,
# go ahead and edit the file (and please send any changes to
# tz@iana.org for general use in the future).
# From Paul Eggert (1999-03-22):
# A reliable and entertaining source about time zones is
# Derek Howse, Greenwich time and longitude, Philip Wilson Publishers (1997).
###############################################################################
# United States
# From Paul Eggert (1999-03-31):
# Howse writes (pp 121-125) that time zones were invented by
# Professor Charles Ferdinand Dowd (1825-1904),
# Principal of Temple Grove Ladies' Seminary (Saratoga Springs, NY).
# His pamphlet ``A System of National Time for Railroads'' (1870)
# was the result of his proposals at the Convention of Railroad Trunk Lines
# in New York City (1869-10). His 1870 proposal was based on Washington, DC,
# but in 1872-05 he moved the proposed origin to Greenwich.
# His proposal was adopted by the railroads on 1883-11-18 at 12:00,
# and most of the country soon followed suit.
# From Paul Eggert (2005-04-16):
# That 1883 transition occurred at 12:00 new time, not at 12:00 old time.
# See p 46 of David Prerau, Seize the daylight, Thunder's Mouth Press (2005).
# From Paul Eggert (2006-03-22):
# A good source for time zone historical data in the US is
# Thomas G. Shanks, The American Atlas (5th edition),
# San Diego: ACS Publications, Inc. (1991).
# Make sure you have the errata sheet; the book is somewhat useless without it.
# It is the source for most of the pre-1991 US entries below.
# From Paul Eggert (2001-03-06):
# Daylight Saving Time was first suggested as a joke by Benjamin Franklin
# in his whimsical essay ``An Economical Project for Diminishing the Cost
# of Light'' published in the Journal de Paris (1784-04-26).
# Not everyone is happy with the results:
#
# I don't really care how time is reckoned so long as there is some
# agreement about it, but I object to being told that I am saving
# daylight when my reason tells me that I am doing nothing of the kind.
# I even object to the implication that I am wasting something
# valuable if I stay in bed after the sun has risen. As an admirer
# of moonlight I resent the bossy insistence of those who want to
# reduce my time for enjoying it. At the back of the Daylight Saving
# scheme I detect the bony, blue-fingered hand of Puritanism, eager
# to push people into bed earlier, and get them up earlier, to make
# them healthy, wealthy and wise in spite of themselves.
#
# -- Robertson Davies, The diary of Samuel Marchbanks,
# Clarke, Irwin (1947), XIX, Sunday
#
# For more about the first ten years of DST in the United States, see
# Robert Garland's <a href="http://www.clpgh.org/exhibit/dst.html">
# Ten years of daylight saving from the Pittsburgh standpoint
# (Carnegie Library of Pittsburgh, 1927)</a>.
#
# Shanks says that DST was called "War Time" in the US in 1918 and 1919.
# However, DST was imposed by the Standard Time Act of 1918, which
# was the first nationwide legal time standard, and apparently
# time was just called "Standard Time" or "Daylight Saving Time".
# From Arthur David Olson:
# US Daylight Saving Time ended on the last Sunday of *October* in 1974.
# See, for example, the front page of the Saturday, 1974-10-26
# and Sunday, 1974-10-27 editions of the Washington Post.
# From Arthur David Olson:
# Before the Uniform Time Act of 1966 took effect in 1967, observance of
# Daylight Saving Time in the US was by local option, except during wartime.
# From Arthur David Olson (2000-09-25):
# Last night I heard part of a rebroadcast of a 1945 Arch Oboler radio drama.
# In the introduction, Oboler spoke of "Eastern Peace Time."
# An AltaVista search turned up
# <a href="http://rowayton.org/rhs/hstaug45.html">:
# "When the time is announced over the radio now, it is 'Eastern Peace
# Time' instead of the old familiar 'Eastern War Time.' Peace is wonderful."
# </a> (August 1945) by way of confirmation.
# From Joseph Gallant citing
# George H. Douglas, _The Early Days of Radio Broadcasting_ (1987):
# At 7 P.M. (Eastern War Time) [on 1945-08-14], the networks were set
# to switch to London for Attlee's address, but the American people
# never got to hear his speech live. According to one press account,
# CBS' Bob Trout was first to announce the word of Japan's surrender,
# but a few seconds later, NBC, ABC and Mutual also flashed the word
# of surrender, all of whom interrupting the bells of Big Ben in
# London which were to precede Mr. Attlee's speech.
# From Paul Eggert (2003-02-09): It was Robert St John, not Bob Trout. From
# Myrna Oliver's obituary of St John on page B16 of today's Los Angeles Times:
#
# ... a war-weary U.S. clung to radios, awaiting word of Japan's surrender.
# Any announcement from Asia would reach St. John's New York newsroom on a
# wire service teletype machine, which had prescribed signals for major news.
# Associated Press, for example, would ring five bells before spewing out
# typed copy of an important story, and 10 bells for news "of transcendental
# importance."
#
# On Aug. 14, stalling while talking steadily into the NBC networks' open
# microphone, St. John heard five bells and waited only to hear a sixth bell,
# before announcing confidently: "Ladies and gentlemen, World War II is over.
# The Japanese have agreed to our surrender terms."
#
# He had scored a 20-second scoop on other broadcasters.
# From Arthur David Olson (2005-08-22):
# Paul has been careful to use the "US" rules only in those locations
# that are part of the United States; this reflects the real scope of
# U.S. government action. So even though the "US" rules have changed
# in the latest release, other countries won't be affected.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule US 1918 1919 - Mar lastSun 2:00 1:00 D
Rule US 1918 1919 - Oct lastSun 2:00 0 S
Rule US 1942 only - Feb 9 2:00 1:00 W # War
Rule US 1945 only - Aug 14 23:00u 1:00 P # Peace
Rule US 1945 only - Sep 30 2:00 0 S
Rule US 1967 2006 - Oct lastSun 2:00 0 S
Rule US 1967 1973 - Apr lastSun 2:00 1:00 D
Rule US 1974 only - Jan 6 2:00 1:00 D
Rule US 1975 only - Feb 23 2:00 1:00 D
Rule US 1976 1986 - Apr lastSun 2:00 1:00 D
Rule US 1987 2006 - Apr Sun>=1 2:00 1:00 D
Rule US 2007 max - Mar Sun>=8 2:00 1:00 D
Rule US 2007 max - Nov Sun>=1 2:00 0 S
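# The 2007-to-max pair above encodes current US practice: DST begins on the
# second Sunday in March ("Mar Sun>=8", i.e. the first Sunday falling on or
# after March 8) and ends on the first Sunday in November ("Nov Sun>=1").
# A small illustrative Python sketch of the "Sun>=N" search (not how zic
# itself is implemented):
#
#     import datetime
#
#     def on_or_after(year, month, day, weekday):
#         # First date on or after (year, month, day) whose weekday matches
#         # (Monday=0 ... Sunday=6).
#         d = datetime.date(year, month, day)
#         while d.weekday() != weekday:
#             d += datetime.timedelta(days=1)
#         return d
#
#     on_or_after(2012, 3, 8, 6)   # -> 2012-03-11, DST start
#     on_or_after(2012, 11, 1, 6)  # -> 2012-11-04, DST end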
# From Arthur David Olson, 2005-12-19
# We generate the files specified below to guard against old files with
# obsolete information being left in the time zone binary directory.
# We limit the list to names that have appeared in previous versions of
# this time zone package.
# We do these as separate Zones rather than as Links to avoid problems if
# a particular place changes whether it observes DST.
# We put these specifications here in the northamerica file both to
# increase the chances that they'll actually get compiled and to
# avoid the need to duplicate the US rules in another file.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone EST -5:00 - EST
Zone MST -7:00 - MST
Zone HST -10:00 - HST
Zone EST5EDT -5:00 US E%sT
Zone CST6CDT -6:00 US C%sT
Zone MST7MDT -7:00 US M%sT
Zone PST8PDT -8:00 US P%sT
# From Bob Devine (1988-01-28):
# ...Alaska (and Hawaii) had the timezone names changed in 1967.
# old new
# Pacific Standard Time(PST) -same-
# Yukon Standard Time(YST) -same-
# Central Alaska S.T. (CAT) Alaska-Hawaii St[an]dard Time (AHST)
# Nome Standard Time (NT) Bering Standard Time (BST)
#
# ...Alaska's timezone lines were redrawn in 1983 to give only 2 tz.
# The YST zone now covers nearly all of the state, AHST just part
# of the Aleutian islands. No DST.
# From Paul Eggert (1995-12-19):
# The tables below use `NST', not `NT', for Nome Standard Time.
# I invented `CAWT' for Central Alaska War Time.
# From U. S. Naval Observatory (1989-01-19):
# USA EASTERN 5 H BEHIND UTC NEW YORK, WASHINGTON
# USA EASTERN 4 H BEHIND UTC APR 3 - OCT 30
# USA CENTRAL 6 H BEHIND UTC CHICAGO, HOUSTON
# USA CENTRAL 5 H BEHIND UTC APR 3 - OCT 30
# USA MOUNTAIN 7 H BEHIND UTC DENVER
# USA MOUNTAIN 6 H BEHIND UTC APR 3 - OCT 30
# USA PACIFIC 8 H BEHIND UTC L.A., SAN FRANCISCO
# USA PACIFIC 7 H BEHIND UTC APR 3 - OCT 30
# USA ALASKA STD 9 H BEHIND UTC MOST OF ALASKA (AKST)
# USA ALASKA STD 8 H BEHIND UTC APR 3 - OCT 30 (AKDT)
# USA ALEUTIAN 10 H BEHIND UTC ISLANDS WEST OF 170W
# USA - " - 9 H BEHIND UTC APR 3 - OCT 30
# USA HAWAII 10 H BEHIND UTC
# USA BERING 11 H BEHIND UTC SAMOA, MIDWAY
# From Arthur David Olson (1989-01-21):
# The above dates are for 1988.
# Note the "AKST" and "AKDT" abbreviations, the claim that there's
# no DST in Samoa, and the claim that there is DST in Alaska and the
# Aleutians.
# From Arthur David Olson (1988-02-13):
# Legal standard time zone names, from United States Code (1982 Edition and
# Supplement III), Title 15, Chapter 6, Section 260 and forward. First, names
# up to 1967-04-01 (when most provisions of the Uniform Time Act of 1966
# took effect), as explained in sections 263 and 261:
# (none)
# United States standard eastern time
# United States standard mountain time
# United States standard central time
# United States standard Pacific time
# (none)
# United States standard Alaska time
# (none)
# Next, names from 1967-04-01 until 1983-11-30 (the date for
# public law 98-181):
# Atlantic standard time
# eastern standard time
# central standard time
# mountain standard time
# Pacific standard time
# Yukon standard time
# Alaska-Hawaii standard time
# Bering standard time
# And after 1983-11-30:
# Atlantic standard time
# eastern standard time
# central standard time
# mountain standard time
# Pacific standard time
# Alaska standard time
# Hawaii-Aleutian standard time
# Samoa standard time
# The law doesn't give abbreviations.
#
# From Paul Eggert (2000-01-08), following a heads-up from Rives McDow:
# Public law 106-564 (2000-12-23) introduced the abbreviation
# "Chamorro Standard Time" for time in Guam and the Northern Marianas.
# See the file "australasia".
# From Arthur David Olson, 2005-08-09
# The following was signed into law on 2005-08-08.
#
# H.R. 6, Energy Policy Act of 2005, SEC. 110. DAYLIGHT SAVINGS.
# (a) Amendment- Section 3(a) of the Uniform Time Act of 1966 (15
# U.S.C. 260a(a)) is amended--
# (1) by striking `first Sunday of April' and inserting `second
# Sunday of March'; and
# (2) by striking `last Sunday of October' and inserting `first
# Sunday of November'.
# (b) Effective Date- Subsection (a) shall take effect 1 year after the
# date of enactment of this Act or March 1, 2007, whichever is later.
# (c) Report to Congress- Not later than 9 months after the effective
# date stated in subsection (b), the Secretary shall report to Congress
# on the impact of this section on energy consumption in the United
# States.
# (d) Right to Revert- Congress retains the right to revert the
# Daylight Saving Time back to the 2005 time schedules once the
# Department study is complete.
# US eastern time, represented by New York
# Connecticut, Delaware, District of Columbia, most of Florida,
# Georgia, southeast Indiana (Dearborn and Ohio counties), eastern Kentucky
# (except America/Kentucky/Louisville below), Maine, Maryland, Massachusetts,
# New Hampshire, New Jersey, New York, North Carolina, Ohio,
# Pennsylvania, Rhode Island, South Carolina, eastern Tennessee,
# Vermont, Virginia, West Virginia
# From Dave Cantor (2004-11-02):
# Early this summer I had the occasion to visit the Mount Washington
# Observatory weather station atop (of course!) Mount Washington [, NH]....
# One of the staff members said that the station was on Eastern Standard Time
# and didn't change their clocks for Daylight Saving ... so that their
# reports will always have times which are 5 hours behind UTC.
# From Paul Eggert (2005-08-26):
# According to today's Huntsville Times
# <http://www.al.com/news/huntsvilletimes/index.ssf?/base/news/1125047783228320.xml&coll=1>
# a few towns on Alabama's "eastern border with Georgia, such as Phenix City
# in Russell County, Lanett in Chambers County and some towns in Lee County,
# set their watches and clocks on Eastern time." It quotes H.H. "Bubba"
# Roberts, city administrator in Phenix City. as saying "We are in the Central
# time zone, but we do go by the Eastern time zone because so many people work
# in Columbus."
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER
Rule NYC 1920 only - Mar lastSun 2:00 1:00 D
Rule NYC 1920 only - Oct lastSun 2:00 0 S
Rule NYC 1921 1966 - Apr lastSun 2:00 1:00 D
Rule NYC 1921 1954 - Sep lastSun 2:00 0 S
Rule NYC 1955 1966 - Oct lastSun 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/New_York -4:56:02 - LMT 1883 Nov 18 12:03:58
-5:00 US E%sT 1920
-5:00 NYC E%sT 1942
-5:00 US E%sT 1946
-5:00 NYC E%sT 1967
-5:00 US E%sT
# US central time, represented by Chicago
# Alabama, Arkansas, Florida panhandle (Bay, Calhoun, Escambia,
# Gulf, Holmes, Jackson, Okaloosa, Santa Rosa, Walton, and
# Washington counties), Illinois, western Indiana
# (Gibson, Jasper, Lake, LaPorte, Newton, Porter, Posey, Spencer,
# Vanderburgh, and Warrick counties), Iowa, most of Kansas, western
# Kentucky, Louisiana, Minnesota, Mississippi, Missouri, eastern
# Nebraska, eastern North Dakota, Oklahoma, eastern South Dakota,
# western Tennessee, most of Texas, Wisconsin
# From Larry M. Smith (2006-04-26) re Wisconsin:
# http://www.legis.state.wi.us/statutes/Stat0175.pdf ...
# is currently enforced at the 01:00 time of change. Because the local
# "bar time" in the state corresponds to 02:00, a number of citations
# are issued for the "sale of class 'B' alcohol after prohibited
# hours" within the deviated hour of this change every year....
#
# From Douglas R. Bomberg (2007-03-12):
# Wisconsin has enacted (nearly eleventh-hour) legislation to get WI
# Statute 175 closer in synch with the US Congress' intent....
# http://www.legis.state.wi.us/2007/data/acts/07Act3.pdf
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER
Rule Chicago 1920 only - Jun 13 2:00 1:00 D
Rule Chicago 1920 1921 - Oct lastSun 2:00 0 S
Rule Chicago 1921 only - Mar lastSun 2:00 1:00 D
Rule Chicago 1922 1966 - Apr lastSun 2:00 1:00 D
Rule Chicago 1922 1954 - Sep lastSun 2:00 0 S
Rule Chicago 1955 1966 - Oct lastSun 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Chicago -5:50:36 - LMT 1883 Nov 18 12:09:24
-6:00 US C%sT 1920
-6:00 Chicago C%sT 1936 Mar 1 2:00
-5:00 - EST 1936 Nov 15 2:00
-6:00 Chicago C%sT 1942
-6:00 US C%sT 1946
-6:00 Chicago C%sT 1967
-6:00 US C%sT
# Oliver County, ND switched from mountain to central time on 1992-10-25.
Zone America/North_Dakota/Center -6:45:12 - LMT 1883 Nov 18 12:14:48
-7:00 US M%sT 1992 Oct 25 02:00
-6:00 US C%sT
# Morton County, ND, switched from mountain to central time on
# 2003-10-26, except for the area around Mandan which was already central time.
# See <http://dmses.dot.gov/docimages/p63/135818.pdf>.
# Officially this switch also included part of Sioux County, and
# Jones, Mellette, and Todd Counties in South Dakota;
# but in practice these other counties were already observing central time.
# See <http://www.epa.gov/fedrgstr/EPA-IMPACT/2003/October/Day-28/i27056.htm>.
Zone America/North_Dakota/New_Salem -6:45:39 - LMT 1883 Nov 18 12:14:21
-7:00 US M%sT 2003 Oct 26 02:00
-6:00 US C%sT
# From Josh Findley (2011-01-21):
# ...it appears that Mercer County, North Dakota, changed from the
# mountain time zone to the central time zone at the last transition from
# daylight-saving to standard time (on Nov. 7, 2010):
# <a href="http://www.gpo.gov/fdsys/pkg/FR-2010-09-29/html/2010-24376.htm">
# http://www.gpo.gov/fdsys/pkg/FR-2010-09-29/html/2010-24376.htm
# </a>
# <a href="http://www.bismarcktribune.com/news/local/article_1eb1b588-c758-11df-b472-001cc4c03286.html">
# http://www.bismarcktribune.com/news/local/article_1eb1b588-c758-11df-b472-001cc4c03286.html
# </a>
# From Andy Lipscomb (2011-01-24):
# ...according to the Census Bureau, the largest city is Beulah (although
# it's commonly referred to as Beulah-Hazen, with Hazen being the next
# largest city in Mercer County). Google Maps places Beulah's city hall
# at 4715'51" north, 10146'40" west, which yields an offset of 6h47'07".
Zone America/North_Dakota/Beulah -6:47:07 - LMT 1883 Nov 18 12:12:53
-7:00 US M%sT 2010 Nov 7 2:00
-6:00 US C%sT
# US mountain time, represented by Denver
#
# Colorado, far western Kansas, Montana, western
# Nebraska, Nevada border (Jackpot, Owyhee, and Mountain City),
# New Mexico, southwestern North Dakota,
# western South Dakota, far western Texas (El Paso County, Hudspeth County,
# and Pine Springs and Nickel Creek in Culberson County), Utah, Wyoming
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER
Rule Denver 1920 1921 - Mar lastSun 2:00 1:00 D
Rule Denver 1920 only - Oct lastSun 2:00 0 S
Rule Denver 1921 only - May 22 2:00 0 S
Rule Denver 1965 1966 - Apr lastSun 2:00 1:00 D
Rule Denver 1965 1966 - Oct lastSun 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Denver -6:59:56 - LMT 1883 Nov 18 12:00:04
-7:00 US M%sT 1920
-7:00 Denver M%sT 1942
-7:00 US M%sT 1946
-7:00 Denver M%sT 1967
-7:00 US M%sT
# US Pacific time, represented by Los Angeles
#
# California, northern Idaho (Benewah, Bonner, Boundary, Clearwater,
# Idaho, Kootenai, Latah, Lewis, Nez Perce, and Shoshone counties,
# and the northern three-quarters of Idaho county),
# most of Nevada, most of Oregon, and Washington
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER
Rule CA 1948 only - Mar 14 2:00 1:00 D
Rule CA 1949 only - Jan 1 2:00 0 S
Rule CA 1950 1966 - Apr lastSun 2:00 1:00 D
Rule CA 1950 1961 - Sep lastSun 2:00 0 S
Rule CA 1962 1966 - Oct lastSun 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Los_Angeles -7:52:58 - LMT 1883 Nov 18 12:07:02
-8:00 US P%sT 1946
-8:00 CA P%sT 1967
-8:00 US P%sT
# Alaska
# AK%sT is the modern abbreviation for -9:00 per USNO.
#
# From Paul Eggert (2001-05-30):
# Howse writes that Alaska switched from the Julian to the Gregorian calendar,
# and from east-of-GMT to west-of-GMT days, when the US bought it from Russia.
# This was on 1867-10-18, a Friday; the previous day was 1867-10-06 Julian,
# also a Friday. Include only the time zone part of this transition,
# ignoring the switch from Julian to Gregorian, since we can't represent
# the Julian calendar.
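# Since only the date-line change is modeled, each zone below begins
# with an east-of-GMT LMT exactly 24 hours ahead of the west-of-GMT
# LMT that follows it; e.g. Juneau's 15:02:19 and -8:57:41 differ by
# exactly 24:00:00.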
#
# As far as we know, none of the exact locations mentioned below were
# permanently inhabited in 1867 by anyone using either calendar.
# (Yakutat was colonized by the Russians in 1799, but the settlement
# was destroyed in 1805 by a Yakutat-kon war party.) However, there
# were nearby inhabitants in some cases and for our purposes perhaps
# it's best to simply use the official transition.
#
# From Steve Ferguson (2011-01-31):
# The author lives in Alaska and many of the references listed are only
# available to Alaskan residents.
#
# <a href="http://www.alaskahistoricalsociety.org/index.cfm?section=discover%20alaska&page=Glimpses%20of%20the%20Past&viewpost=2&ContentId=98">
# http://www.alaskahistoricalsociety.org/index.cfm?section=discover%20alaska&page=Glimpses%20of%20the%20Past&viewpost=2&ContentId=98
# </a>
# From Arthur David Olson (2011-02-01):
# Here's database-relevant material from the 2001 "Alaska History" article:
#
# On September 20 [1979]...DOT...officials decreed that on April 27,
# 1980, Juneau and other nearby communities would move to Yukon Time.
# Sitka, Petersburg, Wrangell, and Ketchikan, however, would remain on
# Pacific Time.
#
# ...on September 22, 1980, DOT Secretary Neil E. Goldschmidt rescinded the
# Department's September 1979 decision. Juneau and other communities in
# northern Southeast reverted to Pacific Time on October 26.
#
# On October 28 [1983]...the Metlakatla Indian Community Council voted
# unanimously to keep the reservation on Pacific Time.
#
# According to DOT official Joanne Petrie, Indian reservations are not
# bound to follow time zones imposed by neighboring jurisdictions.
#
# (The last is consistent with how the database now handles the Navajo
# Nation.)
# From Arthur David Olson (2011-02-09):
# I just spoke by phone with a staff member at the Metlakatla Indian
# Community office (using contact information available at
# <a href="http://www.commerce.state.ak.us/dca/commdb/CIS.cfm?Comm_Boro_name=Metlakatla">
# http://www.commerce.state.ak.us/dca/commdb/CIS.cfm?Comm_Boro_name=Metlakatla
# </a>).
# It's shortly after 1:00 here on the east coast of the United States;
# the staffer said it was shortly after 10:00 there. When I asked whether
# that meant they were on Pacific time, they said no--they were on their
# own time. I asked about daylight saving; they said it wasn't used. I
# did not inquire about practices in the past.
# From Arthur David Olson (2011-08-17):
# For lack of better information, assume that Metlakatla's
# abandonment of use of daylight saving resulted from the 1983 vote.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Juneau 15:02:19 - LMT 1867 Oct 18
-8:57:41 - LMT 1900 Aug 20 12:00
-8:00 - PST 1942
-8:00 US P%sT 1946
-8:00 - PST 1969
-8:00 US P%sT 1980 Apr 27 2:00
-9:00 US Y%sT 1980 Oct 26 2:00
-8:00 US P%sT 1983 Oct 30 2:00
-9:00 US Y%sT 1983 Nov 30
-9:00 US AK%sT
Zone America/Sitka 14:58:47 - LMT 1867 Oct 18
-9:01:13 - LMT 1900 Aug 20 12:00
-8:00 - PST 1942
-8:00 US P%sT 1946
-8:00 - PST 1969
-8:00 US P%sT 1983 Oct 30 2:00
-9:00 US Y%sT 1983 Nov 30
-9:00 US AK%sT
Zone America/Metlakatla 15:13:42 - LMT 1867 Oct 18
-8:46:18 - LMT 1900 Aug 20 12:00
-8:00 - PST 1942
-8:00 US P%sT 1946
-8:00 - PST 1969
-8:00 US P%sT 1983 Oct 30 2:00
-8:00 - MeST
Zone America/Yakutat 14:41:05 - LMT 1867 Oct 18
-9:18:55 - LMT 1900 Aug 20 12:00
-9:00 - YST 1942
-9:00 US Y%sT 1946
-9:00 - YST 1969
-9:00 US Y%sT 1983 Nov 30
-9:00 US AK%sT
Zone America/Anchorage 14:00:24 - LMT 1867 Oct 18
-9:59:36 - LMT 1900 Aug 20 12:00
-10:00 - CAT 1942
-10:00 US CAT/CAWT 1945 Aug 14 23:00u
-10:00 US CAT/CAPT 1946 # Peace
-10:00 - CAT 1967 Apr
-10:00 - AHST 1969
-10:00 US AH%sT 1983 Oct 30 2:00
-9:00 US Y%sT 1983 Nov 30
-9:00 US AK%sT
Zone America/Nome 12:58:22 - LMT 1867 Oct 18
-11:01:38 - LMT 1900 Aug 20 12:00
-11:00 - NST 1942
-11:00 US N%sT 1946
-11:00 - NST 1967 Apr
-11:00 - BST 1969
-11:00 US B%sT 1983 Oct 30 2:00
-9:00 US Y%sT 1983 Nov 30
-9:00 US AK%sT
Zone America/Adak 12:13:22 - LMT 1867 Oct 18
-11:46:38 - LMT 1900 Aug 20 12:00
-11:00 - NST 1942
-11:00 US N%sT 1946
-11:00 - NST 1967 Apr
-11:00 - BST 1969
-11:00 US B%sT 1983 Oct 30 2:00
-10:00 US AH%sT 1983 Nov 30
-10:00 US HA%sT
# The following switches don't quite make our 1970 cutoff.
#
# Shanks writes that part of southwest Alaska (e.g. Aniak)
# switched from -11:00 to -10:00 on 1968-09-22 at 02:00,
# and another part (e.g. Akiak) made the same switch five weeks later.
#
# From David Flater (2004-11-09):
# In e-mail, 2004-11-02, Ray Hudson, historian/liaison to the Unalaska
# Historic Preservation Commission, provided this information, which
# suggests that Unalaska deviated from statutory time from early 1967
# possibly until 1983:
#
# Minutes of the Unalaska City Council Meeting, January 10, 1967:
# "Except for St. Paul and Akutan, Unalaska is the only important
# location not on Alaska Standard Time. The following resolution was
# made by William Robinson and seconded by Henry Swanson: Be it
# resolved that the City of Unalaska hereby goes to Alaska Standard
# Time as of midnight Friday, January 13, 1967 (1 A.M. Saturday,
# January 14, Alaska Standard Time.) This resolution was passed with
# three votes for and one against."
# Hawaii
# From Arthur David Olson (2010-12-09):
# "Hawaiian Time" by Robert C. Schmitt and Doak C. Cox appears on pages 207-225
# of volume 26 of The Hawaiian Journal of History (1992). As of 2010-12-09,
# the article is available at
# <a href="http://evols.library.manoa.hawaii.edu/bitstream/10524/239/2/JL26215.pdf">
# http://evols.library.manoa.hawaii.edu/bitstream/10524/239/2/JL26215.pdf
# </a>
# and indicates that standard time was adopted effective noon, January
# 13, 1896 (page 218), that in "1933, the Legislature decreed daylight
# saving for the period between the last Sunday of each April and the
# last Sunday of each September, but less than a month later repealed the
# act," (page 220), that year-round daylight saving time was in effect
# from 1942-02-09 to 1945-09-30 (page 221, with no time of day given for
# when clocks changed) and that clocks were changed by 30 minutes
# effective the second Sunday of June, 1947 (page 219, with no time of
# day given for when clocks changed). A footnote for the 1933 changes
# cites Session Laws of Hawaii 1933, "Act. 90 (approved 26 Apr. 1933)
# and Act 163 (approved 21 May 1933)."
# From Arthur David Olson (2011-01-19):
# The following is from "Laws of the Territory of Hawaii Passed by the
# Seventeenth Legislature: Regular Session 1933," available (as of
# 2011-01-19) at American University's Pence Law Library. Page 85: "Act
# 90...At 2 o'clock ante meridian of the last Sunday in April of each
# year, the standard time of this Territory shall be advanced one
# hour...This Act shall take effect upon its approval. Approved this 26th
# day of April, A. D. 1933. LAWRENCE M JUDD, Governor of the Territory of
# Hawaii." Page 172: "Act 163...Act 90 of the Session Laws of 1933 is
# hereby repealed...This Act shall take effect upon its approval, upon
# which date the standard time of this Territory shall be restored to
# that existing immediately prior to the taking effect of said Act 90.
# Approved this 21st day of May, A. D. 1933. LAWRENCE M. JUDD, Governor
# of the Territory of Hawaii."
#
# Note that 1933-05-21 was a Sunday.
# We're left to guess the time of day when Act 163 was approved; guess noon.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Honolulu -10:31:26 - LMT 1896 Jan 13 12:00 #Schmitt&Cox
-10:30 - HST 1933 Apr 30 2:00 #Laws 1933
-10:30 1:00 HDT 1933 May 21 12:00 #Laws 1933+12
-10:30 - HST 1942 Feb 09 2:00 #Schmitt&Cox+2
-10:30 1:00 HDT 1945 Sep 30 2:00 #Schmitt&Cox+2
-10:30 - HST 1947 Jun 8 2:00 #Schmitt&Cox+2
-10:00 - HST
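# (In the RULES column, a plain amount of time such as the 1:00 in the
# HDT lines above means that fixed amount of daylight saving applies
# for the whole period, with no named rule set consulted.)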
# Now we turn to US areas that have diverged from the consensus since 1970.
# Arizona mostly uses MST.
# From Paul Eggert (2002-10-20):
#
# The information in the rest of this paragraph is derived from the
# <a href="http://www.dlapr.lib.az.us/links/daylight.htm">
# Daylight Saving Time web page (2002-01-23)</a> maintained by the
# Arizona State Library, Archives and Public Records.
# Between 1944-01-01 and 1944-04-01 the State of Arizona used standard
# time, but by federal law railroads, airlines, bus lines, military
# personnel, and some engaged in interstate commerce continued to
# observe war (i.e., daylight saving) time. The 1944-03-17 Phoenix
# Gazette says that was the date the law changed, and that 04-01 was
# the date the state's clocks would change. In 1945 the State of
# Arizona used standard time all year, again with exceptions only as
# mandated by federal law. Arizona observed DST in 1967, but Arizona
# Laws 1968, ch. 183 (effective 1968-03-21) repealed DST.
#
# Shanks says the 1944 experiment came to an end on 1944-03-17.
# Go with the Arizona State Library instead.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Phoenix -7:28:18 - LMT 1883 Nov 18 11:31:42
-7:00 US M%sT 1944 Jan 1 00:01
-7:00 - MST 1944 Apr 1 00:01
-7:00 US M%sT 1944 Oct 1 00:01
-7:00 - MST 1967
-7:00 US M%sT 1968 Mar 21
-7:00 - MST
# From Arthur David Olson (1988-02-13):
# A writer from the Inter Tribal Council of Arizona, Inc.,
# notes in private correspondence dated 1987-12-28 that "Presently, only the
# Navajo Nation participates in the Daylight Saving Time policy, due to its
# large size and location in three states." (The "only" means that other
# tribal nations don't use DST.)
Link America/Denver America/Shiprock
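# (A Link line makes its second name an alias for the zone named first,
# so America/Shiprock gets America/Denver's data.)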
# Southern Idaho (Ada, Adams, Bannock, Bear Lake, Bingham, Blaine,
# Boise, Bonneville, Butte, Camas, Canyon, Caribou, Cassia, Clark,
# Custer, Elmore, Franklin, Fremont, Gem, Gooding, Jefferson, Jerome,
# Lemhi, Lincoln, Madison, Minidoka, Oneida, Owyhee, Payette, Power,
# Teton, Twin Falls, Valley, Washington counties, and the southern
# quarter of Idaho county) and eastern Oregon (most of Malheur County)
# switched four weeks late in 1974.
#
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Boise -7:44:49 - LMT 1883 Nov 18 12:15:11
-8:00 US P%sT 1923 May 13 2:00
-7:00 US M%sT 1974
-7:00 - MST 1974 Feb 3 2:00
-7:00 US M%sT
# Indiana
#
# For a map of Indiana's time zone regions, see:
# <a href="http://www.mccsc.edu/time.html">
# What time is it in Indiana?
# </a> (2006-03-01)
#
# From Paul Eggert (2007-08-17):
# Since 1970, most of Indiana has been like America/Indiana/Indianapolis,
# with the following exceptions:
#
# - Gibson, Jasper, Lake, LaPorte, Newton, Porter, Posey, Spencer,
# Vanderburgh, and Warrick counties have been like America/Chicago.
#
# - Dearborn and Ohio counties have been like America/New_York.
#
# - Clark, Floyd, and Harrison counties have been like
# America/Kentucky/Louisville.
#
# - Crawford, Daviess, Dubois, Knox, Martin, Perry, Pike, Pulaski, Starke,
# and Switzerland counties have their own time zone histories as noted below.
#
# Shanks partitioned Indiana into 345 regions, each with its own time history,
# and wrote ``Even newspaper reports present contradictory information.''
# Those Hoosiers! Such a flighty and changeable people!
# Fortunately, most of the complexity occurred before our cutoff date of 1970.
#
# Other than Indianapolis, the Indiana place names are so nondescript
# that they would be ambiguous if we left them at the `America' level.
# So we reluctantly put them all in a subdirectory `America/Indiana'.
# From Paul Eggert (2005-08-16):
# http://www.mccsc.edu/time.html says that Indiana will use DST starting 2006.
# From Nathan Stratton Treadway (2006-03-30):
# http://www.dot.gov/affairs/dot0406.htm [3705 B]
# From Deborah Goldsmith (2006-01-18):
# http://dmses.dot.gov/docimages/pdf95/382329_web.pdf [2.9 MB]
# From Paul Eggert (2006-01-20):
# It says "DOT is relocating the time zone boundary in Indiana to move Starke,
# Pulaski, Knox, Daviess, Martin, Pike, Dubois, and Perry Counties from the
# Eastern Time Zone to the Central Time Zone.... The effective date of
# this rule is 2:00 a.m. EST Sunday, April 2, 2006, which is the
# changeover date from standard time to Daylight Saving Time."
# Strictly speaking, this means the affected counties will change their
# clocks twice that night, but this obviously is in error. The intent
# is that 01:59:59 EST be followed by 02:00:00 CDT.
# From Gwillim Law (2007-02-10):
# The Associated Press has been reporting that Pulaski County, Indiana is
# going to switch from Central to Eastern Time on March 11, 2007....
# http://www.indystar.com/apps/pbcs.dll/article?AID=/20070207/LOCAL190108/702070524/0/LOCAL
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER
Rule Indianapolis 1941 only - Jun 22 2:00 1:00 D
Rule Indianapolis 1941 1954 - Sep lastSun 2:00 0 S
Rule Indianapolis 1946 1954 - Apr lastSun 2:00 1:00 D
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Indiana/Indianapolis -5:44:38 - LMT 1883 Nov 18 12:15:22
-6:00 US C%sT 1920
-6:00 Indianapolis C%sT 1942
-6:00 US C%sT 1946
-6:00 Indianapolis C%sT 1955 Apr 24 2:00
-5:00 - EST 1957 Sep 29 2:00
-6:00 - CST 1958 Apr 27 2:00
-5:00 - EST 1969
-5:00 US E%sT 1971
-5:00 - EST 2006
-5:00 US E%sT
#
# Eastern Crawford County, Indiana, left its clocks alone in 1974,
# as well as from 1976 through 2005.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER
Rule Marengo 1951 only - Apr lastSun 2:00 1:00 D
Rule Marengo 1951 only - Sep lastSun 2:00 0 S
Rule Marengo 1954 1960 - Apr lastSun 2:00 1:00 D
Rule Marengo 1954 1960 - Sep lastSun 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Indiana/Marengo -5:45:23 - LMT 1883 Nov 18 12:14:37
-6:00 US C%sT 1951
-6:00 Marengo C%sT 1961 Apr 30 2:00
-5:00 - EST 1969
-5:00 US E%sT 1974 Jan 6 2:00
-6:00 1:00 CDT 1974 Oct 27 2:00
-5:00 US E%sT 1976
-5:00 - EST 2006
-5:00 US E%sT
#
# Daviess, Dubois, Knox, and Martin Counties, Indiana,
# switched from eastern to central time in April 2006, then switched back
# in November 2007.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER
Rule Vincennes 1946 only - Apr lastSun 2:00 1:00 D
Rule Vincennes 1946 only - Sep lastSun 2:00 0 S
Rule Vincennes 1953 1954 - Apr lastSun 2:00 1:00 D
Rule Vincennes 1953 1959 - Sep lastSun 2:00 0 S
Rule Vincennes 1955 only - May 1 0:00 1:00 D
Rule Vincennes 1956 1963 - Apr lastSun 2:00 1:00 D
Rule Vincennes 1960 only - Oct lastSun 2:00 0 S
Rule Vincennes 1961 only - Sep lastSun 2:00 0 S
Rule Vincennes 1962 1963 - Oct lastSun 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Indiana/Vincennes -5:50:07 - LMT 1883 Nov 18 12:09:53
-6:00 US C%sT 1946
-6:00 Vincennes C%sT 1964 Apr 26 2:00
-5:00 - EST 1969
-5:00 US E%sT 1971
-5:00 - EST 2006 Apr 2 2:00
-6:00 US C%sT 2007 Nov 4 2:00
-5:00 US E%sT
#
# Perry County, Indiana, switched from eastern to central time in April 2006.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER
Rule Perry 1946 only - Apr lastSun 2:00 1:00 D
Rule Perry 1946 only - Sep lastSun 2:00 0 S
Rule Perry 1953 1954 - Apr lastSun 2:00 1:00 D
Rule Perry 1953 1959 - Sep lastSun 2:00 0 S
Rule Perry 1955 only - May 1 0:00 1:00 D
Rule Perry 1956 1963 - Apr lastSun 2:00 1:00 D
Rule Perry 1960 only - Oct lastSun 2:00 0 S
Rule Perry 1961 only - Sep lastSun 2:00 0 S
Rule Perry 1962 1963 - Oct lastSun 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Indiana/Tell_City -5:47:03 - LMT 1883 Nov 18 12:12:57
-6:00 US C%sT 1946
-6:00 Perry C%sT 1964 Apr 26 2:00
-5:00 - EST 1969
-5:00 US E%sT 1971
-5:00 - EST 2006 Apr 2 2:00
-6:00 US C%sT
#
# Pike County, Indiana moved from central to eastern time in 1977,
# then switched back in 2006, then switched back again in 2007.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER
Rule Pike 1955 only - May 1 0:00 1:00 D
Rule Pike 1955 1960 - Sep lastSun 2:00 0 S
Rule Pike 1956 1964 - Apr lastSun 2:00 1:00 D
Rule Pike 1961 1964 - Oct lastSun 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Indiana/Petersburg -5:49:07 - LMT 1883 Nov 18 12:10:53
-6:00 US C%sT 1955
-6:00 Pike C%sT 1965 Apr 25 2:00
-5:00 - EST 1966 Oct 30 2:00
-6:00 US C%sT 1977 Oct 30 2:00
-5:00 - EST 2006 Apr 2 2:00
-6:00 US C%sT 2007 Nov 4 2:00
-5:00 US E%sT
#
# Starke County, Indiana moved from central to eastern time in 1991,
# then switched back in 2006.
# From Arthur David Olson (1991-10-28):
# An article on page A3 of the Sunday, 1991-10-27 Washington Post
# notes that Starke County switched from Central time to Eastern time as of
# 1991-10-27.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER
Rule Starke 1947 1961 - Apr lastSun 2:00 1:00 D
Rule Starke 1947 1954 - Sep lastSun 2:00 0 S
Rule Starke 1955 1956 - Oct lastSun 2:00 0 S
Rule Starke 1957 1958 - Sep lastSun 2:00 0 S
Rule Starke 1959 1961 - Oct lastSun 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Indiana/Knox -5:46:30 - LMT 1883 Nov 18 12:13:30
-6:00 US C%sT 1947
-6:00 Starke C%sT 1962 Apr 29 2:00
-5:00 - EST 1963 Oct 27 2:00
-6:00 US C%sT 1991 Oct 27 2:00
-5:00 - EST 2006 Apr 2 2:00
-6:00 US C%sT
#
# Pulaski County, Indiana, switched from eastern to central time in
# April 2006 and then switched back in March 2007.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER
Rule Pulaski 1946 1960 - Apr lastSun 2:00 1:00 D
Rule Pulaski 1946 1954 - Sep lastSun 2:00 0 S
Rule Pulaski 1955 1956 - Oct lastSun 2:00 0 S
Rule Pulaski 1957 1960 - Sep lastSun 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Indiana/Winamac -5:46:25 - LMT 1883 Nov 18 12:13:35
-6:00 US C%sT 1946
-6:00 Pulaski C%sT 1961 Apr 30 2:00
-5:00 - EST 1969
-5:00 US E%sT 1971
-5:00 - EST 2006 Apr 2 2:00
-6:00 US C%sT 2007 Mar 11 2:00
-5:00 US E%sT
#
# Switzerland County, Indiana, did not observe DST from 1973 through 2005.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Indiana/Vevay -5:40:16 - LMT 1883 Nov 18 12:19:44
-6:00 US C%sT 1954 Apr 25 2:00
-5:00 - EST 1969
-5:00 US E%sT 1973
-5:00 - EST 2006
-5:00 US E%sT
# Part of Kentucky left its clocks alone in 1974.
# This also includes Clark, Floyd, and Harrison counties in Indiana.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER
Rule Louisville 1921 only - May 1 2:00 1:00 D
Rule Louisville 1921 only - Sep 1 2:00 0 S
Rule Louisville 1941 1961 - Apr lastSun 2:00 1:00 D
Rule Louisville 1941 only - Sep lastSun 2:00 0 S
Rule Louisville 1946 only - Jun 2 2:00 0 S
Rule Louisville 1950 1955 - Sep lastSun 2:00 0 S
Rule Louisville 1956 1960 - Oct lastSun 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Kentucky/Louisville -5:43:02 - LMT 1883 Nov 18 12:16:58
-6:00 US C%sT 1921
-6:00 Louisville C%sT 1942
-6:00 US C%sT 1946
-6:00 Louisville C%sT 1961 Jul 23 2:00
-5:00 - EST 1968
-5:00 US E%sT 1974 Jan 6 2:00
-6:00 1:00 CDT 1974 Oct 27 2:00
-5:00 US E%sT
#
# Wayne County, Kentucky
#
# From
# <a href="http://www.lake-cumberland.com/life/archive/news990129time.shtml">
# Lake Cumberland LIFE
# </a> (1999-01-29) via WKYM-101.7:
# Clinton County has joined Wayne County in asking the DoT to change from
# the Central to the Eastern time zone.... The Wayne County government made
# the same request in December. And while Russell County officials have not
# taken action, the majority of respondents to a poll conducted there in
# August indicated they would like to change to "fast time" also.
# The three Lake Cumberland counties are the farthest east of any U.S.
# location in the Central time zone.
#
# From Rich Wales (2000-08-29):
# After prolonged debate, and despite continuing deep differences of opinion,
# Wayne County (central Kentucky) is switching from Central (-0600) to Eastern
# (-0500) time. They won't "fall back" this year. See Sara Shipley,
# The difference an hour makes, Nando Times (2000-08-29 15:33 -0400).
#
# From Paul Eggert (2001-07-16):
# The final rule was published in the
# <a href="http://frwebgate.access.gpo.gov/cgi-bin/getdoc.cgi?dbname=2000_register&docid=fr17au00-22">
# Federal Register 65, 160 (2000-08-17), pages 50154-50158.
# </a>
#
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Kentucky/Monticello -5:39:24 - LMT 1883 Nov 18 12:20:36
-6:00 US C%sT 1946
-6:00 - CST 1968
-6:00 US C%sT 2000 Oct 29 2:00
-5:00 US E%sT
# From Rives McDow (2000-08-30):
# Here ... are all the changes in the US since 1985.
# Kearny County, KS (put all of county on central;
# previously split between MST and CST) ... 1990-10
# Starke County, IN (from CST to EST) ... 1991-10
# Oliver County, ND (from MST to CST) ... 1992-10
# West Wendover, NV (from PST to MST) ... 1999-10
# Wayne County, KY (from CST to EST) ... 2000-10
#
# From Paul Eggert (2001-07-17):
# We don't know where the line used to be within Kearny County, KS,
# so omit that change for now.
# See America/Indiana/Knox for the Starke County, IN change.
# See America/North_Dakota/Center for the Oliver County, ND change.
# West Wendover, NV officially switched from Pacific to mountain time on
# 1999-10-31. See the
# <a href="http://frwebgate.access.gpo.gov/cgi-bin/getdoc.cgi?dbname=1999_register&docid=fr21oc99-15">
# Federal Register 64, 203 (1999-10-21), pages 56705-56707.
# </a>
# However, the Federal Register says that West Wendover already operated
# on mountain time, and the rule merely made this official;
# hence a separate tz entry is not needed.
# Michigan
#
# From Bob Devine (1988-01-28):
# Michigan didn't observe DST from 1968 to 1973.
#
# From Paul Eggert (1999-03-31):
# Shanks writes that Michigan started using standard time on 1885-09-18,
# but Howse writes (pp 124-125, referring to Popular Astronomy, 1901-01)
# that Detroit kept
#
# local time until 1900 when the City Council decreed that clocks should
# be put back twenty-eight minutes to Central Standard Time. Half the
# city obeyed, half refused. After considerable debate, the decision
# was rescinded and the city reverted to Sun time. A derisive offer to
# erect a sundial in front of the city hall was referred to the
# Committee on Sewers. Then, in 1905, Central time was adopted
# by city vote.
#
# This story is too entertaining to be false, so go with Howse over Shanks.
#
# From Paul Eggert (2001-03-06):
# Garland (1927) writes ``Cleveland and Detroit advanced their clocks
# one hour in 1914.'' This change is not in Shanks. We have no more
# info, so omit this for now.
#
# Most of Michigan observed DST from 1973 on, but was a bit late in 1975.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER
Rule Detroit 1948 only - Apr lastSun 2:00 1:00 D
Rule Detroit 1948 only - Sep lastSun 2:00 0 S
Rule Detroit 1967 only - Jun 14 2:00 1:00 D
Rule Detroit 1967 only - Oct lastSun 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Detroit -5:32:11 - LMT 1905
-6:00 - CST 1915 May 15 2:00
-5:00 - EST 1942
-5:00 US E%sT 1946
-5:00 Detroit E%sT 1973
-5:00 US E%sT 1975
-5:00 - EST 1975 Apr 27 2:00
-5:00 US E%sT
#
# Dickinson, Gogebic, Iron, and Menominee Counties, Michigan,
# switched from EST to CST/CDT in 1973.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER
Rule Menominee 1946 only - Apr lastSun 2:00 1:00 D
Rule Menominee 1946 only - Sep lastSun 2:00 0 S
Rule Menominee 1966 only - Apr lastSun 2:00 1:00 D
Rule Menominee 1966 only - Oct lastSun 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Menominee -5:50:27 - LMT 1885 Sep 18 12:00
-6:00 US C%sT 1946
-6:00 Menominee C%sT 1969 Apr 27 2:00
-5:00 - EST 1973 Apr 29 2:00
-6:00 US C%sT
# Navassa
# administered by the US Fish and Wildlife Service
# claimed by US under the provisions of the 1856 Guano Islands Act
# also claimed by Haiti
# occupied 1857/1900 by the Navassa Phosphate Co
# US lighthouse 1917/1996-09
# currently uninhabited
# see Mark Fineman, ``An Isle Rich in Guano and Discord'',
# _Los Angeles Times_ (1998-11-10), A1, A10; it cites
# Jimmy Skaggs, _The Great Guano Rush_ (1994).
################################################################################
# From Paul Eggert (2006-03-22):
# A good source for time zone historical data outside the U.S. is
# Thomas G. Shanks and Rique Pottenger, The International Atlas (6th edition),
# San Diego: ACS Publications, Inc. (2003).
#
# Gwillim Law writes that a good source
# for recent time zone data is the International Air Transport
# Association's Standard Schedules Information Manual (IATA SSIM),
# published semiannually. Law sent in several helpful summaries
# of the IATA's data after 1990.
#
# Except where otherwise noted, Shanks & Pottenger is the source for
# entries through 1990, and IATA SSIM is the source for entries afterwards.
#
# Other sources occasionally used include:
#
# Edward W. Whitman, World Time Differences,
# Whitman Publishing Co, 2 Niagara Av, Ealing, London (undated),
# which I found in the UCLA library.
#
# <a href="http://www.pettswoodvillage.co.uk/Daylight_Savings_William_Willett.pdf">
# William Willett, The Waste of Daylight, 19th edition
# </a> (1914-03)
#
# See the `europe' file for Greenland.
# Canada
# From Alain LaBonté (1994-11-14):
# I post here the time zone abbreviations standardized in Canada
# for both English and French in the CAN/CSA-Z234.4-89 standard....
#
# UTC Standard time Daylight savings time
# offset French English French English
# -2:30 - - HAT NDT
# -3 - - HAA ADT
# -3:30 HNT NST - -
# -4 HNA AST HAE EDT
# -5 HNE EST HAC CDT
# -6 HNC CST HAR MDT
# -7 HNR MST HAP PDT
# -8 HNP PST HAY YDT
# -9 HNY YST - -
#
# HN: Heure Normale ST: Standard Time
# HA: Heure Avancée DT: Daylight saving Time
#
# A: de l'Atlantique Atlantic
# C: du Centre Central
# E: de l'Est Eastern
# M: Mountain
# N: Newfoundland
# P: du Pacifique Pacific
# R: des Rocheuses
# T: de Terre-Neuve
# Y: du Yukon Yukon
#
# From Paul Eggert (1994-11-22):
# Alas, this sort of thing must be handled by localization software.
# Unless otherwise specified, the data for Canada are all from Shanks
# & Pottenger.
# From Chris Walton (2006-04-01, 2006-04-25, 2006-06-26, 2007-01-31,
# 2007-03-01):
# The British Columbia government announced yesterday that it will
# adjust daylight savings next year to align with changes in the
# U.S. and the rest of Canada....
# http://www2.news.gov.bc.ca/news_releases_2005-2009/2006AG0014-000330.htm
# ...
# Nova Scotia
# Daylight saving time will be extended by four weeks starting in 2007....
# http://www.gov.ns.ca/just/regulations/rg2/2006/ma1206.pdf
#
# [For New Brunswick] the new legislation dictates that the time change is to
# be done at 02:00 instead of 00:01.
# http://www.gnb.ca/0062/acts/BBA-2006/Chap-19.pdf
# ...
# Manitoba has traditionally changed the clock every fall at 03:00.
# As of 2006, the transition is to take place one hour earlier at 02:00.
# http://web2.gov.mb.ca/laws/statutes/ccsm/o030e.php
# ...
# [Alberta, Ontario, Quebec] will follow US rules.
# http://www.qp.gov.ab.ca/documents/spring/CH03_06.CFM
# http://www.e-laws.gov.on.ca/DBLaws/Source/Regs/English/2006/R06111_e.htm
# http://www2.publicationsduquebec.gouv.qc.ca/dynamicSearch/telecharge.php?type=5&file=2006C39A.PDF
# ...
# P.E.I. will follow US rules....
# http://www.assembly.pe.ca/bills/pdf_chapter/62/3/chapter-41.pdf
# ...
# Province of Newfoundland and Labrador....
# http://www.hoa.gov.nl.ca/hoa/bills/Bill0634.htm
# ...
# Yukon
# http://www.gov.yk.ca/legislation/regs/oic2006_127.pdf
# ...
# N.W.T. will follow US rules. Whoever maintains the government web site
# does not seem to believe in bookmarks. To see the news release, click the
# following link and search for "Daylight Savings Time Change". Press the
# "Daylight Savings Time Change" link; it will fire off a popup using
# JavaScript.
# http://www.exec.gov.nt.ca/currentnews/currentPR.asp?mode=archive
# ...
# Nunavut
# An amendment to the Interpretation Act was registered on February 19/2007....
# http://action.attavik.ca/home/justice-gn/attach/2007/gaz02part2.pdf
# From Paul Eggert (2006-04-25):
# H. David Matthews and Mary Vincent's map
# <a href="http://www.canadiangeographic.ca/Magazine/SO98/geomap.asp">
# "It's about TIME", _Canadian Geographic_ (September-October 1998)
# </a> contains detailed boundaries for regions observing nonstandard
# time and daylight saving time arrangements in Canada circa 1998.
#
# INMS, the Institute for National Measurement Standards in Ottawa, has <a
# href="http://inms-ienm.nrc-cnrc.gc.ca/en/time_services/daylight_saving_e.php">
# information about standard and daylight saving time zones in Canada.
# </a> (updated periodically).
# Its unofficial information is often taken from Matthews and Vincent.
# From Paul Eggert (2006-06-27):
# For now, assume all of DST-observing Canada will fall into line with the
# new US DST rules.
# From Chris Walton (2011-12-01)
# In the first of Tammy Hardwick's articles
# <a href="http://www.ilovecreston.com/?p=articles&t=spec&ar=260">
# http://www.ilovecreston.com/?p=articles&t=spec&ar=260
# </a>
# she quotes the Friday November 1/1918 edition of the Creston Review.
# The quote includes these two statements:
# 'Sunday the CPR went back to the old system of time...'
# '... The daylight saving scheme was dropped all over Canada at the same time,'
# These statements refer to a transition from daylight time to standard time
# that occurred nationally on Sunday October 27/1918. This transition was
# also documented in the Saturday October 26/1918 edition of the Toronto Star.
# In light of that evidence, we alter the date from the earlier-believed
# Oct 31 to Oct 27, 1918 (and Sunday is a more likely transition day
# than Thursday) in all Canadian rulesets.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Canada 1918 only - Apr 14 2:00 1:00 D
Rule Canada 1918 only - Oct 27 2:00 0 S
Rule Canada 1942 only - Feb 9 2:00 1:00 W # War
Rule Canada 1945 only - Aug 14 23:00u 1:00 P # Peace
Rule Canada 1945 only - Sep 30 2:00 0 S
Rule Canada 1974 1986 - Apr lastSun 2:00 1:00 D
Rule Canada 1974 2006 - Oct lastSun 2:00 0 S
Rule Canada 1987 2006 - Apr Sun>=1 2:00 1:00 D
Rule Canada 2007 max - Mar Sun>=8 2:00 1:00 D
Rule Canada 2007 max - Nov Sun>=1 2:00 0 S
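# (Reading the two 2007 rules above: from 2007 on, clocks spring forward
# one hour at 2:00 local time on the second Sunday in March (Sun>=8) and
# fall back at 2:00 on the first Sunday in November (Sun>=1); the LETTER
# column supplies the D or S substituted for %s in formats like E%sT.)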
# Newfoundland and Labrador
# From Paul Eggert (2000-10-02):
# Matthews and Vincent (1998) write that Labrador should use NST/NDT,
# but the only part of Labrador that follows the rules is the
# southeast corner, including Port Hope Simpson and Mary's Harbour,
# but excluding, say, Black Tickle.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule StJohns 1917 only - Apr 8 2:00 1:00 D
Rule StJohns 1917 only - Sep 17 2:00 0 S
# Whitman gives 1919 Apr 5 and 1920 Apr 5; go with Shanks & Pottenger.
Rule StJohns 1919 only - May 5 23:00 1:00 D
Rule StJohns 1919 only - Aug 12 23:00 0 S
# For 1931-1935 Whitman gives Apr same date; go with Shanks & Pottenger.
Rule StJohns 1920 1935 - May Sun>=1 23:00 1:00 D
Rule StJohns 1920 1935 - Oct lastSun 23:00 0 S
# For 1936-1941 Whitman gives May Sun>=8 and Oct Sun>=1; go with Shanks &
# Pottenger.
Rule StJohns 1936 1941 - May Mon>=9 0:00 1:00 D
Rule StJohns 1936 1941 - Oct Mon>=2 0:00 0 S
# Whitman gives the following transitions:
# 1942 03-01/12-31, 1943 05-30/09-05, 1944 07-10/09-02, 1945 01-01/10-07
# but go with Shanks & Pottenger and assume they used Canadian rules.
# For 1946-9 Whitman gives May 5,4,9,1 - Oct 1,5,3,2, and for 1950 he gives
# Apr 30 - Sep 24; go with Shanks & Pottenger.
Rule StJohns 1946 1950 - May Sun>=8 2:00 1:00 D
Rule StJohns 1946 1950 - Oct Sun>=2 2:00 0 S
Rule StJohns 1951 1986 - Apr lastSun 2:00 1:00 D
Rule StJohns 1951 1959 - Sep lastSun 2:00 0 S
Rule StJohns 1960 1986 - Oct lastSun 2:00 0 S
# From Paul Eggert (2000-10-02):
# INMS (2000-09-12) says that, since 1988 at least, Newfoundland switches
# at 00:01 local time. For now, assume it started in 1987.
# From Michael Pelley (2011-09-12):
# We received today, Monday, September 12, 2011, notification that the
# changes to the Newfoundland Standard Time Act have been proclaimed.
# The change in the Act stipulates that the change from Daylight Savings
# Time to Standard Time and from Standard Time to Daylight Savings Time
# now occurs at 2:00AM.
# ...
# <a href="http://www.assembly.nl.ca/legislation/sr/annualstatutes/2011/1106.chp.htm">
# http://www.assembly.nl.ca/legislation/sr/annualstatutes/2011/1106.chp.htm
# </a>
# ...
# MICHAEL PELLEY | Manager of Enterprise Architecture - Solution Delivery
# Office of the Chief Information Officer
# Executive Council
# Government of Newfoundland & Labrador
Rule StJohns 1987 only - Apr Sun>=1 0:01 1:00 D
Rule StJohns 1987 2006 - Oct lastSun 0:01 0 S
Rule StJohns 1988 only - Apr Sun>=1 0:01 2:00 DD
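# (The 1988 rule records Newfoundland's double daylight saving
# experiment: SAVE is 2:00, so clocks ran two hours ahead of standard
# time, and LETTER DD turns N%sT into NDDT.)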
Rule StJohns 1989 2006 - Apr Sun>=1 0:01 1:00 D
Rule StJohns 2007 2011 - Mar Sun>=8 0:01 1:00 D
Rule StJohns 2007 2010 - Nov Sun>=1 0:01 0 S
#
# St John's has an apostrophe, but POSIX portable file names can't have
# apostrophes.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/St_Johns -3:30:52 - LMT 1884
-3:30:52 StJohns N%sT 1918
-3:30:52 Canada N%sT 1919
-3:30:52 StJohns N%sT 1935 Mar 30
-3:30 StJohns N%sT 1942 May 11
-3:30 Canada N%sT 1946
-3:30 StJohns N%sT 2011 Nov
-3:30 Canada N%sT
# most of east Labrador
# The name `Happy Valley-Goose Bay' is too long; use `Goose Bay'.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Goose_Bay -4:01:40 - LMT 1884 # Happy Valley-Goose Bay
-3:30:52 - NST 1918
-3:30:52 Canada N%sT 1919
-3:30:52 - NST 1935 Mar 30
-3:30 - NST 1936
-3:30 StJohns N%sT 1942 May 11
-3:30 Canada N%sT 1946
-3:30 StJohns N%sT 1966 Mar 15 2:00
-4:00 StJohns A%sT 2011 Nov
-4:00 Canada A%sT
# west Labrador, Nova Scotia, Prince Edward I
# From Paul Eggert (2006-03-22):
# Shanks & Pottenger write that since 1970 most of this region has been like
# Halifax. Many locales did not observe peacetime DST until 1972;
# Glace Bay, NS is the largest that we know of.
# Shanks & Pottenger also write that Liverpool, NS was the only town
# in Canada to observe DST in 1971 but not 1970; for now we'll assume
# this is a typo.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Halifax 1916 only - Apr 1 0:00 1:00 D
Rule Halifax 1916 only - Oct 1 0:00 0 S
Rule Halifax 1920 only - May 9 0:00 1:00 D
Rule Halifax 1920 only - Aug 29 0:00 0 S
Rule Halifax 1921 only - May 6 0:00 1:00 D
Rule Halifax 1921 1922 - Sep 5 0:00 0 S
Rule Halifax 1922 only - Apr 30 0:00 1:00 D
Rule Halifax 1923 1925 - May Sun>=1 0:00 1:00 D
Rule Halifax 1923 only - Sep 4 0:00 0 S
Rule Halifax 1924 only - Sep 15 0:00 0 S
Rule Halifax 1925 only - Sep 28 0:00 0 S
Rule Halifax 1926 only - May 16 0:00 1:00 D
Rule Halifax 1926 only - Sep 13 0:00 0 S
Rule Halifax 1927 only - May 1 0:00 1:00 D
Rule Halifax 1927 only - Sep 26 0:00 0 S
Rule Halifax 1928 1931 - May Sun>=8 0:00 1:00 D
Rule Halifax 1928 only - Sep 9 0:00 0 S
Rule Halifax 1929 only - Sep 3 0:00 0 S
Rule Halifax 1930 only - Sep 15 0:00 0 S
Rule Halifax 1931 1932 - Sep Mon>=24 0:00 0 S
Rule Halifax 1932 only - May 1 0:00 1:00 D
Rule Halifax 1933 only - Apr 30 0:00 1:00 D
Rule Halifax 1933 only - Oct 2 0:00 0 S
Rule Halifax 1934 only - May 20 0:00 1:00 D
Rule Halifax 1934 only - Sep 16 0:00 0 S
Rule Halifax 1935 only - Jun 2 0:00 1:00 D
Rule Halifax 1935 only - Sep 30 0:00 0 S
Rule Halifax 1936 only - Jun 1 0:00 1:00 D
Rule Halifax 1936 only - Sep 14 0:00 0 S
Rule Halifax 1937 1938 - May Sun>=1 0:00 1:00 D
Rule Halifax 1937 1941 - Sep Mon>=24 0:00 0 S
Rule Halifax 1939 only - May 28 0:00 1:00 D
Rule Halifax 1940 1941 - May Sun>=1 0:00 1:00 D
Rule Halifax 1946 1949 - Apr lastSun 2:00 1:00 D
Rule Halifax 1946 1949 - Sep lastSun 2:00 0 S
Rule Halifax 1951 1954 - Apr lastSun 2:00 1:00 D
Rule Halifax 1951 1954 - Sep lastSun 2:00 0 S
Rule Halifax 1956 1959 - Apr lastSun 2:00 1:00 D
Rule Halifax 1956 1959 - Sep lastSun 2:00 0 S
Rule Halifax 1962 1973 - Apr lastSun 2:00 1:00 D
Rule Halifax 1962 1973 - Oct lastSun 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Halifax -4:14:24 - LMT 1902 Jun 15
-4:00 Halifax A%sT 1918
-4:00 Canada A%sT 1919
-4:00 Halifax A%sT 1942 Feb 9 2:00s
-4:00 Canada A%sT 1946
-4:00 Halifax A%sT 1974
-4:00 Canada A%sT
Zone America/Glace_Bay -3:59:48 - LMT 1902 Jun 15
-4:00 Canada A%sT 1953
-4:00 Halifax A%sT 1954
-4:00 - AST 1972
-4:00 Halifax A%sT 1974
-4:00 Canada A%sT
# New Brunswick
# From Paul Eggert (2007-01-31):
# The Time Definition Act <http://www.gnb.ca/0062/PDF-acts/t-06.pdf>
# says they changed at 00:01 through 2006, and
# <http://www.canlii.org/nb/laws/sta/t-6/20030127/whole.html> makes it
# clear that this was the case since at least 1993.
# For now, assume it started in 1993.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Moncton 1933 1935 - Jun Sun>=8 1:00 1:00 D
Rule Moncton 1933 1935 - Sep Sun>=8 1:00 0 S
Rule Moncton 1936 1938 - Jun Sun>=1 1:00 1:00 D
Rule Moncton 1936 1938 - Sep Sun>=1 1:00 0 S
Rule Moncton 1939 only - May 27 1:00 1:00 D
Rule Moncton 1939 1941 - Sep Sat>=21 1:00 0 S
Rule Moncton 1940 only - May 19 1:00 1:00 D
Rule Moncton 1941 only - May 4 1:00 1:00 D
Rule Moncton 1946 1972 - Apr lastSun 2:00 1:00 D
Rule Moncton 1946 1956 - Sep lastSun 2:00 0 S
Rule Moncton 1957 1972 - Oct lastSun 2:00 0 S
Rule Moncton 1993 2006 - Apr Sun>=1 0:01 1:00 D
Rule Moncton 1993 2006 - Oct lastSun 0:01 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Moncton -4:19:08 - LMT 1883 Dec 9
-5:00 - EST 1902 Jun 15
-4:00 Canada A%sT 1933
-4:00 Moncton A%sT 1942
-4:00 Canada A%sT 1946
-4:00 Moncton A%sT 1973
-4:00 Canada A%sT 1993
-4:00 Moncton A%sT 2007
-4:00 Canada A%sT
# Quebec
# From Paul Eggert (2006-07-09):
# Shanks & Pottenger write that since 1970 most of Quebec has been
# like Montreal.
# From Paul Eggert (2006-06-27):
# Matthews and Vincent (1998) also write that Quebec east of the -63
# meridian is supposed to observe AST, but residents as far east as
# Natashquan use EST/EDT, and residents east of Natashquan use AST.
# In "Official time in Quebec" the Quebec department of justice writes in
# http://www.justice.gouv.qc.ca/english/publications/generale/temps-regl-1-a.htm
# that "The residents of the Municipality of the
# Cote-Nord-du-Golfe-Saint-Laurent and the municipalities of Saint-Augustin,
# Bonne-Esperance and Blanc-Sablon apply the Official Time Act as it is
# written and use Atlantic standard time all year round. The same applies to
# the residents of the Native facilities along the lower North Shore."
# <http://www.assnat.qc.ca/eng/37legislature2/Projets-loi/Publics/06-a002.htm>
# says this common practice was codified into law as of 2007.
# For lack of better info, guess this practice began around 1970, contrary
# to Shanks & Pottenger, who have this region observing AST/ADT.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Mont 1917 only - Mar 25 2:00 1:00 D
Rule Mont 1917 only - Apr 24 0:00 0 S
Rule Mont 1919 only - Mar 31 2:30 1:00 D
Rule Mont 1919 only - Oct 25 2:30 0 S
Rule Mont 1920 only - May 2 2:30 1:00 D
Rule Mont 1920 1922 - Oct Sun>=1 2:30 0 S
Rule Mont 1921 only - May 1 2:00 1:00 D
Rule Mont 1922 only - Apr 30 2:00 1:00 D
Rule Mont 1924 only - May 17 2:00 1:00 D
Rule Mont 1924 1926 - Sep lastSun 2:30 0 S
Rule Mont 1925 1926 - May Sun>=1 2:00 1:00 D
# The 1927-to-1937 rules can be expressed more simply as
# Rule Mont 1927 1937 - Apr lastSat 24:00 1:00 D
# Rule Mont 1927 1937 - Sep lastSat 24:00 0 S
# The rules below avoid use of 24:00
# (which pre-1998 versions of zic cannot handle).
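# (Apr lastSat 24:00 denotes the same instant as 0:00 on the following
# Sunday: May 1 when April ends on a Saturday, as in 1927 and 1932, and
# Apr lastSun otherwise; the September rules transform analogously.)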
Rule Mont 1927 only - May 1 0:00 1:00 D
Rule Mont 1927 1932 - Sep lastSun 0:00 0 S
Rule Mont 1928 1931 - Apr lastSun 0:00 1:00 D
Rule Mont 1932 only - May 1 0:00 1:00 D
Rule Mont 1933 1940 - Apr lastSun 0:00 1:00 D
Rule Mont 1933 only - Oct 1 0:00 0 S
Rule Mont 1934 1939 - Sep lastSun 0:00 0 S
Rule Mont 1946 1973 - Apr lastSun 2:00 1:00 D
Rule Mont 1945 1948 - Sep lastSun 2:00 0 S
Rule Mont 1949 1950 - Oct lastSun 2:00 0 S
Rule Mont 1951 1956 - Sep lastSun 2:00 0 S
Rule Mont 1957 1973 - Oct lastSun 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Blanc-Sablon -3:48:28 - LMT 1884
-4:00 Canada A%sT 1970
-4:00 - AST
Zone America/Montreal -4:54:16 - LMT 1884
-5:00 Mont E%sT 1918
-5:00 Canada E%sT 1919
-5:00 Mont E%sT 1942 Feb 9 2:00s
-5:00 Canada E%sT 1946
-5:00 Mont E%sT 1974
-5:00 Canada E%sT
# Ontario
# From Paul Eggert (2006-07-09):
# Shanks & Pottenger write that since 1970 most of Ontario has been like
# Toronto.
# Thunder Bay skipped DST in 1973.
# Many smaller locales did not observe peacetime DST until 1974;
# Nipigon (EST) and Rainy River (CST) are the largest that we know of.
# Far west Ontario is like Winnipeg; far east Quebec is like Halifax.
# From Mark Brader (2003-07-26):
# [According to the Toronto Star] Orillia, Ontario, adopted DST
# effective Saturday, 1912-06-22, 22:00; the article mentions that
# Port Arthur (now part of Thunder Bay, Ontario) as well as Moose Jaw
# have already done so. In Orillia DST was to run until Saturday,
# 1912-08-31 (no time mentioned), but it was met with considerable
# hostility from certain segments of the public, and was revoked after
# only two weeks -- I copied it as Saturday, 1912-07-07, 22:00, but
# presumably that should be -07-06. (1912-06-19, -07-12; also letters
# earlier in June).
#
# Kenora, Ontario, was to abandon DST on 1914-06-01 (-05-21).
# From Paul Eggert (1997-10-17):
# Mark Brader writes that an article in the 1997-10-14 Toronto Star
# says that Atikokan, Ontario currently does not observe DST,
# but will vote on 11-10 whether to use EST/EDT.
# He also writes that the
# <a href="http://www.gov.on.ca/MBS/english/publications/statregs/conttext.html">
# Ontario Time Act (1990, Chapter T.9)
# </a>
# says that Ontario east of 90W uses EST/EDT, and west of 90W uses CST/CDT.
# Officially Atikokan is therefore on CST/CDT, and most likely this report
# concerns a non-official time observed as a matter of local practice.
#
# From Paul Eggert (2000-10-02):
# Matthews and Vincent (1998) write that Atikokan, Pickle Lake, and
# New Osnaburgh observe CST all year, that Big Trout Lake observes
# CST/CDT, and that Upsala and Shebandowan observe EST/EDT, all in
# violation of the official Ontario rules.
#
# From Paul Eggert (2006-07-09):
# Chris Walton (2006-07-06) mentioned an article by Stephanie MacLellan in the
# 2005-07-21 Chronicle-Journal, which said:
#
# The clocks in Atikokan stay set on standard time year-round.
# This means they spend about half the time on central time and
# the other half on eastern time.
#
# For the most part, the system works, Mayor Dennis Brown said.
#
# "The majority of businesses in Atikokan deal more with Eastern
# Canada, but there are some that deal with Western Canada," he
# said. "I don't see any changes happening here."
#
# Walton also writes "Supposedly Pickle Lake and Mishkeegogamang
# [New Osnaburgh] follow the same practice."
# From Garry McKinnon (2006-07-14) via Chris Walton:
# I chatted with a member of my board who has an outstanding memory
# and a long history in Atikokan (and in the telecom industry) and he
# can say for certain that Atikokan has been practicing the current
# time keeping since 1952, at least.
# From Paul Eggert (2006-07-17):
# Shanks & Pottenger say that Atikokan has agreed with Rainy River
# ever since standard time was introduced, but the information from
# McKinnon sounds more authoritative. For now, assume that Atikokan
# switched to EST immediately after WWII era daylight saving time
# ended. This matches the old (less-populous) America/Coral_Harbour
# entry since our cutoff date of 1970, so we can move
# America/Coral_Harbour to the 'backward' file.
# From Mark Brader (2010-03-06):
#
# Currently the database has:
#
# # Ontario
#
# # From Paul Eggert (2006-07-09):
# # Shanks & Pottenger write that since 1970 most of Ontario has been like
# # Toronto.
# # Thunder Bay skipped DST in 1973.
# # Many smaller locales did not observe peacetime DST until 1974;
# # Nipigon (EST) and Rainy River (CST) are the largest that we know of.
#
# In the (Toronto) Globe and Mail for Saturday, 1955-09-24, in the bottom
# right corner of page 1, it says that Toronto will return to standard
# time at 2 am Sunday morning (which agrees with the database), and that:
#
# The one-hour setback will go into effect throughout most of Ontario,
# except in areas like Windsor which remains on standard time all year.
#
# Windsor is, of course, a lot larger than Nipigon.
#
# I only came across this incidentally. I don't know if Windsor began
# observing DST when Detroit did, or in 1974, or on some other date.
#
# By the way, the article continues by noting that:
#
# Some cities in the United States have pushed the deadline back
# three weeks and will change over from daylight saving in October.
# From Arthur David Olson (2010-07-17):
#
# "Standard Time and Time Zones in Canada" appeared in
# The Journal of The Royal Astronomical Society of Canada,
# volume 26, number 2 (February 1932) and, as of 2010-07-17,
# was available at
# <a href="http://adsabs.harvard.edu/full/1932JRASC..26...49S">
# http://adsabs.harvard.edu/full/1932JRASC..26...49S
# </a>
#
# It includes the text below (starting on page 57):
#
# A list of the places in Canada using daylight saving time would
# require yearly revision. From information kindly furnished by
# the provincial governments and by the postmasters in many cities
# and towns, it is found that the following places used daylight sav-
# ing in 1930. The information for the province of Quebec is definite,
# for the other provinces only approximate:
#
# Province Daylight saving time used
# Prince Edward Island Not used.
# Nova Scotia In Halifax only.
# New Brunswick In St. John only.
# Quebec In the following places:
# Montreal Lachine
# Quebec Mont-Royal
# Levis Iberville
# St. Lambert Cap de la Madeleine
# Verdun Loretteville
# Westmount Richmond
# Outremont St. Jerome
# Longueuil Greenfield Park
# Arvida Waterloo
# Chambly-Canton Beaulieu
# Melbourne La Tuque
# St. Theophile Buckingham
# Ontario Used generally in the cities and towns along
# the southerly part of the province. Not
# used in the northwesterly part.
# Manitoba Not used.
# Saskatchewan In Regina only.
# Alberta Not used.
# British Columbia Not used.
#
# With some exceptions, the use of daylight saving may be said to be limited
# to those cities and towns lying between Quebec city and Windsor, Ont.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Toronto 1919 only - Mar 30 23:30 1:00 D
Rule Toronto 1919 only - Oct 26 0:00 0 S
Rule Toronto 1920 only - May 2 2:00 1:00 D
Rule Toronto 1920 only - Sep 26 0:00 0 S
Rule Toronto 1921 only - May 15 2:00 1:00 D
Rule Toronto 1921 only - Sep 15 2:00 0 S
Rule Toronto 1922 1923 - May Sun>=8 2:00 1:00 D
# Shanks & Pottenger say 1923-09-19; assume it's a typo and that "-16"
# was meant.
Rule Toronto 1922 1926 - Sep Sun>=15 2:00 0 S
Rule Toronto 1924 1927 - May Sun>=1 2:00 1:00 D
# The 1927-to-1939 rules can be expressed more simply as
# Rule Toronto 1927 1937 - Sep Sun>=25 2:00 0 S
# Rule Toronto 1928 1937 - Apr Sun>=25 2:00 1:00 D
# Rule Toronto 1938 1940 - Apr lastSun 2:00 1:00 D
# Rule Toronto 1938 1939 - Sep lastSun 2:00 0 S
# The rules below avoid use of Sun>=25
# (which pre-2004 versions of zic cannot handle).
Rule Toronto 1927 1932 - Sep lastSun 2:00 0 S
Rule Toronto 1928 1931 - Apr lastSun 2:00 1:00 D
Rule Toronto 1932 only - May 1 2:00 1:00 D
Rule Toronto 1933 1940 - Apr lastSun 2:00 1:00 D
Rule Toronto 1933 only - Oct 1 2:00 0 S
Rule Toronto 1934 1939 - Sep lastSun 2:00 0 S
Rule Toronto 1945 1946 - Sep lastSun 2:00 0 S
Rule Toronto 1946 only - Apr lastSun 2:00 1:00 D
Rule Toronto 1947 1949 - Apr lastSun 0:00 1:00 D
Rule Toronto 1947 1948 - Sep lastSun 0:00 0 S
Rule Toronto 1949 only - Nov lastSun 0:00 0 S
Rule Toronto 1950 1973 - Apr lastSun 2:00 1:00 D
Rule Toronto 1950 only - Nov lastSun 2:00 0 S
Rule Toronto 1951 1956 - Sep lastSun 2:00 0 S
# Shanks & Pottenger say Toronto ended DST a week early in 1971,
# namely on 1971-10-24, but Mark Brader wrote (2003-05-31) that this
# is wrong, and that he had confirmed it by checking the 1971-10-30
# Toronto Star, which said that DST was ending 1971-10-31 as usual.
Rule Toronto 1957 1973 - Oct lastSun 2:00 0 S
# From Paul Eggert (2003-07-27):
# Willett (1914-03) writes (p. 17) "In the Cities of Fort William, and
# Port Arthur, Ontario, the principle of the Bill has been in
# operation for the past three years, and in the City of Moose Jaw,
# Saskatchewan, for one year."
# From David Bryan via Tory Tronrud, Director/Curator,
# Thunder Bay Museum (2003-11-12):
# There is some suggestion, however, that, by-law or not, daylight
# savings time was being practiced in Fort William and Port Arthur
# before 1909.... [I]n 1910, the line between the Eastern and Central
# Time Zones was permanently moved about two hundred miles west to
# include the Thunder Bay area.... When Canada adopted daylight
# savings time in 1916, Fort William and Port Arthur, having done so
# already, did not change their clocks.... During the Second World
# War,... [t]he cities agreed to implement DST during the summer
# months for the remainder of the war years.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Toronto -5:17:32 - LMT 1895
-5:00 Canada E%sT 1919
-5:00 Toronto E%sT 1942 Feb 9 2:00s
-5:00 Canada E%sT 1946
-5:00 Toronto E%sT 1974
-5:00 Canada E%sT
Zone America/Thunder_Bay -5:57:00 - LMT 1895
-6:00 - CST 1910
-5:00 - EST 1942
-5:00 Canada E%sT 1970
-5:00 Mont E%sT 1973
-5:00 - EST 1974
-5:00 Canada E%sT
Zone America/Nipigon -5:53:04 - LMT 1895
-5:00 Canada E%sT 1940 Sep 29
-5:00 1:00 EDT 1942 Feb 9 2:00s
-5:00 Canada E%sT
Zone America/Rainy_River -6:18:16 - LMT 1895
-6:00 Canada C%sT 1940 Sep 29
-6:00 1:00 CDT 1942 Feb 9 2:00s
-6:00 Canada C%sT
Zone America/Atikokan -6:06:28 - LMT 1895
-6:00 Canada C%sT 1940 Sep 29
-6:00 1:00 CDT 1942 Feb 9 2:00s
-6:00 Canada C%sT 1945 Sep 30 2:00
-5:00 - EST
# Manitoba
# From Rob Douglas (2006-04-06):
# the old Manitoba Time Act - as amended by Bill 2, assented to
# March 27, 1987 ... said ...
# "between two o'clock Central Standard Time in the morning of
# the first Sunday of April of each year and two o'clock Central
# Standard Time in the morning of the last Sunday of October next
# following, one hour in advance of Central Standard Time."...
# I believe that the English legislation [of the old time act] had
# been assented to (March 22, 1967)....
# Also, as far as I can tell, there was no order-in-council varying
# the time of Daylight Saving Time for 2005 and so the provisions of
# the 1987 version would apply - the changeover was at 2:00 Central
# Standard Time (i.e. not until 3:00 Central Daylight Time).
# From Paul Eggert (2006-04-10):
# Shanks & Pottenger say Manitoba switched at 02:00 (not 02:00s)
# starting 1966. Since 02:00s is clearly correct for 1967 on, assume
# it was also 02:00s in 1966.
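# (In the AT column, a trailing "s" means local standard time and "u"
# means universal time; with no suffix, local wall-clock time applies.
# Hence 2:00s below is 02:00 CST whether or not DST is in effect, and
# the 1945 rule's 23:00u is 23:00 UT.)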
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Winn 1916 only - Apr 23 0:00 1:00 D
Rule Winn 1916 only - Sep 17 0:00 0 S
Rule Winn 1918 only - Apr 14 2:00 1:00 D
Rule Winn 1918 only - Oct 27 2:00 0 S
Rule Winn 1937 only - May 16 2:00 1:00 D
Rule Winn 1937 only - Sep 26 2:00 0 S
Rule Winn 1942 only - Feb 9 2:00 1:00 W # War
Rule Winn 1945 only - Aug 14 23:00u 1:00 P # Peace
Rule Winn 1945 only - Sep lastSun 2:00 0 S
Rule Winn 1946 only - May 12 2:00 1:00 D
Rule Winn 1946 only - Oct 13 2:00 0 S
Rule Winn 1947 1949 - Apr lastSun 2:00 1:00 D
Rule Winn 1947 1949 - Sep lastSun 2:00 0 S
Rule Winn 1950 only - May 1 2:00 1:00 D
Rule Winn 1950 only - Sep 30 2:00 0 S
Rule Winn 1951 1960 - Apr lastSun 2:00 1:00 D
Rule Winn 1951 1958 - Sep lastSun 2:00 0 S
Rule Winn 1959 only - Oct lastSun 2:00 0 S
Rule Winn 1960 only - Sep lastSun 2:00 0 S
Rule Winn 1963 only - Apr lastSun 2:00 1:00 D
Rule Winn 1963 only - Sep 22 2:00 0 S
Rule Winn 1966 1986 - Apr lastSun 2:00s 1:00 D
Rule Winn 1966 2005 - Oct lastSun 2:00s 0 S
Rule Winn 1987 2005 - Apr Sun>=1 2:00s 1:00 D
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Winnipeg -6:28:36 - LMT 1887 Jul 16
-6:00 Winn C%sT 2006
-6:00 Canada C%sT
# Saskatchewan
# From Mark Brader (2003-07-26):
# The first actual adoption of DST in Canada was at the municipal
# level. As the [Toronto] Star put it (1912-06-07), "While people
# elsewhere have long been talking of legislation to save daylight,
# the city of Moose Jaw [Saskatchewan] has acted on its own hook."
# DST in Moose Jaw began on Saturday, 1912-06-01 (no time mentioned:
# presumably late evening, as below), and would run until "the end of
# the summer". The discrepancy between municipal time and railroad
# time was noted.
# From Paul Eggert (2003-07-27):
# Willett (1914-03) notes that DST "has been in operation ... in the
# City of Moose Jaw, Saskatchewan, for one year."
# From Paul Eggert (2006-03-22):
# Shanks & Pottenger say that since 1970 this region has mostly been as Regina.
# Some western towns (e.g. Swift Current) switched from MST/MDT to CST in 1972.
# Other western towns (e.g. Lloydminster) are like Edmonton.
# Matthews and Vincent (1998) write that Denare Beach and Creighton
# are like Winnipeg, in violation of Saskatchewan law.
# From W. Jones (1992-11-06):
# The. . .below is based on information I got from our law library, the
# provincial archives, and the provincial Community Services department.
# A precise history would require digging through newspaper archives, and
# since you didn't say what you wanted, I didn't bother.
#
# Saskatchewan is split by a time zone meridian (105W) and over the years
# the boundary became pretty ragged as communities near it reevaluated
# their affiliations in one direction or the other. In 1965 a provincial
# referendum favoured legislating common time practices.
#
# On 15 April 1966 the Time Act (c. T-14, Revised Statutes of
# Saskatchewan 1978) was proclaimed, and established that the eastern
# part of Saskatchewan would use CST year round, that districts in
# northwest Saskatchewan would by default follow CST but could opt to
# follow Mountain Time rules (thus 1 hour difference in the winter and
# zero in the summer), and that districts in southwest Saskatchewan would
# by default follow MT but could opt to follow CST.
#
# It took a few years for the dust to settle (I know one story of a town
# on one time zone having its school in another, such that a mom had to
# serve her family lunch in two shifts), but presently it seems that only
# a few towns on the border with Alberta (e.g. Lloydminster) follow MT
# rules any more; all other districts appear to have used CST year round
# since sometime in the 1960s.
# From Chris Walton (2006-06-26):
# The Saskatchewan time act which was last updated in 1996 is about 30 pages
# long and rather painful to read.
# http://www.qp.gov.sk.ca/documents/English/Statutes/Statutes/T14.pdf
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Regina 1918 only - Apr 14 2:00 1:00 D
Rule Regina 1918 only - Oct 27 2:00 0 S
Rule Regina 1930 1934 - May Sun>=1 0:00 1:00 D
Rule Regina 1930 1934 - Oct Sun>=1 0:00 0 S
Rule Regina 1937 1941 - Apr Sun>=8 0:00 1:00 D
Rule Regina 1937 only - Oct Sun>=8 0:00 0 S
Rule Regina 1938 only - Oct Sun>=1 0:00 0 S
Rule Regina 1939 1941 - Oct Sun>=8 0:00 0 S
Rule Regina 1942 only - Feb 9 2:00 1:00 W # War
Rule Regina 1945 only - Aug 14 23:00u 1:00 P # Peace
Rule Regina 1945 only - Sep lastSun 2:00 0 S
Rule Regina 1946 only - Apr Sun>=8 2:00 1:00 D
Rule Regina 1946 only - Oct Sun>=8 2:00 0 S
Rule Regina 1947 1957 - Apr lastSun 2:00 1:00 D
Rule Regina 1947 1957 - Sep lastSun 2:00 0 S
Rule Regina 1959 only - Apr lastSun 2:00 1:00 D
Rule Regina 1959 only - Oct lastSun 2:00 0 S
#
Rule Swift 1957 only - Apr lastSun 2:00 1:00 D
Rule Swift 1957 only - Oct lastSun 2:00 0 S
Rule Swift 1959 1961 - Apr lastSun 2:00 1:00 D
Rule Swift 1959 only - Oct lastSun 2:00 0 S
Rule Swift 1960 1961 - Sep lastSun 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Regina -6:58:36 - LMT 1905 Sep
-7:00 Regina M%sT 1960 Apr lastSun 2:00
-6:00 - CST
Zone America/Swift_Current -7:11:20 - LMT 1905 Sep
-7:00 Canada M%sT 1946 Apr lastSun 2:00
-7:00 Regina M%sT 1950
-7:00 Swift M%sT 1972 Apr lastSun 2:00
-6:00 - CST
# Alberta
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Edm 1918 1919 - Apr Sun>=8 2:00 1:00 D
Rule Edm 1918 only - Oct 27 2:00 0 S
Rule Edm 1919 only - May 27 2:00 0 S
Rule Edm 1920 1923 - Apr lastSun 2:00 1:00 D
Rule Edm 1920 only - Oct lastSun 2:00 0 S
Rule Edm 1921 1923 - Sep lastSun 2:00 0 S
Rule Edm 1942 only - Feb 9 2:00 1:00 W # War
Rule Edm 1945 only - Aug 14 23:00u 1:00 P # Peace
Rule Edm 1945 only - Sep lastSun 2:00 0 S
Rule Edm 1947 only - Apr lastSun 2:00 1:00 D
Rule Edm 1947 only - Sep lastSun 2:00 0 S
Rule Edm 1967 only - Apr lastSun 2:00 1:00 D
Rule Edm 1967 only - Oct lastSun 2:00 0 S
Rule Edm 1969 only - Apr lastSun 2:00 1:00 D
Rule Edm 1969 only - Oct lastSun 2:00 0 S
Rule Edm 1972 1986 - Apr lastSun 2:00 1:00 D
Rule Edm 1972 2006 - Oct lastSun 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Edmonton -7:33:52 - LMT 1906 Sep
-7:00 Edm M%sT 1987
-7:00 Canada M%sT
# British Columbia
# From Paul Eggert (2006-03-22):
# Shanks & Pottenger write that since 1970 most of this region has
# been like Vancouver.
# Dawson Creek uses MST. Much of east BC is like Edmonton.
# Matthews and Vincent (1998) write that Creston is like Dawson Creek.
# It seems, though, that this (re: Creston) is not entirely correct:
# From Chris Walton (2011-12-01):
# There are two areas within the Canadian province of British Columbia
# that do not currently observe daylight saving:
# a) The Creston Valley (includes the town of Creston and surrounding area)
# b) The eastern half of the Peace River Regional District
# (includes the cities of Dawson Creek and Fort St. John)
# Earlier this year I stumbled across a detailed article about the time
# keeping history of Creston; it was written by Tammy Hardwick who is the
# manager of the Creston & District Museum. The article was written in May 2009.
# <a href="http://www.ilovecreston.com/?p=articles&t=spec&ar=260">
# http://www.ilovecreston.com/?p=articles&t=spec&ar=260
# </a>
# According to the article, Creston has not changed its clocks since June 1918.
# i.e. Creston has been stuck on UTC-7 for 93 years.
# Dawson Creek, on the other hand, changed its clocks as recently as April 1972.
# Unfortunately the exact date for the time change in June 1918 remains
# unknown and will be difficult to ascertain. I e-mailed Tammy a few months
# ago to ask if Sunday June 2 was a reasonable guess. She said it was just
# as plausible as any other date (in June). She also said that after writing the
# article she had discovered another time change in 1916; this is the subject
# of another article which she wrote in October 2010.
# <a href="http://www.creston.museum.bc.ca/index.php?module=comments&uop=view_comment&cm+id=56">
# http://www.creston.museum.bc.ca/index.php?module=comments&uop=view_comment&cm+id=56
# </a>
# Here is a summary of the three clock change events in Creston's history:
# 1. 1884 or 1885: adoption of Mountain Standard Time (GMT-7)
# Exact date unknown
# 2. Oct 1916: switch to Pacific Standard Time (GMT-8)
# Exact date in October unknown; Sunday October 1 is a reasonable guess.
# 3. June 1918: switch to Pacific Daylight Time (GMT-7)
# Exact date in June unknown; Sunday June 2 is a reasonable guess.
# note#1:
# On Oct 27/1918 when daylight saving ended in the rest of Canada,
# Creston did not change its clocks.
# note#2:
# During WWII when the Federal Government legislated a mandatory clock change,
# Creston did not oblige.
# note#3:
# There is no guarantee that Creston will remain on Mountain Standard Time
# (UTC-7) forever.
# The subject was debated at least once this year by the town Council.
# <a href="http://www.bclocalnews.com/kootenay_rockies/crestonvalleyadvance/news/116760809.html">
# http://www.bclocalnews.com/kootenay_rockies/crestonvalleyadvance/news/116760809.html
# </a>
# During a period of WWII, summer time (daylight saving) was mandatory in Canada.
# In Creston, that was handled by shifting the area to PST (-8:00) then applying
# summer time to cause the offset to be -7:00, the same as it had been before
# the change. It can be argued that the timezone abbreviation during this
# period should be PDT rather than MST, but that doesn't seem important enough
# (to anyone) to further complicate the rules.
# The transition dates (and times) are guesses.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Vanc 1918 only - Apr 14 2:00 1:00 D
Rule Vanc 1918 only - Oct 27 2:00 0 S
Rule Vanc 1942 only - Feb 9 2:00 1:00 W # War
Rule Vanc 1945 only - Aug 14 23:00u 1:00 P # Peace
Rule Vanc 1945 only - Sep 30 2:00 0 S
Rule Vanc 1946 1986 - Apr lastSun 2:00 1:00 D
Rule Vanc 1946 only - Oct 13 2:00 0 S
Rule Vanc 1947 1961 - Sep lastSun 2:00 0 S
Rule Vanc 1962 2006 - Oct lastSun 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Vancouver -8:12:28 - LMT 1884
-8:00 Vanc P%sT 1987
-8:00 Canada P%sT
Zone America/Dawson_Creek -8:00:56 - LMT 1884
-8:00 Canada P%sT 1947
-8:00 Vanc P%sT 1972 Aug 30 2:00
-7:00 - MST
Zone America/Creston -7:46:04 - LMT 1884
-7:00 - MST 1916 Oct 1
-8:00 - PST 1918 Jun 2
-7:00 - MST
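# Per the zone above, Creston has kept a constant -7:00 with no DST since
# 1918; a hedged Python check (assuming the system tzdata carries this
# history) collapses all seasons to a single abbreviation:
#
#   from datetime import datetime
#   from zoneinfo import ZoneInfo
#   cr = ZoneInfo("America/Creston")
#   print({datetime(y, m, 1, tzinfo=cr).tzname() for y in (1980, 2010) for m in (1, 7)})
#
# Expected output: {'MST'}.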
# Northwest Territories, Nunavut, Yukon
# From Paul Eggert (2006-03-22):
# Dawson switched to PST in 1973. Inuvik switched to MST in 1979.
# Mathew Englander (1996-10-07) gives the following refs:
# * 1967. Paragraph 28(34)(g) of the Interpretation Act, S.C. 1967-68,
# c. 7 defines Yukon standard time as UTC-9. This is still valid;
# see Interpretation Act, R.S.C. 1985, c. I-21, s. 35(1).
# * C.O. 1973/214 switched Yukon to PST on 1973-10-28 00:00.
# * O.I.C. 1980/02 established DST.
# * O.I.C. 1987/056 changed DST to Apr firstSun 2:00 to Oct lastSun 2:00.
# Shanks & Pottenger say Yukon's 1973-10-28 switch was at 2:00; go
# with Englander.
# From Chris Walton (2006-06-26):
# Here is a link to the old daylight saving portion of the interpretation
# act which was last updated in 1987:
# http://www.gov.yk.ca/legislation/regs/oic1987_056.pdf
# From Rives McDow (1999-09-04):
# Nunavut ... moved ... to incorporate the whole territory into one time zone.
# <a href="http://www.nunatsiaq.com/nunavut/nvt90903_13.html">
# Nunavut moves to single time zone Oct. 31
# </a>
#
# From Antoine Leca (1999-09-06):
# We then need to create a new timezone for the Kitikmeot region of Nunavut
# to differentiate it from the Yellowknife region.
# From Paul Eggert (1999-09-20):
# <a href="http://www.nunavut.com/basicfacts/english/basicfacts_1territory.html">
# Basic Facts: The New Territory
# </a> (1999) reports that Pangnirtung operates on eastern time,
# and that Coral Harbour does not observe DST. We don't know when
# Pangnirtung switched to eastern time; we'll guess 1995.
# From Rives McDow (1999-11-08):
# On October 31, when the rest of Nunavut went to Central time,
# Pangnirtung wobbled. Here is the result of their wobble:
#
# The following businesses and organizations in Pangnirtung use Central Time:
#
# First Air, Power Corp, Nunavut Construction, Health Center, RCMP,
# Eastern Arctic National Parks, A & D Specialist
#
# The following businesses and organizations in Pangnirtung use Eastern Time:
#
# Hamlet office, All other businesses, Both schools, Airport operator
#
# This has made for an interesting situation there, which warranted the news.
# No one there that I spoke with seems concerned, or has plans to
# change the local methods of keeping time, as it evidently does not
# really interfere with any activities or make things difficult locally.
# They plan to celebrate New Year's turn-over twice, one hour apart,
# so it appears that the situation will last at least that long.
# The Nunavut Intergovernmental Affairs hopes that they will "come to
# their senses", but the locals evidently don't see any problem with
# the current state of affairs.
# From Michaela Rodrigue, writing in the
# <a href="http://www.nunatsiaq.com/archives/nunavut991130/nvt91119_17.html">
# Nunatsiaq News (1999-11-19)</a>:
# Clyde River, Pangnirtung and Sanikiluaq now operate with two time zones,
# central - or Nunavut time - for government offices, and eastern time
# for municipal offices and schools.... Igloolik [was similar but then]
# made the switch to central time on Saturday, Nov. 6.
# From Paul Eggert (2000-10-02):
# Matthews and Vincent (1998) say the following, but we lack histories
# for these potential new Zones.
#
# The Canadian Forces station at Alert uses Eastern Time while the
# handful of residents at the Eureka weather station [in the Central
# zone] skip daylight savings. Baffin Island, which is crossed by the
# Central, Eastern and Atlantic Time zones, only uses Eastern Time.
# Gjoa Haven, Taloyoak and Pelly Bay all use Mountain instead of
# Central Time and Southampton Island [in the Central zone] is not
# required to use daylight savings.
# From
# <a href="http://www.nunatsiaq.com/archives/nunavut001130/nvt21110_02.html">
# Nunavut now has two time zones
# </a> (2000-11-10):
# The Nunavut government would allow its employees in Kugluktuk and
# Cambridge Bay to operate on central time year-round, putting them
# one hour behind the rest of Nunavut for six months during the winter.
# At the end of October the two communities had rebelled against
# Nunavut's unified time zone, refusing to shift to eastern time with
# the rest of the territory for the winter. Cambridge Bay remained on
# central time, while Kugluktuk, even farther west, reverted to
# mountain time, which they had used before the advent of Nunavut's
# unified time zone in 1999.
#
# From Rives McDow (2001-01-20), quoting the Nunavut government:
# The preceding decision came into effect at midnight, Saturday Nov 4, 2000.
# From Paul Eggert (2000-12-04):
# Let's just keep track of the official times for now.
# From Rives McDow (2001-03-07):
# The premier of Nunavut has issued a ministerial statement advising
# that effective 2001-04-01, the territory of Nunavut will revert
# back to three time zones (mountain, central, and eastern). Of the
# cities in Nunavut, Coral Harbor is the only one that I know of that
# has said it will not observe dst, staying on EST year round. I'm
# checking for more info, and will get back to you if I come up with
# more.
# [Also see <http://www.nunatsiaq.com/nunavut/nvt10309_06.html> (2001-03-09).]
# From Gwillim Law (2005-05-21):
# According to maps at
# http://inms-ienm.nrc-cnrc.gc.ca/images/time_services/TZ01SWE.jpg
# http://inms-ienm.nrc-cnrc.gc.ca/images/time_services/TZ01SSE.jpg
# (both dated 2003), and
# http://www.canadiangeographic.ca/Magazine/SO98/geomap.asp
# (from a 1998 Canadian Geographic article), the de facto and de jure time
# for Southampton Island (at the north end of Hudson Bay) is UTC-5 all year
# round. Using Google, it's easy to find other websites that confirm this.
# I wasn't able to find how far back this time regimen goes, but since it
# predates the creation of Nunavut, it probably goes back many years....
# The Inuktitut name of Coral Harbour is Sallit, but it's rarely used.
#
# From Paul Eggert (2005-07-26):
# For lack of better information, assume that Southampton Island observed
# daylight saving only during wartime.
# From Chris Walton (2007-03-01):
# ... the community of Resolute (located on Cornwallis Island in
# Nunavut) moved from Central Time to Eastern Time last November.
# Basically the community did not change its clocks at the end of
# daylight saving....
# http://www.nnsl.com/frames/newspapers/2006-11/nov13_06none.html
# From Chris Walton (2011-03-21):
# Back in 2007 I initiated the creation of a new "zone file" for Resolute
# Bay. Resolute Bay is a small community located about 900km north of
# the Arctic Circle. The zone file was required because Resolute Bay had
# decided to use UTC-5 instead of UTC-6 for the winter of 2006-2007.
#
# According to new information which I received last week, Resolute Bay
# went back to using UTC-6 in the winter of 2007-2008...
#
# On March 11/2007 most of Canada went onto daylight saving. On March
# 14/2007 I phoned the Resolute Bay hamlet office to do a "time check." I
# talked to somebody that was both knowledgeable and helpful. I was able
# to confirm that Resolute Bay was still operating on UTC-5. It was
# explained to me that Resolute Bay had been on the Eastern Time zone
# (EST) in the winter, and was now back on the Central Time zone (CDT).
# i.e. the time zone had changed twice in the last year but the clocks
# had not moved. The residents had to know which time zone they were in
# so they could follow the correct TV schedule...
#
# On Nov 02/2008 most of Canada went onto standard time. On Nov 03/2008 I
# phoned the Resolute Bay hamlet office...[D]ue to the challenging nature
# of the phone call, I decided to seek out an alternate source of
# information. I found an e-mail address for somebody by the name of
# Stephanie Adams whose job was listed as "Inns North Support Officer for
# Arctic Co-operatives." I was under the impression that Stephanie lived
# and worked in Resolute Bay...
#
# On March 14/2011 I phoned the hamlet office again. I was told that
# Resolute Bay had been using Central Standard Time over the winter of
# 2010-2011 and that the clocks had therefore been moved one hour ahead
# on March 13/2011. The person I talked to was aware that Resolute Bay
# had previously experimented with Eastern Standard Time but he could not
# tell me when the practice had stopped.
#
# On March 17/2011 I searched the Web to find an e-mail address of
# somebody that might be able to tell me exactly when Resolute Bay went
# off Eastern Standard Time. I stumbled on the name "Aziz Kheraj." Aziz
# used to be the mayor of Resolute Bay and he apparently owns half the
# businesses including "South Camp Inn." This website has some info on
# Aziz:
# <a href="http://www.uphere.ca/node/493">
# http://www.uphere.ca/node/493
# </a>
#
# I sent Aziz an e-mail asking when Resolute Bay had stopped using
# Eastern Standard Time.
#
# Aziz responded quickly with this: "hi, The time was not changed for the
# 1 year only, the following year, the community went back to the old way
# of "spring ahead-fall behind" currently we are zulu plus 5 hrs and in
# the winter Zulu plus 6 hrs"
#
# This of course conflicted with everything I had ascertained in November 2008.
#
# I sent Aziz a copy of my 2008 e-mail exchange with Stephanie. Aziz
# responded with this: "Hi, Stephanie lives in Winnipeg. I live here, You
# may want to check with the weather office in Resolute Bay or do a
# search on the weather through Env. Canada. web site"
#
# If I had realized that Stephanie did not live in Resolute Bay I would
# never have contacted her. I now believe that all the information I
# obtained in November 2008 should be ignored...
# I apologize for reporting incorrect information in 2008.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule NT_YK 1918 only - Apr 14 2:00 1:00 D
Rule NT_YK 1918 only - Oct 27 2:00 0 S
Rule NT_YK 1919 only - May 25 2:00 1:00 D
Rule NT_YK 1919 only - Nov 1 0:00 0 S
Rule NT_YK 1942 only - Feb 9 2:00 1:00 W # War
Rule NT_YK 1945 only - Aug 14 23:00u 1:00 P # Peace
Rule NT_YK 1945 only - Sep 30 2:00 0 S
Rule NT_YK 1965 only - Apr lastSun 0:00 2:00 DD
Rule NT_YK 1965 only - Oct lastSun 2:00 0 S
Rule NT_YK 1980 1986 - Apr lastSun 2:00 1:00 D
Rule NT_YK 1980 2006 - Oct lastSun 2:00 0 S
Rule NT_YK 1987 2006 - Apr Sun>=1 2:00 1:00 D
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
# aka Panniqtuuq
Zone America/Pangnirtung 0 - zzz 1921 # trading post est.
-4:00 NT_YK A%sT 1995 Apr Sun>=1 2:00
-5:00 Canada E%sT 1999 Oct 31 2:00
-6:00 Canada C%sT 2000 Oct 29 2:00
-5:00 Canada E%sT
# formerly Frobisher Bay
Zone America/Iqaluit 0 - zzz 1942 Aug # Frobisher Bay est.
-5:00 NT_YK E%sT 1999 Oct 31 2:00
-6:00 Canada C%sT 2000 Oct 29 2:00
-5:00 Canada E%sT
# aka Qausuittuq
Zone America/Resolute 0 - zzz 1947 Aug 31 # Resolute founded
-6:00 NT_YK C%sT 2000 Oct 29 2:00
-5:00 - EST 2001 Apr 1 3:00
-6:00 Canada C%sT 2006 Oct 29 2:00
-5:00 - EST 2007 Mar 11 3:00
-6:00 Canada C%sT
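# The "zone changed twice but the clocks did not move" episode described
# above is visible in the data; a hedged Python sketch (assuming a tzdata
# matching the Resolute entries above):
#
#   from datetime import datetime
#   from zoneinfo import ZoneInfo
#   res = ZoneInfo("America/Resolute")
#   for d in ((2006, 10, 1), (2006, 12, 1), (2007, 4, 1)):
#       t = datetime(*d, tzinfo=res)
#       print(d, t.tzname(), t.utcoffset())
#
# All three dates sit at a -5:00 offset while the abbreviation flips from
# CDT to EST and back to CDT.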
# aka Kangiqiniq
Zone America/Rankin_Inlet 0 - zzz 1957 # Rankin Inlet founded
-6:00 NT_YK C%sT 2000 Oct 29 2:00
-5:00 - EST 2001 Apr 1 3:00
-6:00 Canada C%sT
# aka Iqaluktuuttiaq
Zone America/Cambridge_Bay 0 - zzz 1920 # trading post est.?
-7:00 NT_YK M%sT 1999 Oct 31 2:00
-6:00 Canada C%sT 2000 Oct 29 2:00
-5:00 - EST 2000 Nov 5 0:00
-6:00 - CST 2001 Apr 1 3:00
-7:00 Canada M%sT
Zone America/Yellowknife 0 - zzz 1935 # Yellowknife founded?
-7:00 NT_YK M%sT 1980
-7:00 Canada M%sT
Zone America/Inuvik 0 - zzz 1953 # Inuvik founded
-8:00 NT_YK P%sT 1979 Apr lastSun 2:00
-7:00 NT_YK M%sT 1980
-7:00 Canada M%sT
Zone America/Whitehorse -9:00:12 - LMT 1900 Aug 20
-9:00 NT_YK Y%sT 1966 Jul 1 2:00
-8:00 NT_YK P%sT 1980
-8:00 Canada P%sT
Zone America/Dawson -9:17:40 - LMT 1900 Aug 20
-9:00 NT_YK Y%sT 1973 Oct 28 0:00
-8:00 NT_YK P%sT 1980
-8:00 Canada P%sT
###############################################################################
# Mexico
# From Paul Eggert (2001-03-05):
# The Investigation and Analysis Service of the
# Mexican Library of Congress (MLoC) has published a
# <a href="http://www.cddhcu.gob.mx/bibliot/publica/inveyana/polisoc/horver/">
# history of Mexican local time (in Spanish)
# </a>.
#
# Here are the discrepancies between Shanks & Pottenger (S&P) and the MLoC.
# (In all cases we go with the MLoC.)
# S&P report that Baja was at -8:00 in 1922/1923.
# S&P say the 1930 transition in Baja was 1930-11-16.
# S&P report no DST during summer 1931.
# S&P report a transition at 1932-03-30 23:00, not 1932-04-01.
# From Gwillim Law (2001-02-20):
# There are some other discrepancies between the Decrees page and the
# tz database. I think they can best be explained by supposing that
# the researchers who prepared the Decrees page failed to find some of
# the relevant documents.
# From Alan Perry (1996-02-15):
# A guy from our Mexico subsidiary finally found the Presidential Decree
# outlining the timezone changes in Mexico.
#
# ------------- Begin Forwarded Message -------------
#
# I finally got my hands on the Official Presidential Decree that sets up the
# rules for the DST changes. The rules are:
#
# 1. The country is divided in 3 timezones:
# - Baja California Norte (the Mexico/BajaNorte TZ)
# - Baja California Sur, Nayarit, Sinaloa and Sonora (the Mexico/BajaSur TZ)
# - The rest of the country (the Mexico/General TZ)
#
# 2. From the first Sunday in April at 2:00 AM to the last Sunday in October
# at 2:00 AM, the times in each zone are as follows:
# BajaNorte: GMT+7
# BajaSur: GMT+6
# General: GMT+5
#
# 3. The rest of the year, the times are as follows:
# BajaNorte: GMT+8
# BajaSur: GMT+7
# General: GMT+6
#
# The Decree was published in Mexico's Official Newspaper on January 4th.
#
# -------------- End Forwarded Message --------------
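# (Note: the decree's "GMT+n" figures are hours *behind* Greenwich, i.e.
# UT offset -n.)  A hedged Python sketch checking the decree against the
# compiled zones for winter and summer 1996 (assuming a tzdata matching
# the Mexican entries below):
#
#   from datetime import datetime
#   from zoneinfo import ZoneInfo
#   for z in ("America/Tijuana", "America/Mazatlan", "America/Mexico_City"):
#       for month in (1, 7):
#           print(z, month, datetime(1996, month, 15, tzinfo=ZoneInfo(z)).utcoffset())
#
# Expected offsets: -8/-7 hours for Tijuana, -7/-6 for Mazatlan, and
# -6/-5 for Mexico_City in January/July respectively.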
# From Paul Eggert (1996-06-12):
# For an English translation of the decree, see
# <a href="http://mexico-travel.com/extra/timezone_eng.html">
# ``Diario Oficial: Time Zone Changeover'' (1996-01-04).
# </a>
# From Rives McDow (1998-10-08):
# The State of Quintana Roo has reverted back to central STD and DST times
# (i.e. UTC -0600 and -0500 as of 1998-08-02).
# From Rives McDow (2000-01-10):
# Effective April 4, 1999 at 2:00 AM local time, Sonora changed to the time
# zone 5 hours from the International Date Line, and will not observe daylight
# savings time so as to stay on the same time zone as the southern part of
# Arizona year round.
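# A hedged Python check of the statement above (assuming the usual zone
# names America/Hermosillo and America/Phoenix exist in the local tzdata):
#
#   from datetime import datetime
#   from zoneinfo import ZoneInfo
#   for month in (1, 7):
#       hmo = datetime(2000, month, 1, tzinfo=ZoneInfo("America/Hermosillo"))
#       phx = datetime(2000, month, 1, tzinfo=ZoneInfo("America/Phoenix"))
#       print(month, hmo.utcoffset() == phx.utcoffset())
#
# Both lines print True: since 1999 Sonora sits at -7:00 with no DST, the
# same as Arizona.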
# From Jesper Norgaard, translating
# <http://www.reforma.com/nacional/articulo/064327/> (2001-01-17):
# In Oaxaca, the 55,000 teachers of Section 22 of the National
# Syndicate of Education Workers refuse to apply daylight saving each
# year, so the more than 10,000 schools keep normal time the whole
# year.
# From Gwillim Law (2001-01-19):
# <http://www.reforma.com/negocios_y_dinero/articulo/064481/> ... says
# (translated):...
# January 17, 2001 - The Energy Secretary, Ernesto Martens, announced
# that Summer Time will be reduced from seven to five months, starting
# this year....
# <http://www.publico.com.mx/scripts/texto3.asp?action=pagina&pag=21&pos=p&secc=naci&date=01/17/2001>
# [translated], says "summer time will ... take effect on the first Sunday
# in May, and end on the last Sunday of September."
# From Arthur David Olson (2001-01-25):
# The 2001-01-24 traditional Washington Post contained the page one
# story "Timely Issue Divides Mexicans."...
# http://www.washingtonpost.com/wp-dyn/articles/A37383-2001Jan23.html
# ... Mexico City Mayor Lopez Obrador "...is threatening to keep
# Mexico City and its 20 million residents on a different time than
# the rest of the country..." In particular, Lopez Obrador would abolish
# observation of Daylight Saving Time.
# <a href="http://www.conae.gob.mx/ahorro/decretohorver2001.html#decre">
# Official statute published by the Energy Department
# </a> (2001-02-01) shows Baja and Chihuahua as still using US DST rules,
# and Sonora with no DST. This was reported by Jesper Norgaard (2001-02-03).
# From Paul Eggert (2001-03-03):
#
# <a href="http://www.latimes.com/news/nation/20010303/t000018766.html">
# James F. Smith writes in today's LA Times
# </a>
# * Sonora will continue to observe standard time.
# * Last week Mexico City's mayor Andres Manuel Lopez Obrador decreed that
# the Federal District will not adopt DST.
# * 4 of 16 district leaders announced they'll ignore the decree.
# * The decree does not affect federal-controlled facilities including
# the airport, banks, hospitals, and schools.
#
# For now we'll assume that the Federal District will bow to federal rules.
# From Jesper Norgaard (2001-04-01):
# I found some references to the Mexican application of daylight
# saving, which modifies what I had already sent you, stating earlier
# that a number of northern Mexican states would go on daylight
# saving. The modification reverts this to only cover Baja California
# (Norte), while all other states (except Sonora, who has no daylight
# saving all year) will follow the original decree of president
# Vicente Fox, starting daylight saving May 6, 2001 and ending
# September 30, 2001.
# References: "Diario de Monterrey" <www.diariodemonterrey.com/index.asp>
# Palabra <http://palabra.infosel.com/010331/primera/ppri3101.pdf> (2001-03-31)
# From Reuters (2001-09-04):
# Mexico's Supreme Court on Tuesday declared that daylight savings was
# unconstitutional in Mexico City, creating the possibility the
# capital will be in a different time zone from the rest of the nation
# next year.... The Supreme Court's ruling takes effect at 2:00
# a.m. (0800 GMT) on Sept. 30, when Mexico is scheduled to revert to
# standard time. "This is so residents of the Federal District are not
# subject to unexpected time changes," a statement from the court said.
# From Jesper Norgaard Welen (2002-03-12):
# ... consulting my local grocery store(!) and my coworkers, they all insisted
# that a new decision had been made to reinstate US style DST in Mexico....
# http://www.conae.gob.mx/ahorro/horaver2001_m1_2002.html (2002-02-20)
# confirms this. Sonora as usual is the only state where DST is not applied.
# From Steffen Thorsen (2009-12-28):
#
# Steffen Thorsen wrote:
# > Mexico's House of Representatives has approved a proposal for northern
# > Mexico's border cities to share the same daylight saving schedule as
# > the United States.
# Now this has passed both the Congress and the Senate, so starting from
# 2010, some border regions will be the same:
# <a href="http://www.signonsandiego.com/news/2009/dec/28/clocks-will-match-both-sides-border/">
# http://www.signonsandiego.com/news/2009/dec/28/clocks-will-match-both-sides-border/
# </a>
# <a href="http://www.elmananarey.com/diario/noticia/nacional/noticias/empatan_horario_de_frontera_con_eu/621939">
# http://www.elmananarey.com/diario/noticia/nacional/noticias/empatan_horario_de_frontera_con_eu/621939
# </a>
# (Spanish)
#
# Could not find the new law text, but the proposed law text changes are here:
# <a href="http://gaceta.diputados.gob.mx/Gaceta/61/2009/dic/20091210-V.pdf">
# http://gaceta.diputados.gob.mx/Gaceta/61/2009/dic/20091210-V.pdf
# </a>
# (Gaceta Parlamentaria)
#
# There is also a list of the votes here:
# <a href="http://gaceta.diputados.gob.mx/Gaceta/61/2009/dic/V2-101209.html">
# http://gaceta.diputados.gob.mx/Gaceta/61/2009/dic/V2-101209.html
# </a>
#
# Our page:
# <a href="http://www.timeanddate.com/news/time/north-mexico-dst-change.html">
# http://www.timeanddate.com/news/time/north-mexico-dst-change.html
# </a>
# From Arthur David Olson (2010-01-20):
# The page
# <a href="http://dof.gob.mx/nota_detalle.php?codigo=5127480&fecha=06/01/2010">
# http://dof.gob.mx/nota_detalle.php?codigo=5127480&fecha=06/01/2010
# </a>
# includes this text, translated here from the Spanish:
# In the border municipalities of Tijuana and Mexicali in Baja California;
# Juárez and Ojinaga in Chihuahua; Acuña and Piedras Negras in Coahuila;
# Anáhuac in Nuevo León; and Nuevo Laredo, Reynosa and Matamoros in
# Tamaulipas, this seasonal schedule shall take effect at two o'clock on
# the second Sunday in March and shall end at two o'clock on the first
# Sunday in November.
# In the border municipalities located in the northern border strip, in
# the territory between the international boundary and the parallel line
# twenty kilometers toward the interior of the country, as well as the
# city of Ensenada, Baja California, this seasonal schedule shall take
# effect at two o'clock on the second Sunday in March and shall end at
# two o'clock on the first Sunday in November.
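# A hedged Python sketch of the resulting 2010 divergence between a border
# zone (on US rules) and a nearby interior zone (still on Mexican rules),
# assuming a tzdata matching the entries below:
#
#   from datetime import datetime
#   from zoneinfo import ZoneInfo
#   for z in ("America/Matamoros", "America/Monterrey"):
#       t = datetime(2010, 3, 20, 12, 0, tzinfo=ZoneInfo(z))
#       print(z, t.tzname(), t.utcoffset())
#
# On 2010-03-20 Matamoros is already on CDT (-5:00, US rule: second Sunday
# in March) while Monterrey is still on CST (-6:00, Mexican rule: first
# Sunday in April).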
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Mexico 1939 only - Feb 5 0:00 1:00 D
Rule Mexico 1939 only - Jun 25 0:00 0 S
Rule Mexico 1940 only - Dec 9 0:00 1:00 D
Rule Mexico 1941 only - Apr 1 0:00 0 S
Rule Mexico 1943 only - Dec 16 0:00 1:00 W # War
Rule Mexico 1944 only - May 1 0:00 0 S
Rule Mexico 1950 only - Feb 12 0:00 1:00 D
Rule Mexico 1950 only - Jul 30 0:00 0 S
Rule Mexico 1996 2000 - Apr Sun>=1 2:00 1:00 D
Rule Mexico 1996 2000 - Oct lastSun 2:00 0 S
Rule Mexico 2001 only - May Sun>=1 2:00 1:00 D
Rule Mexico 2001 only - Sep lastSun 2:00 0 S
Rule Mexico 2002 max - Apr Sun>=1 2:00 1:00 D
Rule Mexico 2002 max - Oct lastSun 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
# Quintana Roo
Zone America/Cancun -5:47:04 - LMT 1922 Jan 1 0:12:56
-6:00 - CST 1981 Dec 23
-5:00 Mexico E%sT 1998 Aug 2 2:00
-6:00 Mexico C%sT
# Campeche, Yucatan
Zone America/Merida -5:58:28 - LMT 1922 Jan 1 0:01:32
-6:00 - CST 1981 Dec 23
-5:00 - EST 1982 Dec 2
-6:00 Mexico C%sT
# Coahuila, Durango, Nuevo Leon, Tamaulipas (near US border)
Zone America/Matamoros -6:40:00 - LMT 1921 Dec 31 23:20:00
-6:00 - CST 1988
-6:00 US C%sT 1989
-6:00 Mexico C%sT 2010
-6:00 US C%sT
# Coahuila, Durango, Nuevo Leon, Tamaulipas (away from US border)
Zone America/Monterrey -6:41:16 - LMT 1921 Dec 31 23:18:44
-6:00 - CST 1988
-6:00 US C%sT 1989
-6:00 Mexico C%sT
# Central Mexico
Zone America/Mexico_City -6:36:36 - LMT 1922 Jan 1 0:23:24
-7:00 - MST 1927 Jun 10 23:00
-6:00 - CST 1930 Nov 15
-7:00 - MST 1931 May 1 23:00
-6:00 - CST 1931 Oct
-7:00 - MST 1932 Apr 1
-6:00 Mexico C%sT 2001 Sep 30 02:00
-6:00 - CST 2002 Feb 20
-6:00 Mexico C%sT
# Chihuahua (near US border)
Zone America/Ojinaga -6:57:40 - LMT 1922 Jan 1 0:02:20
-7:00 - MST 1927 Jun 10 23:00
-6:00 - CST 1930 Nov 15
-7:00 - MST 1931 May 1 23:00
-6:00 - CST 1931 Oct
-7:00 - MST 1932 Apr 1
-6:00 - CST 1996
-6:00 Mexico C%sT 1998
-6:00 - CST 1998 Apr Sun>=1 3:00
-7:00 Mexico M%sT 2010
-7:00 US M%sT
# Chihuahua (away from US border)
Zone America/Chihuahua -7:04:20 - LMT 1921 Dec 31 23:55:40
-7:00 - MST 1927 Jun 10 23:00
-6:00 - CST 1930 Nov 15
-7:00 - MST 1931 May 1 23:00
-6:00 - CST 1931 Oct
-7:00 - MST 1932 Apr 1
-6:00 - CST 1996
-6:00 Mexico C%sT 1998
-6:00 - CST 1998 Apr Sun>=1 3:00
-7:00 Mexico M%sT
# Sonora
Zone America/Hermosillo -7:23:52 - LMT 1921 Dec 31 23:36:08
-7:00 - MST 1927 Jun 10 23:00
-6:00 - CST 1930 Nov 15
-7:00 - MST 1931 May 1 23:00
-6:00 - CST 1931 Oct
-7:00 - MST 1932 Apr 1
-6:00 - CST 1942 Apr 24
-7:00 - MST 1949 Jan 14
-8:00 - PST 1970
-7:00 Mexico M%sT 1999
-7:00 - MST
# From Alexander Krivenyshev (2010-04-21):
# According to news, Bahía de Banderas (Mexican state of Nayarit)
# changed time zone UTC-7 to new time zone UTC-6 on April 4, 2010 (to
# share the same time zone as nearby city Puerto Vallarta, Jalisco).
#
# (Spanish)
# Bahía de Banderas aligns its time with that of the center of the
# country, starting this Sunday
# <a href="http://www.nayarit.gob.mx/notes.asp?id=20748">
# http://www.nayarit.gob.mx/notes.asp?id=20748
# </a>
#
# Bahía de Banderas aligns its time with that of the Center of the
# Country
# <a href="http://www.bahiadebanderas.gob.mx/principal/index.php?option=com_content&view=article&id=261:bahia-de-banderas-homologa-su-horario-con-el-del-centro-del-pais&catid=42:comunicacion-social&Itemid=50">
# http://www.bahiadebanderas.gob.mx/principal/index.php?option=com_content&view=article&id=261:bahia-de-banderas-homologa-su-horario-con-el-del-centro-del-pais&catid=42:comunicacion-social&Itemid=50
# </a>
#
# (English)
# Puerto Vallarta and Bahía de Banderas: One Time Zone
# <a href="http://virtualvallarta.com/puertovallarta/puertovallarta/localnews/2009-12-03-Puerto-Vallarta-and-Bahia-de-Banderas-One-Time-Zone.shtml">
# http://virtualvallarta.com/puertovallarta/puertovallarta/localnews/2009-12-03-Puerto-Vallarta-and-Bahia-de-Banderas-One-Time-Zone.shtml
# </a>
#
# or
# <a href="http://www.worldtimezone.com/dst_news/dst_news_mexico08.html">
# http://www.worldtimezone.com/dst_news/dst_news_mexico08.html
# </a>
#
# "Mexico's Senate approved the amendments to the Mexican Schedule System that
# will allow Bahía de Banderas and Puerto Vallarta to share the same time
# zone ..."
# Baja California Sur, Nayarit, Sinaloa
# From Arthur David Olson (2010-05-01):
# Use "Bahia_Banderas" to keep the name to fourteen characters.
Zone America/Mazatlan -7:05:40 - LMT 1921 Dec 31 23:54:20
-7:00 - MST 1927 Jun 10 23:00
-6:00 - CST 1930 Nov 15
-7:00 - MST 1931 May 1 23:00
-6:00 - CST 1931 Oct
-7:00 - MST 1932 Apr 1
-6:00 - CST 1942 Apr 24
-7:00 - MST 1949 Jan 14
-8:00 - PST 1970
-7:00 Mexico M%sT
Zone America/Bahia_Banderas -7:01:00 - LMT 1921 Dec 31 23:59:00
-7:00 - MST 1927 Jun 10 23:00
-6:00 - CST 1930 Nov 15
-7:00 - MST 1931 May 1 23:00
-6:00 - CST 1931 Oct
-7:00 - MST 1932 Apr 1
-6:00 - CST 1942 Apr 24
-7:00 - MST 1949 Jan 14
-8:00 - PST 1970
-7:00 Mexico M%sT 2010 Apr 4 2:00
-6:00 Mexico C%sT
# Baja California (near US border)
Zone America/Tijuana -7:48:04 - LMT 1922 Jan 1 0:11:56
-7:00 - MST 1924
-8:00 - PST 1927 Jun 10 23:00
-7:00 - MST 1930 Nov 15
-8:00 - PST 1931 Apr 1
-8:00 1:00 PDT 1931 Sep 30
-8:00 - PST 1942 Apr 24
-8:00 1:00 PWT 1945 Aug 14 23:00u
-8:00 1:00 PPT 1945 Nov 12 # Peace
-8:00 - PST 1948 Apr 5
-8:00 1:00 PDT 1949 Jan 14
-8:00 - PST 1954
-8:00 CA P%sT 1961
-8:00 - PST 1976
-8:00 US P%sT 1996
-8:00 Mexico P%sT 2001
-8:00 US P%sT 2002 Feb 20
-8:00 Mexico P%sT 2010
-8:00 US P%sT
# Baja California (away from US border)
Zone America/Santa_Isabel -7:39:28 - LMT 1922 Jan 1 0:20:32
-7:00 - MST 1924
-8:00 - PST 1927 Jun 10 23:00
-7:00 - MST 1930 Nov 15
-8:00 - PST 1931 Apr 1
-8:00 1:00 PDT 1931 Sep 30
-8:00 - PST 1942 Apr 24
-8:00 1:00 PWT 1945 Aug 14 23:00u
-8:00 1:00 PPT 1945 Nov 12 # Peace
-8:00 - PST 1948 Apr 5
-8:00 1:00 PDT 1949 Jan 14
-8:00 - PST 1954
-8:00 CA P%sT 1961
-8:00 - PST 1976
-8:00 US P%sT 1996
-8:00 Mexico P%sT 2001
-8:00 US P%sT 2002 Feb 20
-8:00 Mexico P%sT
# From Paul Eggert (2006-03-22):
# Formerly there was an America/Ensenada zone, which differed from
# America/Tijuana only in that it did not observe DST from 1976
# through 1995. This was as per Shanks (1999). But Shanks & Pottenger say
# Ensenada did not observe DST from 1948 through 1975. Guy Harris reports
# that the 1987 OAG says "Only Ensenada, Mexicale, San Felipe and
# Tijuana observe DST," which agrees with Shanks & Pottenger but implies that
# DST-observance was a town-by-town matter back then. This concerns
# data after 1970 so most likely there should be at least one Zone
# other than America/Tijuana for Baja, but it's not clear yet what its
# name or contents should be.
#
# Revillagigedo Is
# no information
###############################################################################
# Anguilla
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Anguilla -4:12:16 - LMT 1912 Mar 2
-4:00 - AST
# Antigua and Barbuda
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Antigua -4:07:12 - LMT 1912 Mar 2
-5:00 - EST 1951
-4:00 - AST
# Bahamas
#
# From Sue Williams (2006-12-07):
# The Bahamas announced about a month ago that they plan to change their DST
# rules to sync with the U.S. starting in 2007....
# http://www.jonesbahamas.com/?c=45&a=10412
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Bahamas 1964 1975 - Oct lastSun 2:00 0 S
Rule Bahamas 1964 1975 - Apr lastSun 2:00 1:00 D
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Nassau -5:09:24 - LMT 1912 Mar 2
-5:00 Bahamas E%sT 1976
-5:00 US E%sT
# Barbados
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Barb 1977 only - Jun 12 2:00 1:00 D
Rule Barb 1977 1978 - Oct Sun>=1 2:00 0 S
Rule Barb 1978 1980 - Apr Sun>=15 2:00 1:00 D
Rule Barb 1979 only - Sep 30 2:00 0 S
Rule Barb 1980 only - Sep 25 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Barbados -3:58:28 - LMT 1924 # Bridgetown
-3:58:28 - BMT 1932 # Bridgetown Mean Time
-4:00 Barb A%sT
# Belize
# Whitman entirely disagrees with Shanks; go with Shanks & Pottenger.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Belize 1918 1942 - Oct Sun>=2 0:00 0:30 HD
Rule Belize 1919 1943 - Feb Sun>=9 0:00 0 S
Rule Belize 1973 only - Dec 5 0:00 1:00 D
Rule Belize 1974 only - Feb 9 0:00 0 S
Rule Belize 1982 only - Dec 18 0:00 1:00 D
Rule Belize 1983 only - Feb 12 0:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Belize -5:52:48 - LMT 1912 Apr
-6:00 Belize C%sT
# Bermuda
# From Dan Jones, reporting in The Royal Gazette (2006-06-26):
# Next year, however, clocks in the US will go forward on the second Sunday
# in March, until the first Sunday in November. And, after the Time Zone
# (Seasonal Variation) Bill 2006 was passed in the House of Assembly on
# Friday, the same thing will happen in Bermuda.
# http://www.theroyalgazette.com/apps/pbcs.dll/article?AID=/20060529/NEWS/105290135
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Atlantic/Bermuda -4:19:04 - LMT 1930 Jan 1 2:00 # Hamilton
-4:00 - AST 1974 Apr 28 2:00
-4:00 Bahamas A%sT 1976
-4:00 US A%sT
# Cayman Is
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Cayman -5:25:32 - LMT 1890 # Georgetown
-5:07:12 - KMT 1912 Feb # Kingston Mean Time
-5:00 - EST
# Costa Rica
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule CR 1979 1980 - Feb lastSun 0:00 1:00 D
Rule CR 1979 1980 - Jun Sun>=1 0:00 0 S
Rule CR 1991 1992 - Jan Sat>=15 0:00 1:00 D
# IATA SSIM (1991-09) says the following was at 1:00;
# go with Shanks & Pottenger.
Rule CR 1991 only - Jul 1 0:00 0 S
Rule CR 1992 only - Mar 15 0:00 0 S
# There are too many San Joses elsewhere, so we'll use `Costa Rica'.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Costa_Rica -5:36:20 - LMT 1890 # San Jose
-5:36:20 - SJMT 1921 Jan 15 # San Jose Mean Time
-6:00 CR C%sT
# Coco
# no information; probably like America/Costa_Rica
# Cuba
# From Arthur David Olson (1999-03-29):
# The 1999-03-28 exhibition baseball game held in Havana, Cuba, between
# the Cuban National Team and the Baltimore Orioles was carried live on
# the Orioles Radio Network, including affiliate WTOP in Washington, DC.
# During the game, play-by-play announcer Jim Hunter noted that
# "We'll be losing two hours of sleep...Cuba switched to Daylight Saving
# Time today." (The "two hour" remark referred to losing one hour of
# sleep on 1999-03-28--when the announcers were in Cuba as it switched
# to DST--and one more hour on 1999-04-04--when the announcers will have
# returned to Baltimore, which switches on that date.)
# From Evert van der Veer via Steffen Thorsen (2004-10-28):
# Cuba is not going back to standard time this year.
# From Paul Eggert (2006-03-22):
# http://www.granma.cu/ingles/2004/septiembre/juev30/41medid-i.html
# says that it's due to a problem at the Antonio Guiteras
# thermoelectric plant, and says "This October there will be no return
# to normal hours (after daylight saving time)".
# For now, let's assume that it's a temporary measure.
# From Carlos A. Carnero Delgado (2005-11-12):
# This year (just like in 2004-2005) there's no change in time zone
# adjustment in Cuba. We will stay in daylight saving time:
# http://www.granma.cu/espanol/2005/noviembre/mier9/horario.html
# From Jesper Norgaard Welen (2006-10-21):
# An article in GRANMA INTERNACIONAL claims that Cuba will end
# the 3 years of permanent DST next weekend, see
# http://www.granma.cu/ingles/2006/octubre/lun16/43horario.html
# "On Saturday night, October 28 going into Sunday, October 29, at 01:00,
# watches should be set back one hour -- going back to 00:00 hours -- returning
# to the normal schedule....
# From Paul Eggert (2007-03-02):
# http://www.granma.cubaweb.cu/english/news/art89.html, dated yesterday,
# says Cuban clocks will advance at midnight on March 10.
# For lack of better information, assume Cuba will use US rules,
# except that it switches at midnight standard time as usual.
#
# From Steffen Thorsen (2007-10-25):
# Carlos Alberto Fonseca Arauz informed me that Cuba will end DST one week
# earlier - on the last Sunday of October, just like in 2006.
#
# He supplied these references:
#
# http://www.prensalatina.com.mx/article.asp?ID=%7B4CC32C1B-A9F7-42FB-8A07-8631AFC923AF%7D&language=ES
# http://actualidad.terra.es/sociedad/articulo/cuba_llama_ahorrar_energia_cambio_1957044.htm
#
# From Alex Krivenyshev (2007-10-25):
# Here is also an article from Granma (Cuba):
#
# Normal time will be in force [again] from [next] Sunday, October 28
# http://www.granma.cubaweb.cu/2007/10/24/nacional/artic07.html
#
# http://www.worldtimezone.com/dst_news/dst_news_cuba03.html
# From Arthur David Olson (2008-03-09):
# I'm in Maryland which is now observing United States Eastern Daylight
# Time. At 9:44 local time I used RealPlayer to listen to
# <a href="http://media.enet.cu/radioreloj">
# http://media.enet.cu/radioreloj
# </a>, a Cuban information station, and heard
# the time announced as "ocho cuarenta y cuatro" ("eight forty-four"),
# indicating that Cuba is still on standard time.
# From Steffen Thorsen (2008-03-12):
# It seems that Cuba will start DST on Sunday, 2008-03-16...
# It was announced yesterday, according to this source (in Spanish):
# <a href="http://www.nnc.cubaweb.cu/marzo-2008/cien-1-11-3-08.htm">
# http://www.nnc.cubaweb.cu/marzo-2008/cien-1-11-3-08.htm
# </a>
#
# Some more background information is posted here:
# <a href="http://www.timeanddate.com/news/time/cuba-starts-dst-march-16.html">
# http://www.timeanddate.com/news/time/cuba-starts-dst-march-16.html
# </a>
#
# The article also says that Cuba has been observing DST since 1963,
# while Shanks (and tzdata) has 1965 as the first date (except in the
# 1940's). Many other web pages in Cuba also claim that it has been
# observed since 1963, but with the exception of 1970 - an exception
# which is not present in tzdata/Shanks. So there is a chance we need to
# change some historic records as well.
#
# One example:
# <a href="http://www.radiohc.cu/espanol/noticias/mar07/11mar/hor.htm">
# http://www.radiohc.cu/espanol/noticias/mar07/11mar/hor.htm
# </a>
# From Jesper Norgaard Welen (2008-03-13):
# The Cuban time change has just been confirmed on the most authoritative
# web site, the Granma. Please check out
# <a href="http://www.granma.cubaweb.cu/2008/03/13/nacional/artic10.html">
# http://www.granma.cubaweb.cu/2008/03/13/nacional/artic10.html
# </a>
#
# Basically as expected after Steffen Thorsens information, the change
# will take place midnight between Saturday and Sunday.
# From Arthur David Olson (2008-03-12):
# Assume Sun>=15 (third Sunday) going forward.
# From Alexander Krivenyshev (2009-03-04)
# According to the Radio Reloj - Cuba will start Daylight Saving Time on
# midnight between Saturday, March 07, 2009 and Sunday, March 08, 2009-
# not on midnight March 14 / March 15 as previously thought.
#
# <a href="http://www.worldtimezone.com/dst_news/dst_news_cuba05.html">
# http://www.worldtimezone.com/dst_news/dst_news_cuba05.html
# (in Spanish)
# </a>
# From Arthur David Olson (2009-03-09)
# I listened over the Internet to
# <a href="http://media.enet.cu/readioreloj">
# http://media.enet.cu/readioreloj
# </a>
# this morning; when it was 10:05 a.m. here in Bethesda, Maryland, the
# time was announced as "diez cinco"--the same time as here, indicating
# that Cuba has indeed switched to DST. Assume second Sunday from 2009 forward.
# From Steffen Thorsen (2011-03-08):
# Granma announced that Cuba is going to start DST on 2011-03-20 00:00:00
# this year. Nothing about the end date known so far (if that has
# changed at all).
#
# Source:
# <a href="http://granma.co.cu/2011/03/08/nacional/artic01.html">
# http://granma.co.cu/2011/03/08/nacional/artic01.html
# </a>
#
# Our info:
# <a href="http://www.timeanddate.com/news/time/cuba-starts-dst-2011.html">
# http://www.timeanddate.com/news/time/cuba-starts-dst-2011.html
# </a>
#
# From Steffen Thorsen (2011-10-30)
# Cuba will end DST two weeks later this year. Instead of going back
# tonight, it has been delayed to 2011-11-13 at 01:00.
#
# One source (Spanish)
# <a href="http://www.radioangulo.cu/noticias/cuba/17105-cuba-restablecera-el-horario-del-meridiano-de-greenwich.html">
# http://www.radioangulo.cu/noticias/cuba/17105-cuba-restablecera-el-horario-del-meridiano-de-greenwich.html
# </a>
#
# Our page:
# <a href="http://www.timeanddate.com/news/time/cuba-time-changes-2011.html">
# http://www.timeanddate.com/news/time/cuba-time-changes-2011.html
# </a>
#
# From Steffen Thorsen (2012-03-01)
# According to Radio Reloj, Cuba will start DST at midnight between March
# 31 and April 1.
#
# Radio Reloj has the following info (Spanish):
# <a href="http://www.radioreloj.cu/index.php/noticias-radio-reloj/71-miscelaneas/7529-cuba-aplicara-el-horario-de-verano-desde-el-1-de-abril">
# http://www.radioreloj.cu/index.php/noticias-radio-reloj/71-miscelaneas/7529-cuba-aplicara-el-horario-de-verano-desde-el-1-de-abril
# </a>
#
# Our info on it:
# <a href="http://www.timeanddate.com/news/time/cuba-starts-dst-2012.html">
# http://www.timeanddate.com/news/time/cuba-starts-dst-2012.html
# </a>
# From Steffen Thorsen (2012-11-03):
# Radio Reloj and many other sources report that Cuba is changing back
# to standard time on 2012-11-04:
# http://www.radioreloj.cu/index.php/noticias-radio-reloj/36-nacionales/9961-regira-horario-normal-en-cuba-desde-el-domingo-cuatro-de-noviembre
# From Paul Eggert (2012-11-03):
# For now, assume the future rule is first Sunday in November.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Cuba 1928 only - Jun 10 0:00 1:00 D
Rule Cuba 1928 only - Oct 10 0:00 0 S
Rule Cuba 1940 1942 - Jun Sun>=1 0:00 1:00 D
Rule Cuba 1940 1942 - Sep Sun>=1 0:00 0 S
Rule Cuba 1945 1946 - Jun Sun>=1 0:00 1:00 D
Rule Cuba 1945 1946 - Sep Sun>=1 0:00 0 S
Rule Cuba 1965 only - Jun 1 0:00 1:00 D
Rule Cuba 1965 only - Sep 30 0:00 0 S
Rule Cuba 1966 only - May 29 0:00 1:00 D
Rule Cuba 1966 only - Oct 2 0:00 0 S
Rule Cuba 1967 only - Apr 8 0:00 1:00 D
Rule Cuba 1967 1968 - Sep Sun>=8 0:00 0 S
Rule Cuba 1968 only - Apr 14 0:00 1:00 D
Rule Cuba 1969 1977 - Apr lastSun 0:00 1:00 D
Rule Cuba 1969 1971 - Oct lastSun 0:00 0 S
Rule Cuba 1972 1974 - Oct 8 0:00 0 S
Rule Cuba 1975 1977 - Oct lastSun 0:00 0 S
Rule Cuba 1978 only - May 7 0:00 1:00 D
Rule Cuba 1978 1990 - Oct Sun>=8 0:00 0 S
Rule Cuba 1979 1980 - Mar Sun>=15 0:00 1:00 D
Rule Cuba 1981 1985 - May Sun>=5 0:00 1:00 D
Rule Cuba 1986 1989 - Mar Sun>=14 0:00 1:00 D
Rule Cuba 1990 1997 - Apr Sun>=1 0:00 1:00 D
Rule Cuba 1991 1995 - Oct Sun>=8 0:00s 0 S
Rule Cuba 1996 only - Oct 6 0:00s 0 S
Rule Cuba 1997 only - Oct 12 0:00s 0 S
Rule Cuba 1998 1999 - Mar lastSun 0:00s 1:00 D
Rule Cuba 1998 2003 - Oct lastSun 0:00s 0 S
Rule Cuba 2000 2004 - Apr Sun>=1 0:00s 1:00 D
Rule Cuba 2006 2010 - Oct lastSun 0:00s 0 S
Rule Cuba 2007 only - Mar Sun>=8 0:00s 1:00 D
Rule Cuba 2008 only - Mar Sun>=15 0:00s 1:00 D
Rule Cuba 2009 2010 - Mar Sun>=8 0:00s 1:00 D
Rule Cuba 2011 only - Mar Sun>=15 0:00s 1:00 D
Rule Cuba 2011 only - Nov 13 0:00s 0 S
Rule Cuba 2012 only - Apr 1 0:00s 1:00 D
Rule Cuba 2012 max - Nov Sun>=1 0:00s 0 S
Rule Cuba 2013 max - Mar Sun>=8 0:00s 1:00 D
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Havana -5:29:28 - LMT 1890
-5:29:36 - HMT 1925 Jul 19 12:00 # Havana MT
-5:00 Cuba C%sT
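# Under the rules above Havana springs forward at midnight *standard* time
# (the 0:00s column), so local 00:00-00:59 does not exist on the changeover
# date.  A hedged Python sketch for the 2013 transition (assuming a tzdata
# matching the entries above):
#
#   from datetime import datetime, timezone
#   from zoneinfo import ZoneInfo
#   hav = ZoneInfo("America/Havana")
#   for t in (datetime(2013, 3, 10, 4, 59, tzinfo=timezone.utc),
#             datetime(2013, 3, 10, 5, 0, tzinfo=timezone.utc)):
#       print(t.astimezone(hav))
#
# This prints 2013-03-09 23:59-05:00 followed by 2013-03-10 01:00-04:00:
# the local hour from 00:00 to 00:59 is skipped.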
# Dominica
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Dominica -4:05:36 - LMT 1911 Jul 1 0:01 # Roseau
-4:00 - AST
# Dominican Republic
# From Steffen Thorsen (2000-10-30):
# Enrique Morales reported to me that the Dominican Republic has changed the
# time zone to Eastern Standard Time as of Sunday 29 at 2 am....
# http://www.listin.com.do/antes/261000/republica/princi.html
# From Paul Eggert (2000-12-04):
# That URL (2000-10-26, in Spanish) says they planned to use US-style DST.
# From Rives McDow (2000-12-01):
# Dominican Republic changed its mind and presidential decree on Tuesday,
# November 28, 2000, with a new decree. On Sunday, December 3 at 1:00 AM the
# Dominican Republic will be reverting to 8 hours from the International Date
# Line, and will not be using DST in the foreseeable future. The reason they
# decided to use DST was to be in synch with Puerto Rico, who was also going
# to implement DST. When Puerto Rico didn't implement DST, the president
# decided to revert.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule DR 1966 only - Oct 30 0:00 1:00 D
Rule DR 1967 only - Feb 28 0:00 0 S
Rule DR 1969 1973 - Oct lastSun 0:00 0:30 HD
Rule DR 1970 only - Feb 21 0:00 0 S
Rule DR 1971 only - Jan 20 0:00 0 S
Rule DR 1972 1974 - Jan 21 0:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Santo_Domingo -4:39:36 - LMT 1890
-4:40 - SDMT 1933 Apr 1 12:00 # S. Dom. MT
-5:00 DR E%sT 1974 Oct 27
-4:00 - AST 2000 Oct 29 02:00
-5:00 US E%sT 2000 Dec 3 01:00
-4:00 - AST
# El Salvador
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Salv 1987 1988 - May Sun>=1 0:00 1:00 D
Rule Salv 1987 1988 - Sep lastSun 0:00 0 S
# There are too many San Salvadors elsewhere, so use America/El_Salvador
# instead of America/San_Salvador.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/El_Salvador -5:56:48 - LMT 1921 # San Salvador
-6:00 Salv C%sT
# Grenada
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Grenada -4:07:00 - LMT 1911 Jul # St George's
-4:00 - AST
# Guadeloupe
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Guadeloupe -4:06:08 - LMT 1911 Jun 8 # Pointe a Pitre
-4:00 - AST
# St Barthelemy
Link America/Guadeloupe America/St_Barthelemy
# St Martin (French part)
Link America/Guadeloupe America/Marigot
# Guatemala
#
# From Gwillim Law (2006-04-22), after a heads-up from Oscar van Vlijmen:
# Diario Co Latino, at
# http://www.diariocolatino.com/internacionales/detalles.asp?NewsID=8079,
# says in an article dated 2006-04-19 that the Guatemalan government had
# decided on that date to advance official time by 60 minutes, to lessen the
# impact of the elevated cost of oil.... Daylight saving time will last from
# 2006-04-29 24:00 (Guatemalan standard time) to 2006-09-30 (time unspecified).
# From Paul Eggert (2006-06-22):
# The Ministry of Energy and Mines, press release CP-15/2006
# (2006-04-19), says DST ends at 24:00. See
# <http://www.sieca.org.gt/Sitio_publico/Energeticos/Doc/Medidas/Cambio_Horario_Nac_190406.pdf>.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Guat 1973 only - Nov 25 0:00 1:00 D
Rule Guat 1974 only - Feb 24 0:00 0 S
Rule Guat 1983 only - May 21 0:00 1:00 D
Rule Guat 1983 only - Sep 22 0:00 0 S
Rule Guat 1991 only - Mar 23 0:00 1:00 D
Rule Guat 1991 only - Sep 7 0:00 0 S
Rule Guat 2006 only - Apr 30 0:00 1:00 D
Rule Guat 2006 only - Oct 1 0:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Guatemala -6:02:04 - LMT 1918 Oct 5
-6:00 Guat C%sT
# Haiti
# From Gwillim Law (2005-04-15):
# Risto O. Nykanen wrote me that Haiti is now on DST.
# I searched for confirmation, and I found a
# <a href="http://www.haitianconsulate.org/time.doc"> press release
# on the Web page of the Haitian Consulate in Chicago (2005-03-31),
# </a>. Translated from French, it says:
#
# "The Prime Minister's Communication Office notifies the public in general
# and the press in particular that, following a decision of the Interior
# Ministry and the Territorial Collectivities [I suppose that means the
# provinces], Haiti will move to Eastern Daylight Time in the night from next
# Saturday the 2nd to Sunday the 3rd.
#
# "Consequently, the Prime Minister's Communication Office wishes to inform
# the population that the country's clocks will be set forward one hour
# starting at midnight. This provision will hold until the last Saturday in
# October 2005.
#
# "Port-au-Prince, March 31, 2005"
#
# From Steffen Thorsen (2006-04-04):
# I have been informed by users that Haiti observes DST this year like
# last year, so the current "only" rule for 2005 might be changed to a
# "max" rule or to last until 2006. (Who knows if they will observe DST
# next year or if they will extend their DST like US/Canada next year).
#
# I have found this article about it (in French):
# http://www.haitipressnetwork.com/news.cfm?articleID=7612
#
# The reason seems to be an energy crisis.
# From Stephen Colebourne (2007-02-22):
# Some IATA info: Haiti won't be having DST in 2007.
# From Steffen Thorsen (2012-03-11):
# According to several news sources, Haiti will observe DST this year,
# apparently using the same start and end date as USA/Canada.
# So this means they have already changed their time.
#
# (Sources in French):
# <a href="http://www.alterpresse.org/spip.php?article12510">
# http://www.alterpresse.org/spip.php?article12510
# </a>
# <a href="http://radiovision2000haiti.net/home/?p=13253">
# http://radiovision2000haiti.net/home/?p=13253
# </a>
#
# Our coverage:
# <a href="http://www.timeanddate.com/news/time/haiti-dst-2012.html">
# http://www.timeanddate.com/news/time/haiti-dst-2012.html
# </a>
# From Arthur David Olson (2012-03-11):
# The alterpresse.org source seems to show a US-style leap from 2:00 a.m. to
# 3:00 a.m. rather than the traditional Haitian jump at midnight.
# Assume a US-style fall back as well XXX.
# Do not yet assume that the change carries forward past 2012 XXX.
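# A hedged Python sketch of the 2012 US-style spring forward described
# above, 2:00 -> 3:00 local rather than the traditional midnight jump
# (assuming a tzdata matching the entries below):
#
#   from datetime import datetime, timezone
#   from zoneinfo import ZoneInfo
#   pap = ZoneInfo("America/Port-au-Prince")
#   for t in (datetime(2012, 3, 11, 6, 59, tzinfo=timezone.utc),
#             datetime(2012, 3, 11, 7, 0, tzinfo=timezone.utc)):
#       print(t.astimezone(pap))
#
# This prints 01:59-05:00 followed by 03:00-04:00, i.e. local 02:00-02:59
# did not exist on 2012-03-11.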
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Haiti 1983 only - May 8 0:00 1:00 D
Rule Haiti 1984 1987 - Apr lastSun 0:00 1:00 D
Rule Haiti 1983 1987 - Oct lastSun 0:00 0 S
# Shanks & Pottenger say AT is 2:00, but IATA SSIM (1991/1997) says 1:00s.
# Go with IATA.
Rule Haiti 1988 1997 - Apr Sun>=1 1:00s 1:00 D
Rule Haiti 1988 1997 - Oct lastSun 1:00s 0 S
Rule Haiti 2005 2006 - Apr Sun>=1 0:00 1:00 D
Rule Haiti 2005 2006 - Oct lastSun 0:00 0 S
Rule Haiti 2012 only - Mar Sun>=8 2:00 1:00 D
Rule Haiti 2012 only - Nov Sun>=1 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Port-au-Prince -4:49:20 - LMT 1890
-4:49 - PPMT 1917 Jan 24 12:00 # P-a-P MT
-5:00 Haiti E%sT
# Honduras
# Shanks & Pottenger say 1921 Jan 1; go with Whitman's more precise Apr 1.
# From Paul Eggert (2006-05-05):
# worldtimezone.com reports a 2006-05-02 Spanish-language AP article
# saying Honduras will start using DST midnight Saturday, effective 4
# months until September. La Tribuna reported today
# <http://www.latribuna.hn/99299.html> that Manuel Zelaya, the president
# of Honduras, refused to back down on this.
# From Jesper Norgaard Welen (2006-08-08):
# It seems that Honduras has returned from DST to standard time this Monday at
# 00:00 hours (prolonging Sunday to 25 hours duration).
# http://www.worldtimezone.com/dst_news/dst_news_honduras04.html
# From Paul Eggert (2006-08-08):
# Also see Diario El Heraldo, The country returns to standard time (2006-08-08)
# <http://www.elheraldo.hn/nota.php?nid=54941&sec=12>.
# It mentions executive decree 18-2006.
# From Steffen Thorsen (2006-08-17):
# Honduras will observe DST from 2007 to 2009, exact dates are not
# published, I have located this authoritative source:
# http://www.presidencia.gob.hn/noticia.aspx?nId=47
# From Steffen Thorsen (2007-03-30):
# http://www.laprensahn.com/pais_nota.php?id04962=7386
# So it seems that Honduras will not enter DST this year....
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Hond 1987 1988 - May Sun>=1 0:00 1:00 D
Rule Hond 1987 1988 - Sep lastSun 0:00 0 S
Rule Hond 2006 only - May Sun>=1 0:00 1:00 D
Rule Hond 2006 only - Aug Mon>=1 0:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Tegucigalpa -5:48:52 - LMT 1921 Apr
-6:00 Hond C%sT
#
# Great Swan I ceded by US to Honduras in 1972
# Jamaica
# From Bob Devine (1988-01-28):
# Follows US rules.
# From U. S. Naval Observatory (1989-01-19):
# JAMAICA 5 H BEHIND UTC
# From Shanks & Pottenger:
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Jamaica -5:07:12 - LMT 1890 # Kingston
-5:07:12 - KMT 1912 Feb # Kingston Mean Time
-5:00 - EST 1974 Apr 28 2:00
-5:00 US E%sT 1984
-5:00 - EST
# Martinique
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Martinique -4:04:20 - LMT 1890 # Fort-de-France
-4:04:20 - FFMT 1911 May # Fort-de-France MT
-4:00 - AST 1980 Apr 6
-4:00 1:00 ADT 1980 Sep 28
-4:00 - AST
# Montserrat
# From Paul Eggert (2006-03-22):
# In 1995 volcanic eruptions forced evacuation of Plymouth, the capital.
# world.gazetteer.com says Cork Hill is the most populous location now.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Montserrat -4:08:52 - LMT 1911 Jul 1 0:01 # Cork Hill
-4:00 - AST
# Nicaragua
#
# This uses Shanks & Pottenger for times before 2005.
#
# From Steffen Thorsen (2005-04-12):
# I've got reports from 8 different people that Nicaragua just started
# DST on Sunday 2005-04-10, in order to save energy because of
# expensive petroleum. The exact end date for DST is not yet
# announced, only "September" but some sites also say "mid-September".
# Some background information is available on the President's official site:
# http://www.presidencia.gob.ni/Presidencia/Files_index/Secretaria/Notas%20de%20Prensa/Presidente/2005/ABRIL/Gobierno-de-nicaragua-adelanta-hora-oficial-06abril.htm
# The Decree, no 23-2005 is available here:
# http://www.presidencia.gob.ni/buscador_gaceta/BD/DECRETOS/2005/Decreto%2023-2005%20Se%20adelanta%20en%20una%20hora%20en%20todo%20el%20territorio%20nacional%20apartir%20de%20las%2024horas%20del%2009%20de%20Abril.pdf
#
# From Paul Eggert (2005-05-01):
# The decree doesn't say anything about daylight saving, but for now let's
# assume that it is daylight saving....
#
# From Gwillim Law (2005-04-21):
# The Associated Press story on the time change, which can be found at
# http://www.lapalmainteractivo.com/guias/content/gen/ap/America_Latina/AMC_GEN_NICARAGUA_HORA.html
# and elsewhere, says (fifth paragraph, translated from Spanish): "The last
# time that a change of clocks was applied to save energy was in the year 2000
# during the Arnoldo Aleman administration."...
# The northamerica file says that Nicaragua has been on UTC-6 continuously
# since December 1998. I wasn't able to find any details of Nicaraguan time
# changes in 2000. Perhaps a note could be added to the northamerica file, to
# the effect that we have indirect evidence that DST was observed in 2000.
#
# From Jesper Norgaard Welen (2005-11-02):
# Nicaragua left DST the 2005-10-02 at 00:00 (local time).
# http://www.presidencia.gob.ni/presidencia/files_index/secretaria/comunicados/2005/septiembre/26septiembre-cambio-hora.htm
# (2005-09-26)
#
# From Jesper Norgaard Welen (2006-05-05):
# http://www.elnuevodiario.com.ni/2006/05/01/nacionales/18410
# (my informal translation)
# By order of the president of the republic, Enrique Bolanos, Nicaragua
# advanced its official time by sixty minutes yesterday at 2 in the
# morning, and will stay that way until the 30th of September.
#
# From Jesper Norgaard Welen (2006-09-30):
# http://www.presidencia.gob.ni/buscador_gaceta/BD/DECRETOS/2006/D-063-2006P-PRN-Cambio-Hora.pdf
# My informal translation runs:
# The natural sun time is restored in all the national territory, in that the
# time is returned one hour at 01:00 am of October 1 of 2006.
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Nic 1979 1980 - Mar Sun>=16 0:00 1:00 D
Rule Nic 1979 1980 - Jun Mon>=23 0:00 0 S
Rule Nic 2005 only - Apr 10 0:00 1:00 D
Rule Nic 2005 only - Oct Sun>=1 0:00 0 S
Rule Nic 2006 only - Apr 30 2:00 1:00 D
Rule Nic 2006 only - Oct Sun>=1 1:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Managua -5:45:08 - LMT 1890
-5:45:12 - MMT 1934 Jun 23 # Managua Mean Time?
-6:00 - CST 1973 May
-5:00 - EST 1975 Feb 16
-6:00 Nic C%sT 1992 Jan 1 4:00
-5:00 - EST 1992 Sep 24
-6:00 - CST 1993
-5:00 - EST 1997
-6:00 Nic C%sT
# Panama
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Panama -5:18:08 - LMT 1890
-5:19:36 - CMT 1908 Apr 22 # Colon Mean Time
-5:00 - EST
# Puerto Rico
# There are too many San Juans elsewhere, so we'll use `Puerto_Rico'.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Puerto_Rico -4:24:25 - LMT 1899 Mar 28 12:00 # San Juan
-4:00 - AST 1942 May 3
-4:00 US A%sT 1946
-4:00 - AST
# St Kitts-Nevis
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/St_Kitts -4:10:52 - LMT 1912 Mar 2 # Basseterre
-4:00 - AST
# St Lucia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/St_Lucia -4:04:00 - LMT 1890 # Castries
-4:04:00 - CMT 1912 # Castries Mean Time
-4:00 - AST
# St Pierre and Miquelon
# There are too many St Pierres elsewhere, so we'll use `Miquelon'.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Miquelon -3:44:40 - LMT 1911 May 15 # St Pierre
-4:00 - AST 1980 May
-3:00 - PMST 1987 # Pierre & Miquelon Time
-3:00 Canada PM%sT
# St Vincent and the Grenadines
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/St_Vincent -4:04:56 - LMT 1890 # Kingstown
-4:04:56 - KMT 1912 # Kingstown Mean Time
-4:00 - AST
# Turks and Caicos
#
# From Chris Dunn in
# <http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=415007>
# (2007-03-15): In the Turks & Caicos Islands (America/Grand_Turk) the
# daylight saving dates for time changes have been adjusted to match
# the recent U.S. change of dates.
#
# From Brian Inglis (2007-04-28):
# http://www.turksandcaicos.tc/calendar/index.htm [2007-04-26]
# there is an entry for Nov 4 "Daylight Savings Time Ends 2007" and three
# rows before that there is an out-of-date entry for Oct:
# "Eastern Standard Times Begins 2007
# Clocks are set back one hour at 2:00 a.m. local Daylight Saving Time"
# indicating that the normal ET rules are followed.
#
# From Paul Eggert (2006-05-01):
# Shanks & Pottenger say they use US DST rules, but IATA SSIM (1991/1998)
# says they switch at midnight. Go with Shanks & Pottenger.
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule TC 1979 1986 - Apr lastSun 2:00 1:00 D
Rule TC 1979 2006 - Oct lastSun 2:00 0 S
Rule TC 1987 2006 - Apr Sun>=1 2:00 1:00 D
Rule TC 2007 max - Mar Sun>=8 2:00 1:00 D
Rule TC 2007 max - Nov Sun>=1 2:00 0 S
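# A reading note on the ON column (our annotation): "Sun>=8" means the first
# Sunday on or after the 8th, i.e. the second Sunday of the month, and
# "Sun>=1" means the first Sunday.  The 2007+ rules above therefore match
# the post-2007 US schedule (second Sunday in March, first Sunday in
# November), as the correspondence above describes.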
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Grand_Turk -4:44:32 - LMT 1890
-5:07:12 - KMT 1912 Feb # Kingston Mean Time
-5:00 TC E%sT
# British Virgin Is
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Tortola -4:18:28 - LMT 1911 Jul # Road Town
-4:00 - AST
# Virgin Is
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/St_Thomas -4:19:44 - LMT 1911 Jul # Charlotte Amalie
-4:00 - AST
|
274056675/springboot-openai-chatgpt | 1,067 | mng_web/src/styles/animate/vue-transition.scss | // Transition animation: horizontal fade
.fade-transverse-leave-active,
.fade-transverse-enter-active {
transition: all .5s;
}
.fade-transverse-enter {
opacity: 0;
transform: translateX(-30px);
}
.fade-transverse-leave-to {
opacity: 0;
transform: translateX(30px);
}
// Transition animation: scale fade
.fade-scale-leave-active,
.fade-scale-enter-active {
transition: all .5s;
}
.fade-scale-enter {
opacity: 0;
transform: scale(1.2);
}
.fade-scale-leave-to {
opacity: 0;
transform: scale(0.8);
}
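// A minimal usage sketch (our addition; it assumes Vue 2's <transition>
// class conventions, which these selectors follow):
//
//   <transition name="fade-transverse" mode="out-in">
//     <router-view />
//   </transition>
//
// With a matching `name`, Vue applies .fade-transverse-enter,
// .fade-transverse-enter-active, .fade-transverse-leave-active and
// .fade-transverse-leave-to at the corresponding phases, producing the
// horizontal fade defined above.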
@-webkit-keyframes animate-cloud {
from {
background-position: 600px 100%;
}
to {
background-position: 0 100%;
}
}
@-moz-keyframes animate-cloud {
from {
background-position: 600px 100%;
}
to {
background-position: 0 100%;
}
}
@-ms-keyframes animate-cloud {
from {
background-position: 600px 100%;
}
to {
background-position: 0 100%;
}
}
@-o-keyframes animate-cloud {
from {
background-position: 600px 100%;
}
to {
background-position: 0 100%;
}
}
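// Standard (unprefixed) form, added because only vendor-prefixed variants
// are defined above; modern browsers ignore those and need this one.
@keyframes animate-cloud {
  from {
    background-position: 600px 100%;
  }
  to {
    background-position: 0 100%;
  }
}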
|
233zzh/TitanDataOperationSystem | 1,546 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/flot/examples/axes-time-zones/tz/systemv | # <pre>
# This file is in the public domain, so clarified as of
# 2009-05-17 by Arthur David Olson.
# Old rules, should the need arise.
# No attempt is made to handle Newfoundland, since it cannot be expressed
# using the System V "TZ" scheme (half-hour offset); nor anything outside
# North America (no support for non-standard DST start/end dates); nor the
# changes in the DST rules in the US after 1976 (which occurred after
# the old rules were written).
#
# If you need the old rules, uncomment ## lines.
# Compile this *without* leap second correction for true conformance.
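# A hedged usage sketch (our addition, assuming the standard zic(8)
# compiler and that the desired ## lines below have been uncommented):
#
#	zic -d /usr/share/zoneinfo systemv
#	TZ=SystemV/EST5EDT date
#
# zic compiles this source into binary zoneinfo files under the -d
# directory; a zone is then selected via the TZ environment variable.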
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule SystemV min 1973 - Apr lastSun 2:00 1:00 D
Rule SystemV min 1973 - Oct lastSun 2:00 0 S
Rule SystemV 1974 only - Jan 6 2:00 1:00 D
Rule SystemV 1974 only - Nov lastSun 2:00 0 S
Rule SystemV 1975 only - Feb 23 2:00 1:00 D
Rule SystemV 1975 only - Oct lastSun 2:00 0 S
Rule SystemV 1976 max - Apr lastSun 2:00 1:00 D
Rule SystemV 1976 max - Oct lastSun 2:00 0 S
# Zone NAME GMTOFF RULES/SAVE FORMAT [UNTIL]
## Zone SystemV/AST4ADT -4:00 SystemV A%sT
## Zone SystemV/EST5EDT -5:00 SystemV E%sT
## Zone SystemV/CST6CDT -6:00 SystemV C%sT
## Zone SystemV/MST7MDT -7:00 SystemV M%sT
## Zone SystemV/PST8PDT -8:00 SystemV P%sT
## Zone SystemV/YST9YDT -9:00 SystemV Y%sT
## Zone SystemV/AST4 -4:00 - AST
## Zone SystemV/EST5 -5:00 - EST
## Zone SystemV/CST6 -6:00 - CST
## Zone SystemV/MST7 -7:00 - MST
## Zone SystemV/PST8 -8:00 - PST
## Zone SystemV/YST9 -9:00 - YST
## Zone SystemV/HST10 -10:00 - HST
|
233zzh/TitanDataOperationSystem | 19,324 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/flot/examples/axes-time-zones/tz/solar88 | # <pre>
# This file is in the public domain, so clarified as of
# 2009-05-17 by Arthur David Olson.
# Apparent noon times below are for Riyadh; they're a bit off for other places.
# Times were computed using formulas in the U.S. Naval Observatory's
# Almanac for Computers 1988; the formulas "will give EqT to an accuracy of
# [plus or minus two] seconds during the current year."
#
# Rounding to the nearest five seconds results in fewer than
# 256 different "time types"--a limit that's faced because time types are
# stored on disk as unsigned chars.
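# A worked reading of the first line below (our annotation): on Jan 1,
# apparent noon at Riyadh fell at 12:03:15 local mean time, so the equation
# of time is -3m15s; the SAVE value of -0:03:15 shifts the clock by that
# amount, so a wall clock following these rules reads 12:00:00 at apparent
# noon, i.e. the zone tracks apparent solar time through 1988.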
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule sol88 1988 only - Jan 1 12:03:15s -0:03:15 -
Rule sol88 1988 only - Jan 2 12:03:40s -0:03:40 -
Rule sol88 1988 only - Jan 3 12:04:10s -0:04:10 -
Rule sol88 1988 only - Jan 4 12:04:40s -0:04:40 -
Rule sol88 1988 only - Jan 5 12:05:05s -0:05:05 -
Rule sol88 1988 only - Jan 6 12:05:30s -0:05:30 -
Rule sol88 1988 only - Jan 7 12:06:00s -0:06:00 -
Rule sol88 1988 only - Jan 8 12:06:25s -0:06:25 -
Rule sol88 1988 only - Jan 9 12:06:50s -0:06:50 -
Rule sol88 1988 only - Jan 10 12:07:15s -0:07:15 -
Rule sol88 1988 only - Jan 11 12:07:40s -0:07:40 -
Rule sol88 1988 only - Jan 12 12:08:05s -0:08:05 -
Rule sol88 1988 only - Jan 13 12:08:25s -0:08:25 -
Rule sol88 1988 only - Jan 14 12:08:50s -0:08:50 -
Rule sol88 1988 only - Jan 15 12:09:10s -0:09:10 -
Rule sol88 1988 only - Jan 16 12:09:30s -0:09:30 -
Rule sol88 1988 only - Jan 17 12:09:50s -0:09:50 -
Rule sol88 1988 only - Jan 18 12:10:10s -0:10:10 -
Rule sol88 1988 only - Jan 19 12:10:30s -0:10:30 -
Rule sol88 1988 only - Jan 20 12:10:50s -0:10:50 -
Rule sol88 1988 only - Jan 21 12:11:05s -0:11:05 -
Rule sol88 1988 only - Jan 22 12:11:25s -0:11:25 -
Rule sol88 1988 only - Jan 23 12:11:40s -0:11:40 -
Rule sol88 1988 only - Jan 24 12:11:55s -0:11:55 -
Rule sol88 1988 only - Jan 25 12:12:10s -0:12:10 -
Rule sol88 1988 only - Jan 26 12:12:25s -0:12:25 -
Rule sol88 1988 only - Jan 27 12:12:40s -0:12:40 -
Rule sol88 1988 only - Jan 28 12:12:50s -0:12:50 -
Rule sol88 1988 only - Jan 29 12:13:00s -0:13:00 -
Rule sol88 1988 only - Jan 30 12:13:10s -0:13:10 -
Rule sol88 1988 only - Jan 31 12:13:20s -0:13:20 -
Rule sol88 1988 only - Feb 1 12:13:30s -0:13:30 -
Rule sol88 1988 only - Feb 2 12:13:40s -0:13:40 -
Rule sol88 1988 only - Feb 3 12:13:45s -0:13:45 -
Rule sol88 1988 only - Feb 4 12:13:55s -0:13:55 -
Rule sol88 1988 only - Feb 5 12:14:00s -0:14:00 -
Rule sol88 1988 only - Feb 6 12:14:05s -0:14:05 -
Rule sol88 1988 only - Feb 7 12:14:10s -0:14:10 -
Rule sol88 1988 only - Feb 8 12:14:10s -0:14:10 -
Rule sol88 1988 only - Feb 9 12:14:15s -0:14:15 -
Rule sol88 1988 only - Feb 10 12:14:15s -0:14:15 -
Rule sol88 1988 only - Feb 11 12:14:15s -0:14:15 -
Rule sol88 1988 only - Feb 12 12:14:15s -0:14:15 -
Rule sol88 1988 only - Feb 13 12:14:15s -0:14:15 -
Rule sol88 1988 only - Feb 14 12:14:15s -0:14:15 -
Rule sol88 1988 only - Feb 15 12:14:10s -0:14:10 -
Rule sol88 1988 only - Feb 16 12:14:10s -0:14:10 -
Rule sol88 1988 only - Feb 17 12:14:05s -0:14:05 -
Rule sol88 1988 only - Feb 18 12:14:00s -0:14:00 -
Rule sol88 1988 only - Feb 19 12:13:55s -0:13:55 -
Rule sol88 1988 only - Feb 20 12:13:50s -0:13:50 -
Rule sol88 1988 only - Feb 21 12:13:45s -0:13:45 -
Rule sol88 1988 only - Feb 22 12:13:40s -0:13:40 -
Rule sol88 1988 only - Feb 23 12:13:30s -0:13:30 -
Rule sol88 1988 only - Feb 24 12:13:20s -0:13:20 -
Rule sol88 1988 only - Feb 25 12:13:15s -0:13:15 -
Rule sol88 1988 only - Feb 26 12:13:05s -0:13:05 -
Rule sol88 1988 only - Feb 27 12:12:55s -0:12:55 -
Rule sol88 1988 only - Feb 28 12:12:45s -0:12:45 -
Rule sol88 1988 only - Feb 29 12:12:30s -0:12:30 -
Rule sol88 1988 only - Mar 1 12:12:20s -0:12:20 -
Rule sol88 1988 only - Mar 2 12:12:10s -0:12:10 -
Rule sol88 1988 only - Mar 3 12:11:55s -0:11:55 -
Rule sol88 1988 only - Mar 4 12:11:45s -0:11:45 -
Rule sol88 1988 only - Mar 5 12:11:30s -0:11:30 -
Rule sol88 1988 only - Mar 6 12:11:15s -0:11:15 -
Rule sol88 1988 only - Mar 7 12:11:00s -0:11:00 -
Rule sol88 1988 only - Mar 8 12:10:45s -0:10:45 -
Rule sol88 1988 only - Mar 9 12:10:30s -0:10:30 -
Rule sol88 1988 only - Mar 10 12:10:15s -0:10:15 -
Rule sol88 1988 only - Mar 11 12:10:00s -0:10:00 -
Rule sol88 1988 only - Mar 12 12:09:45s -0:09:45 -
Rule sol88 1988 only - Mar 13 12:09:30s -0:09:30 -
Rule sol88 1988 only - Mar 14 12:09:10s -0:09:10 -
Rule sol88 1988 only - Mar 15 12:08:55s -0:08:55 -
Rule sol88 1988 only - Mar 16 12:08:40s -0:08:40 -
Rule sol88 1988 only - Mar 17 12:08:20s -0:08:20 -
Rule sol88 1988 only - Mar 18 12:08:05s -0:08:05 -
Rule sol88 1988 only - Mar 19 12:07:45s -0:07:45 -
Rule sol88 1988 only - Mar 20 12:07:30s -0:07:30 -
Rule sol88 1988 only - Mar 21 12:07:10s -0:07:10 -
Rule sol88 1988 only - Mar 22 12:06:50s -0:06:50 -
Rule sol88 1988 only - Mar 23 12:06:35s -0:06:35 -
Rule sol88 1988 only - Mar 24 12:06:15s -0:06:15 -
Rule sol88 1988 only - Mar 25 12:06:00s -0:06:00 -
Rule sol88 1988 only - Mar 26 12:05:40s -0:05:40 -
Rule sol88 1988 only - Mar 27 12:05:20s -0:05:20 -
Rule sol88 1988 only - Mar 28 12:05:05s -0:05:05 -
Rule sol88 1988 only - Mar 29 12:04:45s -0:04:45 -
Rule sol88 1988 only - Mar 30 12:04:25s -0:04:25 -
Rule sol88 1988 only - Mar 31 12:04:10s -0:04:10 -
Rule sol88 1988 only - Apr 1 12:03:50s -0:03:50 -
Rule sol88 1988 only - Apr 2 12:03:35s -0:03:35 -
Rule sol88 1988 only - Apr 3 12:03:15s -0:03:15 -
Rule sol88 1988 only - Apr 4 12:03:00s -0:03:00 -
Rule sol88 1988 only - Apr 5 12:02:40s -0:02:40 -
Rule sol88 1988 only - Apr 6 12:02:25s -0:02:25 -
Rule sol88 1988 only - Apr 7 12:02:05s -0:02:05 -
Rule sol88 1988 only - Apr 8 12:01:50s -0:01:50 -
Rule sol88 1988 only - Apr 9 12:01:35s -0:01:35 -
Rule sol88 1988 only - Apr 10 12:01:15s -0:01:15 -
Rule sol88 1988 only - Apr 11 12:01:00s -0:01:00 -
Rule sol88 1988 only - Apr 12 12:00:45s -0:00:45 -
Rule sol88 1988 only - Apr 13 12:00:30s -0:00:30 -
Rule sol88 1988 only - Apr 14 12:00:15s -0:00:15 -
Rule sol88 1988 only - Apr 15 12:00:00s 0:00:00 -
Rule sol88 1988 only - Apr 16 11:59:45s 0:00:15 -
Rule sol88 1988 only - Apr 17 11:59:30s 0:00:30 -
Rule sol88 1988 only - Apr 18 11:59:20s 0:00:40 -
Rule sol88 1988 only - Apr 19 11:59:05s 0:00:55 -
Rule sol88 1988 only - Apr 20 11:58:55s 0:01:05 -
Rule sol88 1988 only - Apr 21 11:58:40s 0:01:20 -
Rule sol88 1988 only - Apr 22 11:58:30s 0:01:30 -
Rule sol88 1988 only - Apr 23 11:58:15s 0:01:45 -
Rule sol88 1988 only - Apr 24 11:58:05s 0:01:55 -
Rule sol88 1988 only - Apr 25 11:57:55s 0:02:05 -
Rule sol88 1988 only - Apr 26 11:57:45s 0:02:15 -
Rule sol88 1988 only - Apr 27 11:57:35s 0:02:25 -
Rule sol88 1988 only - Apr 28 11:57:30s 0:02:30 -
Rule sol88 1988 only - Apr 29 11:57:20s 0:02:40 -
Rule sol88 1988 only - Apr 30 11:57:10s 0:02:50 -
Rule sol88 1988 only - May 1 11:57:05s 0:02:55 -
Rule sol88 1988 only - May 2 11:56:55s 0:03:05 -
Rule sol88 1988 only - May 3 11:56:50s 0:03:10 -
Rule sol88 1988 only - May 4 11:56:45s 0:03:15 -
Rule sol88 1988 only - May 5 11:56:40s 0:03:20 -
Rule sol88 1988 only - May 6 11:56:35s 0:03:25 -
Rule sol88 1988 only - May 7 11:56:30s 0:03:30 -
Rule sol88 1988 only - May 8 11:56:25s 0:03:35 -
Rule sol88 1988 only - May 9 11:56:25s 0:03:35 -
Rule sol88 1988 only - May 10 11:56:20s 0:03:40 -
Rule sol88 1988 only - May 11 11:56:20s 0:03:40 -
Rule sol88 1988 only - May 12 11:56:20s 0:03:40 -
Rule sol88 1988 only - May 13 11:56:20s 0:03:40 -
Rule sol88 1988 only - May 14 11:56:20s 0:03:40 -
Rule sol88 1988 only - May 15 11:56:20s 0:03:40 -
Rule sol88 1988 only - May 16 11:56:20s 0:03:40 -
Rule sol88 1988 only - May 17 11:56:20s 0:03:40 -
Rule sol88 1988 only - May 18 11:56:25s 0:03:35 -
Rule sol88 1988 only - May 19 11:56:25s 0:03:35 -
Rule sol88 1988 only - May 20 11:56:30s 0:03:30 -
Rule sol88 1988 only - May 21 11:56:35s 0:03:25 -
Rule sol88 1988 only - May 22 11:56:40s 0:03:20 -
Rule sol88 1988 only - May 23 11:56:45s 0:03:15 -
Rule sol88 1988 only - May 24 11:56:50s 0:03:10 -
Rule sol88 1988 only - May 25 11:56:55s 0:03:05 -
Rule sol88 1988 only - May 26 11:57:00s 0:03:00 -
Rule sol88 1988 only - May 27 11:57:05s 0:02:55 -
Rule sol88 1988 only - May 28 11:57:15s 0:02:45 -
Rule sol88 1988 only - May 29 11:57:20s 0:02:40 -
Rule sol88 1988 only - May 30 11:57:30s 0:02:30 -
Rule sol88 1988 only - May 31 11:57:40s 0:02:20 -
Rule sol88 1988 only - Jun 1 11:57:50s 0:02:10 -
Rule sol88 1988 only - Jun 2 11:57:55s 0:02:05 -
Rule sol88 1988 only - Jun 3 11:58:05s 0:01:55 -
Rule sol88 1988 only - Jun 4 11:58:15s 0:01:45 -
Rule sol88 1988 only - Jun 5 11:58:30s 0:01:30 -
Rule sol88 1988 only - Jun 6 11:58:40s 0:01:20 -
Rule sol88 1988 only - Jun 7 11:58:50s 0:01:10 -
Rule sol88 1988 only - Jun 8 11:59:00s 0:01:00 -
Rule sol88 1988 only - Jun 9 11:59:15s 0:00:45 -
Rule sol88 1988 only - Jun 10 11:59:25s 0:00:35 -
Rule sol88 1988 only - Jun 11 11:59:35s 0:00:25 -
Rule sol88 1988 only - Jun 12 11:59:50s 0:00:10 -
Rule sol88 1988 only - Jun 13 12:00:00s 0:00:00 -
Rule sol88 1988 only - Jun 14 12:00:15s -0:00:15 -
Rule sol88 1988 only - Jun 15 12:00:25s -0:00:25 -
Rule sol88 1988 only - Jun 16 12:00:40s -0:00:40 -
Rule sol88 1988 only - Jun 17 12:00:55s -0:00:55 -
Rule sol88 1988 only - Jun 18 12:01:05s -0:01:05 -
Rule sol88 1988 only - Jun 19 12:01:20s -0:01:20 -
Rule sol88 1988 only - Jun 20 12:01:30s -0:01:30 -
Rule sol88 1988 only - Jun 21 12:01:45s -0:01:45 -
Rule sol88 1988 only - Jun 22 12:02:00s -0:02:00 -
Rule sol88 1988 only - Jun 23 12:02:10s -0:02:10 -
Rule sol88 1988 only - Jun 24 12:02:25s -0:02:25 -
Rule sol88 1988 only - Jun 25 12:02:35s -0:02:35 -
Rule sol88 1988 only - Jun 26 12:02:50s -0:02:50 -
Rule sol88 1988 only - Jun 27 12:03:00s -0:03:00 -
Rule sol88 1988 only - Jun 28 12:03:15s -0:03:15 -
Rule sol88 1988 only - Jun 29 12:03:25s -0:03:25 -
Rule sol88 1988 only - Jun 30 12:03:40s -0:03:40 -
Rule sol88 1988 only - Jul 1 12:03:50s -0:03:50 -
Rule sol88 1988 only - Jul 2 12:04:00s -0:04:00 -
Rule sol88 1988 only - Jul 3 12:04:10s -0:04:10 -
Rule sol88 1988 only - Jul 4 12:04:25s -0:04:25 -
Rule sol88 1988 only - Jul 5 12:04:35s -0:04:35 -
Rule sol88 1988 only - Jul 6 12:04:45s -0:04:45 -
Rule sol88 1988 only - Jul 7 12:04:55s -0:04:55 -
Rule sol88 1988 only - Jul 8 12:05:05s -0:05:05 -
Rule sol88 1988 only - Jul 9 12:05:10s -0:05:10 -
Rule sol88 1988 only - Jul 10 12:05:20s -0:05:20 -
Rule sol88 1988 only - Jul 11 12:05:30s -0:05:30 -
Rule sol88 1988 only - Jul 12 12:05:35s -0:05:35 -
Rule sol88 1988 only - Jul 13 12:05:45s -0:05:45 -
Rule sol88 1988 only - Jul 14 12:05:50s -0:05:50 -
Rule sol88 1988 only - Jul 15 12:05:55s -0:05:55 -
Rule sol88 1988 only - Jul 16 12:06:00s -0:06:00 -
Rule sol88 1988 only - Jul 17 12:06:05s -0:06:05 -
Rule sol88 1988 only - Jul 18 12:06:10s -0:06:10 -
Rule sol88 1988 only - Jul 19 12:06:15s -0:06:15 -
Rule sol88 1988 only - Jul 20 12:06:20s -0:06:20 -
Rule sol88 1988 only - Jul 21 12:06:25s -0:06:25 -
Rule sol88 1988 only - Jul 22 12:06:25s -0:06:25 -
Rule sol88 1988 only - Jul 23 12:06:25s -0:06:25 -
Rule sol88 1988 only - Jul 24 12:06:30s -0:06:30 -
Rule sol88 1988 only - Jul 25 12:06:30s -0:06:30 -
Rule sol88 1988 only - Jul 26 12:06:30s -0:06:30 -
Rule sol88 1988 only - Jul 27 12:06:30s -0:06:30 -
Rule sol88 1988 only - Jul 28 12:06:30s -0:06:30 -
Rule sol88 1988 only - Jul 29 12:06:25s -0:06:25 -
Rule sol88 1988 only - Jul 30 12:06:25s -0:06:25 -
Rule sol88 1988 only - Jul 31 12:06:20s -0:06:20 -
Rule sol88 1988 only - Aug 1 12:06:15s -0:06:15 -
Rule sol88 1988 only - Aug 2 12:06:15s -0:06:15 -
Rule sol88 1988 only - Aug 3 12:06:10s -0:06:10 -
Rule sol88 1988 only - Aug 4 12:06:05s -0:06:05 -
Rule sol88 1988 only - Aug 5 12:05:55s -0:05:55 -
Rule sol88 1988 only - Aug 6 12:05:50s -0:05:50 -
Rule sol88 1988 only - Aug 7 12:05:45s -0:05:45 -
Rule sol88 1988 only - Aug 8 12:05:35s -0:05:35 -
Rule sol88 1988 only - Aug 9 12:05:25s -0:05:25 -
Rule sol88 1988 only - Aug 10 12:05:20s -0:05:20 -
Rule sol88 1988 only - Aug 11 12:05:10s -0:05:10 -
Rule sol88 1988 only - Aug 12 12:05:00s -0:05:00 -
Rule sol88 1988 only - Aug 13 12:04:50s -0:04:50 -
Rule sol88 1988 only - Aug 14 12:04:35s -0:04:35 -
Rule sol88 1988 only - Aug 15 12:04:25s -0:04:25 -
Rule sol88 1988 only - Aug 16 12:04:15s -0:04:15 -
Rule sol88 1988 only - Aug 17 12:04:00s -0:04:00 -
Rule sol88 1988 only - Aug 18 12:03:50s -0:03:50 -
Rule sol88 1988 only - Aug 19 12:03:35s -0:03:35 -
Rule sol88 1988 only - Aug 20 12:03:20s -0:03:20 -
Rule sol88 1988 only - Aug 21 12:03:05s -0:03:05 -
Rule sol88 1988 only - Aug 22 12:02:50s -0:02:50 -
Rule sol88 1988 only - Aug 23 12:02:35s -0:02:35 -
Rule sol88 1988 only - Aug 24 12:02:20s -0:02:20 -
Rule sol88 1988 only - Aug 25 12:02:00s -0:02:00 -
Rule sol88 1988 only - Aug 26 12:01:45s -0:01:45 -
Rule sol88 1988 only - Aug 27 12:01:30s -0:01:30 -
Rule sol88 1988 only - Aug 28 12:01:10s -0:01:10 -
Rule sol88 1988 only - Aug 29 12:00:50s -0:00:50 -
Rule sol88 1988 only - Aug 30 12:00:35s -0:00:35 -
Rule sol88 1988 only - Aug 31 12:00:15s -0:00:15 -
Rule sol88 1988 only - Sep 1 11:59:55s 0:00:05 -
Rule sol88 1988 only - Sep 2 11:59:35s 0:00:25 -
Rule sol88 1988 only - Sep 3 11:59:20s 0:00:40 -
Rule sol88 1988 only - Sep 4 11:59:00s 0:01:00 -
Rule sol88 1988 only - Sep 5 11:58:40s 0:01:20 -
Rule sol88 1988 only - Sep 6 11:58:20s 0:01:40 -
Rule sol88 1988 only - Sep 7 11:58:00s 0:02:00 -
Rule sol88 1988 only - Sep 8 11:57:35s 0:02:25 -
Rule sol88 1988 only - Sep 9 11:57:15s 0:02:45 -
Rule sol88 1988 only - Sep 10 11:56:55s 0:03:05 -
Rule sol88 1988 only - Sep 11 11:56:35s 0:03:25 -
Rule sol88 1988 only - Sep 12 11:56:15s 0:03:45 -
Rule sol88 1988 only - Sep 13 11:55:50s 0:04:10 -
Rule sol88 1988 only - Sep 14 11:55:30s 0:04:30 -
Rule sol88 1988 only - Sep 15 11:55:10s 0:04:50 -
Rule sol88 1988 only - Sep 16 11:54:50s 0:05:10 -
Rule sol88 1988 only - Sep 17 11:54:25s 0:05:35 -
Rule sol88 1988 only - Sep 18 11:54:05s 0:05:55 -
Rule sol88 1988 only - Sep 19 11:53:45s 0:06:15 -
Rule sol88 1988 only - Sep 20 11:53:25s 0:06:35 -
Rule sol88 1988 only - Sep 21 11:53:00s 0:07:00 -
Rule sol88 1988 only - Sep 22 11:52:40s 0:07:20 -
Rule sol88 1988 only - Sep 23 11:52:20s 0:07:40 -
Rule sol88 1988 only - Sep 24 11:52:00s 0:08:00 -
Rule sol88 1988 only - Sep 25 11:51:40s 0:08:20 -
Rule sol88 1988 only - Sep 26 11:51:15s 0:08:45 -
Rule sol88 1988 only - Sep 27 11:50:55s 0:09:05 -
Rule sol88 1988 only - Sep 28 11:50:35s 0:09:25 -
Rule sol88 1988 only - Sep 29 11:50:15s 0:09:45 -
Rule sol88 1988 only - Sep 30 11:49:55s 0:10:05 -
Rule sol88 1988 only - Oct 1 11:49:35s 0:10:25 -
Rule sol88 1988 only - Oct 2 11:49:20s 0:10:40 -
Rule sol88 1988 only - Oct 3 11:49:00s 0:11:00 -
Rule sol88 1988 only - Oct 4 11:48:40s 0:11:20 -
Rule sol88 1988 only - Oct 5 11:48:25s 0:11:35 -
Rule sol88 1988 only - Oct 6 11:48:05s 0:11:55 -
Rule sol88 1988 only - Oct 7 11:47:50s 0:12:10 -
Rule sol88 1988 only - Oct 8 11:47:30s 0:12:30 -
Rule sol88 1988 only - Oct 9 11:47:15s 0:12:45 -
Rule sol88 1988 only - Oct 10 11:47:00s 0:13:00 -
Rule sol88 1988 only - Oct 11 11:46:45s 0:13:15 -
Rule sol88 1988 only - Oct 12 11:46:30s 0:13:30 -
Rule sol88 1988 only - Oct 13 11:46:15s 0:13:45 -
Rule sol88 1988 only - Oct 14 11:46:00s 0:14:00 -
Rule sol88 1988 only - Oct 15 11:45:45s 0:14:15 -
Rule sol88 1988 only - Oct 16 11:45:35s 0:14:25 -
Rule sol88 1988 only - Oct 17 11:45:20s 0:14:40 -
Rule sol88 1988 only - Oct 18 11:45:10s 0:14:50 -
Rule sol88 1988 only - Oct 19 11:45:00s 0:15:00 -
Rule sol88 1988 only - Oct 20 11:44:45s 0:15:15 -
Rule sol88 1988 only - Oct 21 11:44:40s 0:15:20 -
Rule sol88 1988 only - Oct 22 11:44:30s 0:15:30 -
Rule sol88 1988 only - Oct 23 11:44:20s 0:15:40 -
Rule sol88 1988 only - Oct 24 11:44:10s 0:15:50 -
Rule sol88 1988 only - Oct 25 11:44:05s 0:15:55 -
Rule sol88 1988 only - Oct 26 11:44:00s 0:16:00 -
Rule sol88 1988 only - Oct 27 11:43:55s 0:16:05 -
Rule sol88 1988 only - Oct 28 11:43:50s 0:16:10 -
Rule sol88 1988 only - Oct 29 11:43:45s 0:16:15 -
Rule sol88 1988 only - Oct 30 11:43:40s 0:16:20 -
Rule sol88 1988 only - Oct 31 11:43:40s 0:16:20 -
Rule sol88 1988 only - Nov 1 11:43:35s 0:16:25 -
Rule sol88 1988 only - Nov 2 11:43:35s 0:16:25 -
Rule sol88 1988 only - Nov 3 11:43:35s 0:16:25 -
Rule sol88 1988 only - Nov 4 11:43:35s 0:16:25 -
Rule sol88 1988 only - Nov 5 11:43:40s 0:16:20 -
Rule sol88 1988 only - Nov 6 11:43:40s 0:16:20 -
Rule sol88 1988 only - Nov 7 11:43:45s 0:16:15 -
Rule sol88 1988 only - Nov 8 11:43:45s 0:16:15 -
Rule sol88 1988 only - Nov 9 11:43:50s 0:16:10 -
Rule sol88 1988 only - Nov 10 11:44:00s 0:16:00 -
Rule sol88 1988 only - Nov 11 11:44:05s 0:15:55 -
Rule sol88 1988 only - Nov 12 11:44:10s 0:15:50 -
Rule sol88 1988 only - Nov 13 11:44:20s 0:15:40 -
Rule sol88 1988 only - Nov 14 11:44:30s 0:15:30 -
Rule sol88 1988 only - Nov 15 11:44:40s 0:15:20 -
Rule sol88 1988 only - Nov 16 11:44:50s 0:15:10 -
Rule sol88 1988 only - Nov 17 11:45:00s 0:15:00 -
Rule sol88 1988 only - Nov 18 11:45:15s 0:14:45 -
Rule sol88 1988 only - Nov 19 11:45:25s 0:14:35 -
Rule sol88 1988 only - Nov 20 11:45:40s 0:14:20 -
Rule sol88 1988 only - Nov 21 11:45:55s 0:14:05 -
Rule sol88 1988 only - Nov 22 11:46:10s 0:13:50 -
Rule sol88 1988 only - Nov 23 11:46:30s 0:13:30 -
Rule sol88 1988 only - Nov 24 11:46:45s 0:13:15 -
Rule sol88 1988 only - Nov 25 11:47:05s 0:12:55 -
Rule sol88 1988 only - Nov 26 11:47:20s 0:12:40 -
Rule sol88 1988 only - Nov 27 11:47:40s 0:12:20 -
Rule sol88 1988 only - Nov 28 11:48:00s 0:12:00 -
Rule sol88 1988 only - Nov 29 11:48:25s 0:11:35 -
Rule sol88 1988 only - Nov 30 11:48:45s 0:11:15 -
Rule sol88 1988 only - Dec 1 11:49:05s 0:10:55 -
Rule sol88 1988 only - Dec 2 11:49:30s 0:10:30 -
Rule sol88 1988 only - Dec 3 11:49:55s 0:10:05 -
Rule sol88 1988 only - Dec 4 11:50:15s 0:09:45 -
Rule sol88 1988 only - Dec 5 11:50:40s 0:09:20 -
Rule sol88 1988 only - Dec 6 11:51:05s 0:08:55 -
Rule sol88 1988 only - Dec 7 11:51:35s 0:08:25 -
Rule sol88 1988 only - Dec 8 11:52:00s 0:08:00 -
Rule sol88 1988 only - Dec 9 11:52:25s 0:07:35 -
Rule sol88 1988 only - Dec 10 11:52:55s 0:07:05 -
Rule sol88 1988 only - Dec 11 11:53:20s 0:06:40 -
Rule sol88 1988 only - Dec 12 11:53:50s 0:06:10 -
Rule sol88 1988 only - Dec 13 11:54:15s 0:05:45 -
Rule sol88 1988 only - Dec 14 11:54:45s 0:05:15 -
Rule sol88 1988 only - Dec 15 11:55:15s 0:04:45 -
Rule sol88 1988 only - Dec 16 11:55:45s 0:04:15 -
Rule sol88 1988 only - Dec 17 11:56:15s 0:03:45 -
Rule sol88 1988 only - Dec 18 11:56:40s 0:03:20 -
Rule sol88 1988 only - Dec 19 11:57:10s 0:02:50 -
Rule sol88 1988 only - Dec 20 11:57:40s 0:02:20 -
Rule sol88 1988 only - Dec 21 11:58:10s 0:01:50 -
Rule sol88 1988 only - Dec 22 11:58:40s 0:01:20 -
Rule sol88 1988 only - Dec 23 11:59:10s 0:00:50 -
Rule sol88 1988 only - Dec 24 11:59:40s 0:00:20 -
Rule sol88 1988 only - Dec 25 12:00:10s -0:00:10 -
Rule sol88 1988 only - Dec 26 12:00:40s -0:00:40 -
Rule sol88 1988 only - Dec 27 12:01:10s -0:01:10 -
Rule sol88 1988 only - Dec 28 12:01:40s -0:01:40 -
Rule sol88 1988 only - Dec 29 12:02:10s -0:02:10 -
Rule sol88 1988 only - Dec 30 12:02:35s -0:02:35 -
Rule sol88 1988 only - Dec 31 12:03:05s -0:03:05 -
# Riyadh is at about 46 degrees 46 minutes East: 3 hrs, 7 mins, 4 secs
# Before and after 1988, we'll operate on local mean solar time.
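# Checking that figure (our arithmetic): 46 deg 46 min East is 46.7667
# degrees; at 4 minutes of time per degree that gives 46.7667 * 4 = 187.07
# minutes, i.e. 3:07:04 to the nearest second, matching the GMTOFF below.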
# Zone NAME GMTOFF RULES/SAVE FORMAT [UNTIL]
Zone Asia/Riyadh88 3:07:04 - zzz 1988
3:07:04 sol88 zzz 1989
3:07:04 - zzz
# For backward compatibility...
Link Asia/Riyadh88 Mideast/Riyadh88
|
233zzh/TitanDataOperationSystem | 4,083 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/flot/examples/axes-time-zones/tz/backward | # <pre>
# This file is in the public domain, so clarified as of
# 2009-05-17 by Arthur David Olson.
# This file provides links between current names for time zones
# and their old names. Many names changed in late 1993.
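# Each line below has the form "Link TARGET LINK-NAME"; the LINK-NAME
# becomes an alias that resolves to the same zone data as TARGET.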
Link Africa/Asmara Africa/Asmera
Link Africa/Bamako Africa/Timbuktu
Link America/Argentina/Catamarca America/Argentina/ComodRivadavia
Link America/Adak America/Atka
Link America/Argentina/Buenos_Aires America/Buenos_Aires
Link America/Argentina/Catamarca America/Catamarca
Link America/Atikokan America/Coral_Harbour
Link America/Argentina/Cordoba America/Cordoba
Link America/Tijuana America/Ensenada
Link America/Indiana/Indianapolis America/Fort_Wayne
Link America/Indiana/Indianapolis America/Indianapolis
Link America/Argentina/Jujuy America/Jujuy
Link America/Indiana/Knox America/Knox_IN
Link America/Kentucky/Louisville America/Louisville
Link America/Argentina/Mendoza America/Mendoza
Link America/Rio_Branco America/Porto_Acre
Link America/Argentina/Cordoba America/Rosario
Link America/St_Thomas America/Virgin
Link Asia/Ashgabat Asia/Ashkhabad
Link Asia/Chongqing Asia/Chungking
Link Asia/Dhaka Asia/Dacca
Link Asia/Kathmandu Asia/Katmandu
Link Asia/Kolkata Asia/Calcutta
Link Asia/Macau Asia/Macao
Link Asia/Jerusalem Asia/Tel_Aviv
Link Asia/Ho_Chi_Minh Asia/Saigon
Link Asia/Thimphu Asia/Thimbu
Link Asia/Makassar Asia/Ujung_Pandang
Link Asia/Ulaanbaatar Asia/Ulan_Bator
Link Atlantic/Faroe Atlantic/Faeroe
Link Europe/Oslo Atlantic/Jan_Mayen
Link Australia/Sydney Australia/ACT
Link Australia/Sydney Australia/Canberra
Link Australia/Lord_Howe Australia/LHI
Link Australia/Sydney Australia/NSW
Link Australia/Darwin Australia/North
Link Australia/Brisbane Australia/Queensland
Link Australia/Adelaide Australia/South
Link Australia/Hobart Australia/Tasmania
Link Australia/Melbourne Australia/Victoria
Link Australia/Perth Australia/West
Link Australia/Broken_Hill Australia/Yancowinna
Link America/Rio_Branco Brazil/Acre
Link America/Noronha Brazil/DeNoronha
Link America/Sao_Paulo Brazil/East
Link America/Manaus Brazil/West
Link America/Halifax Canada/Atlantic
Link America/Winnipeg Canada/Central
Link America/Regina Canada/East-Saskatchewan
Link America/Toronto Canada/Eastern
Link America/Edmonton Canada/Mountain
Link America/St_Johns Canada/Newfoundland
Link America/Vancouver Canada/Pacific
Link America/Regina Canada/Saskatchewan
Link America/Whitehorse Canada/Yukon
Link America/Santiago Chile/Continental
Link Pacific/Easter Chile/EasterIsland
Link America/Havana Cuba
Link Africa/Cairo Egypt
Link Europe/Dublin Eire
Link Europe/London Europe/Belfast
Link Europe/Chisinau Europe/Tiraspol
Link Europe/London GB
Link Europe/London GB-Eire
Link Etc/GMT GMT+0
Link Etc/GMT GMT-0
Link Etc/GMT GMT0
Link Etc/GMT Greenwich
Link Asia/Hong_Kong Hongkong
Link Atlantic/Reykjavik Iceland
Link Asia/Tehran Iran
Link Asia/Jerusalem Israel
Link America/Jamaica Jamaica
Link Asia/Tokyo Japan
Link Pacific/Kwajalein Kwajalein
Link Africa/Tripoli Libya
Link America/Tijuana Mexico/BajaNorte
Link America/Mazatlan Mexico/BajaSur
Link America/Mexico_City Mexico/General
Link Pacific/Auckland NZ
Link Pacific/Chatham NZ-CHAT
Link America/Denver Navajo
Link Asia/Shanghai PRC
Link Pacific/Pago_Pago Pacific/Samoa
Link Pacific/Chuuk Pacific/Yap
Link Pacific/Chuuk Pacific/Truk
Link Pacific/Pohnpei Pacific/Ponape
Link Europe/Warsaw Poland
Link Europe/Lisbon Portugal
Link Asia/Taipei ROC
Link Asia/Seoul ROK
Link Asia/Singapore Singapore
Link Europe/Istanbul Turkey
Link Etc/UCT UCT
Link America/Anchorage US/Alaska
Link America/Adak US/Aleutian
Link America/Phoenix US/Arizona
Link America/Chicago US/Central
Link America/Indiana/Indianapolis US/East-Indiana
Link America/New_York US/Eastern
Link Pacific/Honolulu US/Hawaii
Link America/Indiana/Knox US/Indiana-Starke
Link America/Detroit US/Michigan
Link America/Denver US/Mountain
Link America/Los_Angeles US/Pacific
Link Pacific/Pago_Pago US/Samoa
Link Etc/UTC UTC
Link Etc/UTC Universal
Link Europe/Moscow W-SU
Link Etc/UTC Zulu
|
27182812/ChatGLM-LLaMA-chinese-insturct | 63,547 | src/transformers/utils/dummy_tf_objects.py | # This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
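# A short note on the mechanism (our summary, not part of the generated
# file): every dummy class below sets `_backends = ["tf"]`, and
# `requires_backends(obj, backends)` raises an ImportError with an
# installation hint whenever a listed backend (here TensorFlow) is not
# available.  Importing these names therefore always succeeds, while
# instantiating them without TensorFlow fails with a clear message.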
class TensorFlowBenchmarkArguments(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TensorFlowBenchmark(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFForcedBOSTokenLogitsProcessor(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFForcedEOSTokenLogitsProcessor(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFGenerationMixin(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLogitsProcessor(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLogitsProcessorList(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLogitsWarper(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMinLengthLogitsProcessor(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFNoBadWordsLogitsProcessor(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFNoRepeatNGramLogitsProcessor(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRepetitionPenaltyLogitsProcessor(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFTemperatureLogitsWarper(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFTopKLogitsWarper(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFTopPLogitsWarper(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
def tf_top_k_top_p_filtering(*args, **kwargs):
requires_backends(tf_top_k_top_p_filtering, ["tf"])
class KerasMetricCallback(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class PushToHubCallback(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFLayoutLMForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLayoutLMForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLayoutLMForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLayoutLMForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLayoutLMMainLayer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLayoutLMModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLayoutLMPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFSequenceSummary(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFSharedEmbeddings(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
def shape_list(*args, **kwargs):
requires_backends(shape_list, ["tf"])
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFAlbertForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAlbertForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAlbertForPreTraining(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAlbertForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAlbertForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAlbertForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAlbertMainLayer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAlbertModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAlbertPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_MODEL_FOR_CAUSAL_LM_MAPPING = None
TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = None
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None
TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = None
TF_MODEL_FOR_MASKED_LM_MAPPING = None
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None
TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None
TF_MODEL_FOR_PRETRAINING_MAPPING = None
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = None
TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = None
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None
TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None
TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = None
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None
TF_MODEL_FOR_VISION_2_SEQ_MAPPING = None
TF_MODEL_MAPPING = None
TF_MODEL_WITH_LM_HEAD_MAPPING = None
class TFAutoModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAutoModelForCausalLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAutoModelForDocumentQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAutoModelForImageClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAutoModelForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAutoModelForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAutoModelForNextSentencePrediction(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAutoModelForPreTraining(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAutoModelForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAutoModelForSemanticSegmentation(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAutoModelForSeq2SeqLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAutoModelForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAutoModelForSpeechSeq2Seq(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAutoModelForTableQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAutoModelForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAutoModelForVision2Seq(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFAutoModelWithLMHead(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBartForConditionalGeneration(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBartForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBartModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBartPretrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFBertEmbeddings(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBertForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBertForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBertForNextSentencePrediction(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBertForPreTraining(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBertForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBertForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBertLMHeadModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBertMainLayer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBertModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBertPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBlenderbotForConditionalGeneration(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBlenderbotModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBlenderbotPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBlenderbotSmallForConditionalGeneration(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBlenderbotSmallModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFBlenderbotSmallPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFCamembertForCausalLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFCamembertForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFCamembertForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFCamembertForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFCamembertForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFCamembertForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFCamembertModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFCamembertPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFCLIPModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFCLIPPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFCLIPTextModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFCLIPVisionModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFConvBertForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFConvBertForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFConvBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFConvBertForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFConvBertForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFConvBertLayer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFConvBertModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFConvBertPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFConvNextForImageClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFConvNextModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFConvNextPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFCTRLForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFCTRLLMHeadModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFCTRLModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFCTRLPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFCvtForImageClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFCvtModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFCvtPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFData2VecVisionForImageClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFData2VecVisionForSemanticSegmentation(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFData2VecVisionModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFData2VecVisionPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFDebertaForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDebertaForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDebertaForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDebertaForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDebertaModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDebertaPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFDebertaV2ForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDebertaV2ForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDebertaV2ForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDebertaV2ForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDebertaV2Model(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDebertaV2PreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFDeiTForImageClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDeiTForImageClassificationWithTeacher(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDeiTForMaskedImageModeling(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDeiTModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDeiTPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFDistilBertForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDistilBertForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDistilBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDistilBertForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDistilBertForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDistilBertMainLayer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDistilBertModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDistilBertPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFDPRContextEncoder(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDPRPretrainedContextEncoder(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDPRPretrainedQuestionEncoder(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDPRPretrainedReader(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDPRQuestionEncoder(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFDPRReader(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFElectraForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFElectraForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFElectraForPreTraining(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFElectraForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFElectraForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFElectraForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFElectraModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFElectraPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFEncoderDecoderModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
ESM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFEsmForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFEsmForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFEsmForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFEsmModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFEsmPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFFlaubertForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFFlaubertForQuestionAnsweringSimple(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFFlaubertForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFFlaubertForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFFlaubertModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFFlaubertPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFFlaubertWithLMHeadModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFFunnelBaseModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFFunnelForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFFunnelForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFFunnelForPreTraining(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFFunnelForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFFunnelForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFFunnelForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFFunnelModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFFunnelPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFGPT2DoubleHeadsModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFGPT2ForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFGPT2LMHeadModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFGPT2MainLayer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFGPT2Model(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFGPT2PreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFGPTJForCausalLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFGPTJForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFGPTJForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFGPTJModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFGPTJPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFGroupViTModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFGroupViTPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFGroupViTTextModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFGroupViTVisionModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFHubertForCTC(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFHubertModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFHubertPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFLayoutLMv3ForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLayoutLMv3ForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLayoutLMv3ForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLayoutLMv3Model(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLayoutLMv3PreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLEDForConditionalGeneration(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLEDModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLEDPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFLongformerForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLongformerForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLongformerForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLongformerForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLongformerForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLongformerModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLongformerPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLongformerSelfAttention(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFLxmertForPreTraining(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLxmertMainLayer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLxmertModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLxmertPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFLxmertVisualFeatureEncoder(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMarianModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMarianMTModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMarianPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMBartForConditionalGeneration(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMBartModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMBartPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFMobileBertForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMobileBertForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMobileBertForNextSentencePrediction(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMobileBertForPreTraining(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMobileBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMobileBertForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMobileBertForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMobileBertMainLayer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMobileBertModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMobileBertPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMobileViTForImageClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMobileViTForSemanticSegmentation(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMobileViTModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMobileViTPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFMPNetForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMPNetForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMPNetForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMPNetForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMPNetForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMPNetMainLayer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMPNetModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMPNetPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMT5EncoderModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMT5ForConditionalGeneration(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFMT5Model(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFOpenAIGPTDoubleHeadsModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFOpenAIGPTForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFOpenAIGPTLMHeadModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFOpenAIGPTMainLayer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFOpenAIGPTModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFOpenAIGPTPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFOPTForCausalLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFOPTModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFOPTPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFPegasusForConditionalGeneration(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFPegasusModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFPegasusPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRagModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRagPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRagSequenceForGeneration(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRagTokenForGeneration(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFRegNetForImageClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRegNetModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRegNetPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFRemBertForCausalLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRemBertForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRemBertForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRemBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRemBertForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRemBertForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRemBertLayer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRemBertModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRemBertPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFResNetForImageClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFResNetModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFResNetPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFRobertaForCausalLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRobertaForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRobertaForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRobertaForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRobertaForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRobertaForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRobertaMainLayer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRobertaModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRobertaPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFRobertaPreLayerNormForCausalLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRobertaPreLayerNormForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRobertaPreLayerNormForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRobertaPreLayerNormForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRobertaPreLayerNormForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRobertaPreLayerNormMainLayer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRobertaPreLayerNormModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRobertaPreLayerNormPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFRoFormerForCausalLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRoFormerForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRoFormerForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRoFormerForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRoFormerForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRoFormerForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRoFormerLayer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRoFormerModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFRoFormerPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFSegformerDecodeHead(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFSegformerForImageClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFSegformerForSemanticSegmentation(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFSegformerModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFSegformerPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFSpeech2TextForConditionalGeneration(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFSpeech2TextModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFSpeech2TextPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFSwinForImageClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFSwinForMaskedImageModeling(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFSwinModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFSwinPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFT5EncoderModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFT5ForConditionalGeneration(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFT5Model(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFT5PreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFTapasForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFTapasForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFTapasForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFTapasModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFTapasPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFAdaptiveEmbedding(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFTransfoXLForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFTransfoXLLMHeadModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFTransfoXLMainLayer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFTransfoXLModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFTransfoXLPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFVisionEncoderDecoderModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFVisionTextDualEncoderModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFViTForImageClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFViTModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFViTPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFViTMAEForPreTraining(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFViTMAEModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFViTMAEPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFWav2Vec2ForCTC(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFWav2Vec2Model(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFWav2Vec2PreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFWhisperForConditionalGeneration(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFWhisperModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFWhisperPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFXGLMForCausalLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXGLMModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXGLMPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFXLMForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLMForQuestionAnsweringSimple(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLMForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLMForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLMMainLayer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLMModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLMPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLMWithLMHeadModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFXLMRobertaForCausalLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLMRobertaForMaskedLM(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLMRobertaForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLMRobertaForQuestionAnswering(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLMRobertaForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLMRobertaForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLMRobertaModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLMRobertaPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TFXLNetForMultipleChoice(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLNetForQuestionAnsweringSimple(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLNetForSequenceClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLNetForTokenClassification(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLNetLMHeadModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLNetMainLayer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLNetModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class TFXLNetPreTrainedModel(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class AdamWeightDecay(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class GradientAccumulator(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
class WarmUp(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
def create_optimizer(*args, **kwargs):
requires_backends(create_optimizer, ["tf"])
class TFTrainer(metaclass=DummyObject):
_backends = ["tf"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["tf"])
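# Illustrative sketch, not part of the original file: every dummy above shares one
# pattern, so instantiating any of them without TensorFlow installed raises an
# ImportError from `requires_backends`. `TFT5Model` stands in for any class above.
def _example_dummy_tf_object():
    try:
        TFT5Model()  # resolves to the dummy whenever the "tf" backend is missing
    except ImportError as err:
        print(f"TensorFlow backend missing: {err}")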
|
233zzh/TitanDataOperationSystem | 1,748 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/flot/examples/shared/jquery-ui/jquery-ui.min.css | /*! jQuery UI - v1.10.0 - 2013-01-26
* http://jqueryui.com
* Includes: jquery.ui.core.css, jquery.ui.resizable.css
* Copyright (c) 2013 jQuery Foundation and other contributors Licensed MIT */
.ui-helper-hidden{display:none}.ui-helper-hidden-accessible{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.ui-helper-reset{margin:0;padding:0;border:0;outline:0;line-height:1.3;text-decoration:none;font-size:100%;list-style:none}.ui-helper-clearfix:before,.ui-helper-clearfix:after{content:"";display:table}.ui-helper-clearfix:after{clear:both}.ui-helper-clearfix{min-height:0}.ui-helper-zfix{width:100%;height:100%;top:0;left:0;position:absolute;opacity:0;filter:Alpha(Opacity=0)}.ui-front{z-index:100}.ui-state-disabled{cursor:default!important}.ui-icon{display:block;text-indent:-99999px;overflow:hidden;background-repeat:no-repeat}.ui-widget-overlay{position:fixed;top:0;left:0;width:100%;height:100%}.ui-resizable{position:relative}.ui-resizable-handle{position:absolute;font-size:0.1px;display:block}.ui-resizable-disabled .ui-resizable-handle,.ui-resizable-autohide .ui-resizable-handle{display:none}.ui-resizable-n{cursor:n-resize;height:7px;width:100%;top:-5px;left:0}.ui-resizable-s{cursor:s-resize;height:7px;width:100%;bottom:-5px;left:0}.ui-resizable-e{cursor:e-resize;width:7px;right:-5px;top:0;height:100%}.ui-resizable-w{cursor:w-resize;width:7px;left:-5px;top:0;height:100%}.ui-resizable-se{cursor:se-resize;width:12px;height:12px;right:1px;bottom:1px}.ui-resizable-sw{cursor:sw-resize;width:9px;height:9px;left:-5px;bottom:-5px}.ui-resizable-nw{cursor:nw-resize;width:9px;height:9px;left:-5px;top:-5px}.ui-resizable-ne{cursor:ne-resize;width:9px;height:9px;right:-5px;top:-5px} |
27182812/ChatGLM-LLaMA-chinese-insturct | 47,136 | src/transformers/utils/hub.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Hub utilities: utilities related to download and cache models
"""
import json
import os
import re
import shutil
import sys
import tempfile
import traceback
import warnings
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
from urllib.parse import urlparse
from uuid import uuid4
import huggingface_hub
import requests
from huggingface_hub import (
CommitOperationAdd,
create_commit,
create_repo,
get_hf_file_metadata,
hf_hub_download,
hf_hub_url,
whoami,
)
from huggingface_hub.file_download import REGEX_COMMIT_HASH, http_get
from huggingface_hub.utils import (
EntryNotFoundError,
LocalEntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
build_hf_headers,
hf_raise_for_status,
)
from requests.exceptions import HTTPError
from . import __version__, logging
from .generic import working_or_temp_dir
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_tf_version,
_torch_version,
is_tf_available,
is_torch_available,
is_training_run_on_sagemaker,
)
from .logging import tqdm
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
_is_offline_mode = os.environ.get("TRANSFORMERS_OFFLINE", "0").upper() in ENV_VARS_TRUE_VALUES
def is_offline_mode():
return _is_offline_mode
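# Illustrative sketch, not part of the original module: offline mode is frozen at import
# time from the TRANSFORMERS_OFFLINE environment variable, so it must be set before
# `transformers` is imported for `is_offline_mode()` to reflect it.
def _example_is_offline_mode():
    mode = "offline" if is_offline_mode() else "online"
    print(f"transformers is running {mode}")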
torch_cache_home = os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
old_default_cache_path = os.path.join(torch_cache_home, "transformers")
# New default cache, shared with the Datasets library
hf_cache_home = os.path.expanduser(
os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
default_cache_path = os.path.join(hf_cache_home, "hub")
# One-time move from the old location to the new one if no ENV variable has been set.
if (
os.path.isdir(old_default_cache_path)
and not os.path.isdir(default_cache_path)
and "PYTORCH_PRETRAINED_BERT_CACHE" not in os.environ
and "PYTORCH_TRANSFORMERS_CACHE" not in os.environ
and "TRANSFORMERS_CACHE" not in os.environ
):
logger.warning(
"In Transformers v4.0.0, the default path to cache downloaded models changed from"
" '~/.cache/torch/transformers' to '~/.cache/huggingface/transformers'. Since you don't seem to have"
" overridden and '~/.cache/torch/transformers' is a directory that exists, we're moving it to"
" '~/.cache/huggingface/transformers' to avoid redownloading models you have already in the cache. You should"
" only see this message once."
)
shutil.move(old_default_cache_path, default_cache_path)
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
HUGGINGFACE_HUB_CACHE = os.getenv("HUGGINGFACE_HUB_CACHE", PYTORCH_TRANSFORMERS_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", HUGGINGFACE_HUB_CACHE)
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
TRANSFORMERS_DYNAMIC_MODULE_NAME = "transformers_modules"
SESSION_ID = uuid4().hex
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", False) in ENV_VARS_TRUE_VALUES
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
_staging_mode = os.environ.get("HUGGINGFACE_CO_STAGING", "NO").upper() in ENV_VARS_TRUE_VALUES
_default_endpoint = "https://hub-ci.huggingface.co" if _staging_mode else "https://huggingface.co"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = _default_endpoint
if os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) is not None:
warnings.warn(
"Using the environment variable `HUGGINGFACE_CO_RESOLVE_ENDPOINT` is deprecated and will be removed in "
"Transformers v5. Use `HF_ENDPOINT` instead.",
FutureWarning,
)
HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None)
HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", HUGGINGFACE_CO_RESOLVE_ENDPOINT)
HUGGINGFACE_CO_PREFIX = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/{model_id}/resolve/{revision}/{filename}"
HUGGINGFACE_CO_EXAMPLES_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/examples"
# Return value when trying to load a file from cache but the file does not exist in the distant repo.
_CACHED_NO_EXIST = object()
def is_remote_url(url_or_filename):
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https")
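# A tiny sketch, not part of the original module: only http(s) URLs count as remote,
# so plain filesystem paths fall through to the local/cache resolution logic.
def _example_is_remote_url():
    assert is_remote_url("https://huggingface.co/bert-base-uncased/resolve/main/config.json")
    assert not is_remote_url("/home/user/models/config.json")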
def get_cached_models(cache_dir: Union[str, Path] = None) -> List[Tuple]:
"""
    Returns a list of tuples representing model binaries that are cached locally. Each tuple has shape `(model_url,
    etag, size_MB)`. Filenames in `cache_dir` are used to get the metadata for each model; only urls ending with
    *.bin* are added.
Args:
cache_dir (`Union[str, Path]`, *optional*):
The cache directory to search for models within. Will default to the transformers cache if unset.
Returns:
List[Tuple]: List of tuples each with shape `(model_url, etag, size_MB)`
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
elif isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.isdir(cache_dir):
return []
cached_models = []
for file in os.listdir(cache_dir):
if file.endswith(".json"):
meta_path = os.path.join(cache_dir, file)
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
if url.endswith(".bin"):
                    # `str.strip(".json")` would strip any of the characters ".json" from both
                    # ends instead of removing the suffix, so slice the extension off explicitly.
                    size_MB = os.path.getsize(meta_path[: -len(".json")]) / 1e6
cached_models.append((url, etag, size_MB))
return cached_models
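# Minimal usage sketch, not part of the original module: enumerate the cached *.bin
# weights and their sizes; the output depends entirely on the local cache contents.
def _example_get_cached_models():
    for model_url, etag, size_MB in get_cached_models():
        print(f"{model_url} ({size_MB:.1f} MB, etag={etag})")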
def define_sagemaker_information():
try:
instance_data = requests.get(os.environ["ECS_CONTAINER_METADATA_URI"]).json()
dlc_container_used = instance_data["Image"]
dlc_tag = instance_data["Image"].split(":")[1]
except Exception:
dlc_container_used = None
dlc_tag = None
sagemaker_params = json.loads(os.getenv("SM_FRAMEWORK_PARAMS", "{}"))
    runs_distributed_training = "sagemaker_distributed_dataparallel_enabled" in sagemaker_params
account_id = os.getenv("TRAINING_JOB_ARN").split(":")[4] if "TRAINING_JOB_ARN" in os.environ else None
sagemaker_object = {
"sm_framework": os.getenv("SM_FRAMEWORK_MODULE", None),
"sm_region": os.getenv("AWS_REGION", None),
"sm_number_gpu": os.getenv("SM_NUM_GPUS", 0),
"sm_number_cpu": os.getenv("SM_NUM_CPUS", 0),
"sm_distributed_training": runs_distributed_training,
"sm_deep_learning_container": dlc_container_used,
"sm_deep_learning_container_tag": dlc_tag,
"sm_account_id": account_id,
}
return sagemaker_object
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
"""
Formats a user-agent string with basic info about a request.
"""
ua = f"transformers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
if is_torch_available():
ua += f"; torch/{_torch_version}"
if is_tf_available():
ua += f"; tensorflow/{_tf_version}"
if DISABLE_TELEMETRY:
return ua + "; telemetry/off"
if is_training_run_on_sagemaker():
ua += "; " + "; ".join(f"{k}/{v}" for k, v in define_sagemaker_information().items())
# CI will set this value to True
if os.environ.get("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(user_agent, dict):
ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
elif isinstance(user_agent, str):
ua += "; " + user_agent
return ua
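# Hedged sketch, not part of the original module: extra key/value pairs passed as a
# dict are appended to the base user-agent string built above.
def _example_http_user_agent():
    ua = http_user_agent({"file_type": "model", "framework": "pytorch"})
    # e.g. "transformers/<ver>; python/<ver>; session_id/<id>; file_type/model; framework/pytorch"
    print(ua)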
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str]):
"""
Extracts the commit hash from a resolved filename toward a cache file.
"""
if resolved_file is None or commit_hash is not None:
return commit_hash
resolved_file = str(Path(resolved_file).as_posix())
search = re.search(r"snapshots/([^/]+)/", resolved_file)
if search is None:
return None
commit_hash = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
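# Illustrative sketch, not part of the original module: the hash is taken from the
# "snapshots/<commit>/" segment of a resolved cache path. The path below is hypothetical.
def _example_extract_commit_hash():
    resolved = (
        "/home/user/.cache/huggingface/hub/models--bert-base-uncased/snapshots/"
        "0a6aa9128b6194f4f3c4db429b6cb4891cdb421b/config.json"
    )
    print(extract_commit_hash(resolved, None))  # the 40-character hash, or None if malformed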
def try_to_load_from_cache(
repo_id: str,
filename: str,
cache_dir: Union[str, Path, None] = None,
revision: Optional[str] = None,
) -> Optional[str]:
"""
Explores the cache to return the latest cached file for a given revision if found.
    This function will not raise any exception if the file is not cached.
Args:
cache_dir (`str` or `os.PathLike`):
The folder where the cached files lie.
repo_id (`str`):
The ID of the repo on huggingface.co.
filename (`str`):
The filename to look for inside `repo_id`.
revision (`str`, *optional*):
The specific model version to use. Will default to `"main"` if it's not provided and no `commit_hash` is
provided either.
Returns:
`Optional[str]` or `_CACHED_NO_EXIST`:
Will return `None` if the file was not cached. Otherwise:
- The exact path to the cached file if it's found in the cache
- A special value `_CACHED_NO_EXIST` if the file does not exist at the given commit hash and this fact was
cached.
"""
if revision is None:
revision = "main"
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
object_id = repo_id.replace("/", "--")
repo_cache = os.path.join(cache_dir, f"models--{object_id}")
if not os.path.isdir(repo_cache):
# No cache for this model
return None
for subfolder in ["refs", "snapshots"]:
if not os.path.isdir(os.path.join(repo_cache, subfolder)):
return None
# Resolve refs (for instance to convert main to the associated commit sha)
cached_refs = os.listdir(os.path.join(repo_cache, "refs"))
if revision in cached_refs:
with open(os.path.join(repo_cache, "refs", revision)) as f:
revision = f.read()
if os.path.isfile(os.path.join(repo_cache, ".no_exist", revision, filename)):
return _CACHED_NO_EXIST
cached_shas = os.listdir(os.path.join(repo_cache, "snapshots"))
if revision not in cached_shas:
# No cache for this revision and we won't try to return a random revision
return None
cached_file = os.path.join(repo_cache, "snapshots", revision, filename)
return cached_file if os.path.isfile(cached_file) else None
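# Minimal sketch, not part of the original module, of the three outcomes documented
# above; "bert-base-uncased" is used purely as an example repo id.
def _example_try_to_load_from_cache():
    result = try_to_load_from_cache("bert-base-uncased", "config.json")
    if result is _CACHED_NO_EXIST:
        print("the file's absence at this revision is itself cached")
    elif result is None:
        print("nothing cached for this file/revision")
    else:
        print(f"cached copy at {result}")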
def cached_file(
path_or_repo_id: Union[str, os.PathLike],
filename: str,
cache_dir: Optional[Union[str, os.PathLike]] = None,
force_download: bool = False,
resume_download: bool = False,
proxies: Optional[Dict[str, str]] = None,
use_auth_token: Optional[Union[bool, str]] = None,
revision: Optional[str] = None,
local_files_only: bool = False,
subfolder: str = "",
user_agent: Optional[Union[str, Dict[str, str]]] = None,
_raise_exceptions_for_missing_entries: bool = True,
_raise_exceptions_for_connection_errors: bool = True,
_commit_hash: Optional[str] = None,
):
"""
Tries to locate a file in a local folder and repo, downloads and cache it if necessary.
Args:
path_or_repo_id (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a model repo on huggingface.co.
- a path to a *directory* potentially containing the file.
filename (`str`):
The name of the file to locate in `path_or_repo`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force to (re-)download the configuration files and override the cached versions if they
exist.
resume_download (`bool`, *optional*, defaults to `False`):
            Whether or not to delete incompletely received files. Attempts to resume the download if such a file
            exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
use_auth_token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `huggingface-cli login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
local_files_only (`bool`, *optional*, defaults to `False`):
If `True`, will only try to load the tokenizer configuration from local files.
subfolder (`str`, *optional*, defaults to `""`):
In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
specify the folder name here.
<Tip>
Passing `use_auth_token=True` is required when you want to use a private model.
</Tip>
Returns:
`Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo).
Examples:
```python
# Download a model weight from the Hub and cache it.
model_weights_file = cached_file("bert-base-uncased", "pytorch_model.bin")
```"""
# Private arguments
# _raise_exceptions_for_missing_entries: if False, do not raise an exception for missing entries but return
# None.
# _raise_exceptions_for_connection_errors: if False, do not raise an exception for connection errors but return
# None.
# _commit_hash: passed when we are chaining several calls to various files (e.g. when loading a tokenizer or
# a pipeline). If files are cached for this commit hash, avoid calls to head and get from the cache.
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
if subfolder is None:
subfolder = ""
path_or_repo_id = str(path_or_repo_id)
full_filename = os.path.join(subfolder, filename)
if os.path.isdir(path_or_repo_id):
        resolved_file = os.path.join(path_or_repo_id, subfolder, filename)
if not os.path.isfile(resolved_file):
if _raise_exceptions_for_missing_entries:
raise EnvironmentError(
f"{path_or_repo_id} does not appear to have a file named {full_filename}. Checkout "
f"'https://huggingface.co/{path_or_repo_id}/{revision}' for available files."
)
else:
return None
return resolved_file
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if _commit_hash is not None:
# If the file is cached under that commit hash, we return it directly.
resolved_file = try_to_load_from_cache(
path_or_repo_id, full_filename, cache_dir=cache_dir, revision=_commit_hash
)
if resolved_file is not None:
if resolved_file is not _CACHED_NO_EXIST:
return resolved_file
elif not _raise_exceptions_for_missing_entries:
return None
else:
raise EnvironmentError(f"Could not locate {full_filename} inside {path_or_repo_id}.")
user_agent = http_user_agent(user_agent)
try:
# Load from URL or cache if already cached
resolved_file = hf_hub_download(
path_or_repo_id,
filename,
subfolder=None if len(subfolder) == 0 else subfolder,
revision=revision,
cache_dir=cache_dir,
user_agent=user_agent,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
use_auth_token=use_auth_token,
local_files_only=local_files_only,
)
except RepositoryNotFoundError:
raise EnvironmentError(
f"{path_or_repo_id} is not a local folder and is not a valid model identifier "
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to "
"pass a token having permission to this repo with `use_auth_token` or log in with "
"`huggingface-cli login` and pass `use_auth_token=True`."
)
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists "
"for this model name. Check the model page at "
f"'https://huggingface.co/{path_or_repo_id}' for available revisions."
)
except LocalEntryNotFoundError:
# We try to see if we have a cached version (not up to date):
resolved_file = try_to_load_from_cache(path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision)
if resolved_file is not None and resolved_file != _CACHED_NO_EXIST:
return resolved_file
if not _raise_exceptions_for_missing_entries or not _raise_exceptions_for_connection_errors:
return None
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this file, couldn't find it in the"
f" cached files and it looks like {path_or_repo_id} is not the path to a directory containing a file named"
f" {full_filename}.\nCheckout your internet connection or see how to run the library in offline mode at"
" 'https://huggingface.co/docs/transformers/installation#offline-mode'."
)
except EntryNotFoundError:
if not _raise_exceptions_for_missing_entries:
return None
if revision is None:
revision = "main"
raise EnvironmentError(
f"{path_or_repo_id} does not appear to have a file named {full_filename}. Checkout "
f"'https://huggingface.co/{path_or_repo_id}/{revision}' for available files."
)
except HTTPError as err:
# First we try to see if we have a cached version (not up to date):
resolved_file = try_to_load_from_cache(path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision)
if resolved_file is not None and resolved_file != _CACHED_NO_EXIST:
return resolved_file
if not _raise_exceptions_for_connection_errors:
return None
raise EnvironmentError(f"There was a specific connection error when trying to load {path_or_repo_id}:\n{err}")
return resolved_file
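# Supplementary sketch, not part of the original module: pinning a revision, and probing
# for an optional file via the private flag documented above instead of catching an
# exception. The repo id and filenames are illustrative placeholders.
def _example_cached_file():
    config_path = cached_file("bert-base-uncased", "config.json", revision="main")
    maybe_index = cached_file(
        "bert-base-uncased",
        "model.safetensors.index.json",
        _raise_exceptions_for_missing_entries=False,  # return None instead of raising
    )
    return config_path, maybe_index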
def get_file_from_repo(
path_or_repo: Union[str, os.PathLike],
filename: str,
cache_dir: Optional[Union[str, os.PathLike]] = None,
force_download: bool = False,
resume_download: bool = False,
proxies: Optional[Dict[str, str]] = None,
use_auth_token: Optional[Union[bool, str]] = None,
revision: Optional[str] = None,
local_files_only: bool = False,
subfolder: str = "",
):
"""
Tries to locate a file in a local folder and repo, downloads and cache it if necessary.
Args:
path_or_repo (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a model repo on huggingface.co.
- a path to a *directory* potentially containing the file.
filename (`str`):
The name of the file to locate in `path_or_repo`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force to (re-)download the configuration files and override the cached versions if they
exist.
resume_download (`bool`, *optional*, defaults to `False`):
            Whether or not to delete incompletely received files. Attempts to resume the download if such a file
            exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
use_auth_token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `huggingface-cli login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
local_files_only (`bool`, *optional*, defaults to `False`):
If `True`, will only try to load the tokenizer configuration from local files.
subfolder (`str`, *optional*, defaults to `""`):
In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
specify the folder name here.
<Tip>
Passing `use_auth_token=True` is required when you want to use a private model.
</Tip>
Returns:
`Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo) or `None` if the
file does not exist.
Examples:
```python
# Download a tokenizer configuration from huggingface.co and cache.
tokenizer_config = get_file_from_repo("bert-base-uncased", "tokenizer_config.json")
# This model does not have a tokenizer config so the result will be None.
tokenizer_config = get_file_from_repo("xlm-roberta-base", "tokenizer_config.json")
```"""
return cached_file(
path_or_repo_id=path_or_repo,
filename=filename,
cache_dir=cache_dir,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
use_auth_token=use_auth_token,
revision=revision,
local_files_only=local_files_only,
subfolder=subfolder,
_raise_exceptions_for_missing_entries=False,
_raise_exceptions_for_connection_errors=False,
)
def download_url(url, proxies=None):
"""
    Downloads a given url into a temporary file. This function is not safe to use in multiple processes. Its only use
    is for the deprecated behavior of downloading a config/model from a single url instead of using the Hub.
Args:
url (`str`): The url of the file to download.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
Returns:
`str`: The location of the temporary file where the url was downloaded.
"""
warnings.warn(
f"Using `from_pretrained` with the url of a file (here {url}) is deprecated and won't be possible anymore in"
" v5 of Transformers. You should host your file on the Hub (hf.co) instead and use the repository ID. Note"
" that this is not compatible with the caching system (your file will be downloaded at each execution) or"
" multiple processes (each process will download the file in a different temporary file)."
)
    # `tempfile.mktemp` is deprecated and racy: create the file atomically with
    # `mkstemp` instead and keep only its path, which is written to below.
    fd, tmp_file = tempfile.mkstemp()
    os.close(fd)
with open(tmp_file, "wb") as f:
http_get(url, f, proxies=proxies)
return tmp_file
def has_file(
path_or_repo: Union[str, os.PathLike],
filename: str,
revision: Optional[str] = None,
proxies: Optional[Dict[str, str]] = None,
use_auth_token: Optional[Union[bool, str]] = None,
):
"""
Checks if a repo contains a given file without downloading it. Works for remote repos and local folders.
<Tip warning={false}>
This function will raise an error if the repository `path_or_repo` is not valid or if `revision` does not exist for
this repo, but will return False for regular connection errors.
</Tip>
"""
if os.path.isdir(path_or_repo):
return os.path.isfile(os.path.join(path_or_repo, filename))
url = hf_hub_url(path_or_repo, filename=filename, revision=revision)
headers = build_hf_headers(use_auth_token=use_auth_token, user_agent=http_user_agent())
r = requests.head(url, headers=headers, allow_redirects=False, proxies=proxies, timeout=10)
try:
hf_raise_for_status(r)
return True
except RepositoryNotFoundError as e:
logger.error(e)
raise EnvironmentError(f"{path_or_repo} is not a local folder or a valid repository name on 'https://hf.co'.")
except RevisionNotFoundError as e:
logger.error(e)
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this "
f"model name. Check the model page at 'https://huggingface.co/{path_or_repo}' for available revisions."
)
except requests.HTTPError:
# We return false for EntryNotFoundError (logical) as well as any connection error.
return False
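# Brief sketch, not part of the original module: `has_file` answers with a HEAD request
# for remote repos, so nothing is downloaded. The repo id is illustrative.
def _example_has_file():
    if has_file("bert-base-uncased", "pytorch_model.bin"):
        print("weights exist on the Hub; no download happened")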
class PushToHubMixin:
"""
A Mixin containing the functionality to push a model or tokenizer to the hub.
"""
def _create_repo(
self,
repo_id: str,
private: Optional[bool] = None,
use_auth_token: Optional[Union[bool, str]] = None,
repo_url: Optional[str] = None,
organization: Optional[str] = None,
) -> str:
"""
        Creates the repo if needed, cleans up `repo_id` from the deprecated kwargs `repo_url` and `organization`, and
        retrieves the token.
"""
if repo_url is not None:
warnings.warn(
"The `repo_url` argument is deprecated and will be removed in v5 of Transformers. Use `repo_id` "
"instead."
)
repo_id = repo_url.replace(f"{HUGGINGFACE_CO_RESOLVE_ENDPOINT}/", "")
if organization is not None:
warnings.warn(
"The `organization` argument is deprecated and will be removed in v5 of Transformers. Set your "
"organization directly in the `repo_id` passed instead (`repo_id={organization}/{model_id}`)."
)
if not repo_id.startswith(organization):
if "/" in repo_id:
repo_id = repo_id.split("/")[-1]
repo_id = f"{organization}/{repo_id}"
url = create_repo(repo_id=repo_id, token=use_auth_token, private=private, exist_ok=True)
# If the namespace is not there, add it or `upload_file` will complain
if "/" not in repo_id and url != f"{HUGGINGFACE_CO_RESOLVE_ENDPOINT}/{repo_id}":
repo_id = get_full_repo_name(repo_id, token=use_auth_token)
return repo_id
def _get_files_timestamps(self, working_dir: Union[str, os.PathLike]):
"""
Returns the list of files with their last modification timestamp.
"""
return {f: os.path.getmtime(os.path.join(working_dir, f)) for f in os.listdir(working_dir)}
def _upload_modified_files(
self,
working_dir: Union[str, os.PathLike],
repo_id: str,
files_timestamps: Dict[str, float],
commit_message: Optional[str] = None,
token: Optional[Union[bool, str]] = None,
create_pr: bool = False,
):
"""
Uploads all modified files in `working_dir` to `repo_id`, based on `files_timestamps`.
"""
if commit_message is None:
if "Model" in self.__class__.__name__:
commit_message = "Upload model"
elif "Config" in self.__class__.__name__:
commit_message = "Upload config"
elif "Tokenizer" in self.__class__.__name__:
commit_message = "Upload tokenizer"
elif "FeatureExtractor" in self.__class__.__name__:
commit_message = "Upload feature extractor"
elif "Processor" in self.__class__.__name__:
commit_message = "Upload processor"
else:
commit_message = f"Upload {self.__class__.__name__}"
modified_files = [
f
for f in os.listdir(working_dir)
if f not in files_timestamps or os.path.getmtime(os.path.join(working_dir, f)) > files_timestamps[f]
]
operations = []
for file in modified_files:
operations.append(CommitOperationAdd(path_or_fileobj=os.path.join(working_dir, file), path_in_repo=file))
logger.info(f"Uploading the following files to {repo_id}: {','.join(modified_files)}")
return create_commit(
repo_id=repo_id, operations=operations, commit_message=commit_message, token=token, create_pr=create_pr
)
def push_to_hub(
self,
repo_id: str,
use_temp_dir: Optional[bool] = None,
commit_message: Optional[str] = None,
private: Optional[bool] = None,
use_auth_token: Optional[Union[bool, str]] = None,
max_shard_size: Optional[Union[int, str]] = "10GB",
create_pr: bool = False,
**deprecated_kwargs,
) -> str:
"""
Upload the {object_files} to the 🤗 Model Hub.
Parameters:
repo_id (`str`):
The name of the repository you want to push your {object} to. It should contain your organization name
when pushing to a given organization.
use_temp_dir (`bool`, *optional*):
Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub.
Will default to `True` if there is no directory named like `repo_id`, `False` otherwise.
commit_message (`str`, *optional*):
Message to commit while pushing. Will default to `"Upload {object}"`.
private (`bool`, *optional*):
Whether or not the repository created should be private.
use_auth_token (`bool` or `str`, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url`
is not specified.
max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
Only applicable for models. The maximum size for a checkpoint before being sharded. Each checkpoint
shard will then be of a size lower than this limit. If expressed as a string, needs to be digits
followed by a unit (like `"5MB"`).
create_pr (`bool`, *optional*, defaults to `False`):
Whether or not to create a PR with the uploaded files or directly commit.
Examples:
```python
from transformers import {object_class}
{object} = {object_class}.from_pretrained("bert-base-cased")
# Push the {object} to your namespace with the name "my-finetuned-bert".
{object}.push_to_hub("my-finetuned-bert")
# Push the {object} to an organization with the name "my-finetuned-bert".
{object}.push_to_hub("huggingface/my-finetuned-bert")
```
"""
if "repo_path_or_name" in deprecated_kwargs:
warnings.warn(
"The `repo_path_or_name` argument is deprecated and will be removed in v5 of Transformers. Use "
"`repo_id` instead."
)
repo_id = deprecated_kwargs.pop("repo_path_or_name")
# Deprecation warnings for `repo_url` and `organization` are sent later, in `_create_repo`.
repo_url = deprecated_kwargs.pop("repo_url", None)
organization = deprecated_kwargs.pop("organization", None)
if os.path.isdir(repo_id):
working_dir = repo_id
repo_id = repo_id.split(os.path.sep)[-1]
else:
working_dir = repo_id.split("/")[-1]
repo_id = self._create_repo(
repo_id, private=private, use_auth_token=use_auth_token, repo_url=repo_url, organization=organization
)
if use_temp_dir is None:
use_temp_dir = not os.path.isdir(working_dir)
with working_or_temp_dir(working_dir=working_dir, use_temp_dir=use_temp_dir) as work_dir:
files_timestamps = self._get_files_timestamps(work_dir)
# Save all files.
self.save_pretrained(work_dir, max_shard_size=max_shard_size)
return self._upload_modified_files(
work_dir,
repo_id,
files_timestamps,
commit_message=commit_message,
token=use_auth_token,
create_pr=create_pr,
)
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
if organization is None:
username = whoami(token)["name"]
return f"{username}/{model_id}"
else:
return f"{organization}/{model_id}"
def send_example_telemetry(example_name, *example_args, framework="pytorch"):
"""
Sends telemetry that helps track example usage.
Args:
example_name (`str`): The name of the example.
*example_args (dataclasses or `argparse.ArgumentParser`): The arguments to the script. This function will only
try to extract the model and dataset name from those. Nothing else is tracked.
framework (`str`, *optional*, defaults to `"pytorch"`): The framework for the example.
"""
if is_offline_mode():
return
data = {"example": example_name, "framework": framework}
for args in example_args:
args_as_dict = {k: v for k, v in args.__dict__.items() if not k.startswith("_") and v is not None}
if "model_name_or_path" in args_as_dict:
model_name = args_as_dict["model_name_or_path"]
# Filter out local paths
if not os.path.isdir(model_name):
data["model_name"] = args_as_dict["model_name_or_path"]
if "dataset_name" in args_as_dict:
data["dataset_name"] = args_as_dict["dataset_name"]
elif "task_name" in args_as_dict:
# Extract script name from the example_name
script_name = example_name.replace("tf_", "").replace("flax_", "").replace("run_", "")
script_name = script_name.replace("_no_trainer", "")
data["dataset_name"] = f"{script_name}-{args_as_dict['task_name']}"
headers = {"user-agent": http_user_agent(data)}
try:
r = requests.head(HUGGINGFACE_CO_EXAMPLES_TELEMETRY, headers=headers)
r.raise_for_status()
except Exception:
# We don't want to error in case of connection errors of any kind.
pass
def convert_file_size_to_int(size: Union[int, str]):
"""
Converts a size expressed as a string with digits and a unit (like `"5MB"`) to an integer (in bytes).
Args:
size (`int` or `str`): The size to convert. Will be directly returned if an `int`.
Example:
```py
>>> convert_file_size_to_int("1MiB")
1048576
```
"""
if isinstance(size, int):
return size
if size.upper().endswith("GIB"):
return int(size[:-3]) * (2**30)
if size.upper().endswith("MIB"):
return int(size[:-3]) * (2**20)
if size.upper().endswith("KIB"):
return int(size[:-3]) * (2**10)
if size.upper().endswith("GB"):
int_size = int(size[:-2]) * (10**9)
return int_size // 8 if size.endswith("b") else int_size
if size.upper().endswith("MB"):
int_size = int(size[:-2]) * (10**6)
return int_size // 8 if size.endswith("b") else int_size
if size.upper().endswith("KB"):
int_size = int(size[:-2]) * (10**3)
return int_size // 8 if size.endswith("b") else int_size
raise ValueError("`size` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.")
def get_checkpoint_shard_files(
pretrained_model_name_or_path,
index_filename,
cache_dir=None,
force_download=False,
proxies=None,
resume_download=False,
local_files_only=False,
use_auth_token=None,
user_agent=None,
revision=None,
subfolder="",
_commit_hash=None,
):
"""
For a given model:
- downloads and caches all the shards of a sharded checkpoint if `pretrained_model_name_or_path` is a model ID on
the Hub
- returns the list of paths to all the shards, as well as some metadata.
For the description of each arg, see [`PreTrainedModel.from_pretrained`]. `index_filename` is the full path to the
index (downloaded and cached if `pretrained_model_name_or_path` is a model ID on the Hub).
"""
import json
if not os.path.isfile(index_filename):
raise ValueError(f"Can't find a checkpoint index ({index_filename}) in {pretrained_model_name_or_path}.")
with open(index_filename, "r") as f:
index = json.loads(f.read())
shard_filenames = sorted(set(index["weight_map"].values()))
sharded_metadata = index["metadata"]
sharded_metadata["all_checkpoint_keys"] = list(index["weight_map"].keys())
sharded_metadata["weight_map"] = index["weight_map"].copy()
# First, let's deal with local folder.
if os.path.isdir(pretrained_model_name_or_path):
shard_filenames = [os.path.join(pretrained_model_name_or_path, subfolder, f) for f in shard_filenames]
return shard_filenames, sharded_metadata
# At this stage pretrained_model_name_or_path is a model identifier on the Hub
cached_filenames = []
for shard_filename in shard_filenames:
try:
# Load from URL
cached_filename = cached_file(
pretrained_model_name_or_path,
shard_filename,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
revision=revision,
subfolder=subfolder,
_commit_hash=_commit_hash,
)
# We have already dealt with RepositoryNotFoundError and RevisionNotFoundError when getting the index, so
# we don't have to catch them here.
except EntryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {shard_filename} which is "
"required according to the checkpoint index."
)
except HTTPError:
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load {shard_filename}. You should try"
" again after checking your internet connection."
)
cached_filenames.append(cached_filename)
return cached_filenames, sharded_metadata
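# For reference, a minimal sketch of the index format consumed above (filenames and
# sizes are illustrative): `weight_map` maps each parameter name to the shard file
# that stores it, and `metadata` carries checkpoint-level information.
_EXAMPLE_SHARD_INDEX = {
    "metadata": {"total_size": 28_000_000_000},
    "weight_map": {
        "model.embed_tokens.weight": "pytorch_model-00001-of-00002.bin",
        "lm_head.weight": "pytorch_model-00002-of-00002.bin",
    },
}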
# Everything below handles the conversion between the old cache format and the new one.
def get_all_cached_files(cache_dir=None):
"""
Returns a list of all cached files with their metadata (url and etag).
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
else:
cache_dir = str(cache_dir)
if not os.path.isdir(cache_dir):
return []
cached_files = []
for file in os.listdir(cache_dir):
meta_path = os.path.join(cache_dir, f"{file}.json")
if not os.path.isfile(meta_path):
continue
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"].replace('"', "")
cached_files.append({"file": file, "url": url, "etag": etag})
return cached_files
def extract_info_from_url(url):
"""
Extract repo_name, revision and filename from a URL.
"""
search = re.search(r"^https://huggingface\.co/(.*)/resolve/([^/]*)/(.*)$", url)
if search is None:
return None
repo, revision, filename = search.groups()
cache_repo = "--".join(["models"] + repo.split("/"))
return {"repo": cache_repo, "revision": revision, "filename": filename}
def clean_files_for(file):
"""
Remove `file`, `file.json` and `file.lock` if they exist.
"""
for f in [file, f"{file}.json", f"{file}.lock"]:
if os.path.isfile(f):
os.remove(f)
def move_to_new_cache(file, repo, filename, revision, etag, commit_hash):
"""
Move `file` into `repo`, following the new huggingface_hub cache organization.
"""
os.makedirs(repo, exist_ok=True)
# refs
os.makedirs(os.path.join(repo, "refs"), exist_ok=True)
if revision != commit_hash:
ref_path = os.path.join(repo, "refs", revision)
with open(ref_path, "w") as f:
f.write(commit_hash)
# blobs
os.makedirs(os.path.join(repo, "blobs"), exist_ok=True)
blob_path = os.path.join(repo, "blobs", etag)
shutil.move(file, blob_path)
# snapshots
os.makedirs(os.path.join(repo, "snapshots"), exist_ok=True)
os.makedirs(os.path.join(repo, "snapshots", commit_hash), exist_ok=True)
pointer_path = os.path.join(repo, "snapshots", commit_hash, filename)
huggingface_hub.file_download._create_relative_symlink(blob_path, pointer_path)
clean_files_for(file)
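# For reference, the layout produced above (paths are illustrative):
#   <repo>/refs/<revision>            -> text file containing the commit hash
#   <repo>/blobs/<etag>               -> the actual file contents
#   <repo>/snapshots/<commit>/<name>  -> symlink pointing into blobs/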
def move_cache(cache_dir=None, new_cache_dir=None, token=None):
if new_cache_dir is None:
new_cache_dir = TRANSFORMERS_CACHE
if cache_dir is None:
# Migrate from old cache in .cache/huggingface/hub
old_cache = Path(TRANSFORMERS_CACHE).parent / "transformers"
if os.path.isdir(str(old_cache)):
cache_dir = str(old_cache)
else:
cache_dir = new_cache_dir
cached_files = get_all_cached_files(cache_dir=cache_dir)
logger.info(f"Moving {len(cached_files)} files to the new cache system")
hub_metadata = {}
for file_info in tqdm(cached_files):
url = file_info.pop("url")
if url not in hub_metadata:
try:
hub_metadata[url] = get_hf_file_metadata(url, token=token)
except requests.HTTPError:
continue
etag, commit_hash = hub_metadata[url].etag, hub_metadata[url].commit_hash
if etag is None or commit_hash is None:
continue
if file_info["etag"] != etag:
# Cached file is not up to date; we just discard it, as a new version will be downloaded anyway.
clean_files_for(os.path.join(cache_dir, file_info["file"]))
continue
url_info = extract_info_from_url(url)
if url_info is None:
# Not a file from huggingface.co
continue
repo = os.path.join(new_cache_dir, url_info["repo"])
move_to_new_cache(
file=os.path.join(cache_dir, file_info["file"]),
repo=repo,
filename=url_info["filename"],
revision=url_info["revision"],
etag=etag,
commit_hash=commit_hash,
)
cache_version_file = os.path.join(TRANSFORMERS_CACHE, "version.txt")
if not os.path.isfile(cache_version_file):
cache_version = 0
else:
with open(cache_version_file) as f:
cache_version = int(f.read())
cache_is_not_empty = os.path.isdir(TRANSFORMERS_CACHE) and len(os.listdir(TRANSFORMERS_CACHE)) > 0
if cache_version < 1 and cache_is_not_empty:
if is_offline_mode():
logger.warning(
"You are offline and the cache for model files in Transformers v4.22.0 has been updated while your local "
"cache seems to be the one of a previous version. It is very likely that all your calls to any "
"`from_pretrained()` method will fail. Remove the offline mode and enable internet connection to have "
"your cache be updated automatically, then you can go back to offline mode."
)
else:
logger.warning(
"The cache for model files in Transformers v4.22.0 has been updated. Migrating your old cache. This is a "
"one-time only operation. You can interrupt this and resume the migration later on by calling "
"`transformers.utils.move_cache()`."
)
try:
if TRANSFORMERS_CACHE != default_cache_path:
# Users set some env variable to customize cache storage
move_cache(TRANSFORMERS_CACHE, TRANSFORMERS_CACHE)
else:
move_cache()
except Exception as e:
trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
"file an issue at https://github.com/huggingface/transformers/issues/new/choose and copy paste this whole "
"message and we will do our best to help."
)
if cache_version < 1:
try:
os.makedirs(TRANSFORMERS_CACHE, exist_ok=True)
with open(cache_version_file, "w") as f:
f.write("1")
except Exception:
logger.warning(
f"There was a problem when trying to write in your cache folder ({TRANSFORMERS_CACHE}). You should set "
"the environment variable TRANSFORMERS_CACHE to a writable directory."
)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 29,516 | src/transformers/utils/dummy_flax_objects.py | # This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
class FlaxForcedBOSTokenLogitsProcessor(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxForcedEOSTokenLogitsProcessor(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxGenerationMixin(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxLogitsProcessor(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxLogitsProcessorList(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxLogitsWarper(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxMinLengthLogitsProcessor(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxTemperatureLogitsWarper(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxTopKLogitsWarper(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxTopPLogitsWarper(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAlbertForMaskedLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAlbertForMultipleChoice(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAlbertForPreTraining(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAlbertForQuestionAnswering(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAlbertForSequenceClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAlbertForTokenClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAlbertModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAlbertPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = None
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None
FLAX_MODEL_FOR_MASKED_LM_MAPPING = None
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None
FLAX_MODEL_FOR_PRETRAINING_MAPPING = None
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = None
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = None
FLAX_MODEL_MAPPING = None
class FlaxAutoModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAutoModelForCausalLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAutoModelForImageClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAutoModelForMaskedLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAutoModelForMultipleChoice(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAutoModelForNextSentencePrediction(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAutoModelForPreTraining(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAutoModelForQuestionAnswering(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAutoModelForSeq2SeqLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAutoModelForSequenceClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAutoModelForSpeechSeq2Seq(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAutoModelForTokenClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxAutoModelForVision2Seq(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBartDecoderPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBartForCausalLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBartForConditionalGeneration(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBartForQuestionAnswering(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBartForSequenceClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBartModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBartPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBeitForImageClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBeitForMaskedImageModeling(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBeitModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBeitPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBertForCausalLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBertForMaskedLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBertForMultipleChoice(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBertForNextSentencePrediction(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBertForPreTraining(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBertForSequenceClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBertForTokenClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBertModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBertPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBigBirdForCausalLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBigBirdForMaskedLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBigBirdForMultipleChoice(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBigBirdForPreTraining(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBigBirdForQuestionAnswering(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBigBirdForSequenceClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBigBirdForTokenClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBigBirdModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBigBirdPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBlenderbotForConditionalGeneration(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBlenderbotModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBlenderbotPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBlenderbotSmallForConditionalGeneration(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBlenderbotSmallModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxBlenderbotSmallPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxCLIPModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxCLIPPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxCLIPTextModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxCLIPTextPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxCLIPVisionModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxCLIPVisionPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxDistilBertForMaskedLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxDistilBertForMultipleChoice(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxDistilBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxDistilBertForSequenceClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxDistilBertForTokenClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxDistilBertModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxDistilBertPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxElectraForCausalLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxElectraForMaskedLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxElectraForMultipleChoice(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxElectraForPreTraining(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxElectraForQuestionAnswering(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxElectraForSequenceClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxElectraForTokenClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxElectraModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxElectraPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxEncoderDecoderModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxGPT2LMHeadModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxGPT2Model(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxGPT2PreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxGPTNeoForCausalLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxGPTNeoModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxGPTNeoPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxGPTJForCausalLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxGPTJModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxGPTJPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxLongT5ForConditionalGeneration(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxLongT5Model(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxLongT5PreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxMarianModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxMarianMTModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxMarianPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxMBartForConditionalGeneration(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxMBartForQuestionAnswering(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxMBartForSequenceClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxMBartModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxMBartPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxMT5EncoderModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxMT5ForConditionalGeneration(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxMT5Model(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxOPTForCausalLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxOPTModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxOPTPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxPegasusForConditionalGeneration(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxPegasusModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxPegasusPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRobertaForCausalLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRobertaForMaskedLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRobertaForMultipleChoice(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRobertaForQuestionAnswering(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRobertaForSequenceClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRobertaForTokenClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRobertaModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRobertaPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRobertaPreLayerNormForCausalLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRobertaPreLayerNormForMaskedLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRobertaPreLayerNormForMultipleChoice(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRobertaPreLayerNormForSequenceClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRobertaPreLayerNormForTokenClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRobertaPreLayerNormModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRobertaPreLayerNormPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRoFormerForMaskedLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRoFormerForMultipleChoice(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRoFormerForQuestionAnswering(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRoFormerForSequenceClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRoFormerForTokenClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRoFormerModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxRoFormerPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxSpeechEncoderDecoderModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxT5EncoderModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxT5ForConditionalGeneration(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxT5Model(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxT5PreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxVisionEncoderDecoderModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxVisionTextDualEncoderModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxViTForImageClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxViTModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxViTPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxWav2Vec2ForCTC(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxWav2Vec2ForPreTraining(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxWav2Vec2Model(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxWav2Vec2PreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxWhisperForConditionalGeneration(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxWhisperModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxWhisperPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxXGLMForCausalLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxXGLMModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxXGLMPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class FlaxXLMRobertaForCausalLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxXLMRobertaForMaskedLM(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxXLMRobertaForMultipleChoice(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxXLMRobertaForQuestionAnswering(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxXLMRobertaForSequenceClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxXLMRobertaForTokenClassification(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxXLMRobertaModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
class FlaxXLMRobertaPreTrainedModel(metaclass=DummyObject):
_backends = ["flax"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["flax"])
|
27182812/ChatGLM-LLaMA-chinese-insturct | 10,159 | src/transformers/utils/logging.py | # coding=utf-8
# Copyright 2020 Optuna, Hugging Face
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Logging utilities."""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
"""
If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the new default level. If
it is not, fall back to `_default_log_level`.
"""
env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
f"has to be one of: { ', '.join(log_levels.keys()) }"
)
return _default_log_level
def _get_library_name() -> str:
return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_default_handler = logging.StreamHandler() # Set sys.stderr as stream.
_default_handler.flush = sys.stderr.flush
# Apply our default configuration to the library root logger.
library_root_logger = _get_library_root_logger()
library_root_logger.addHandler(_default_handler)
library_root_logger.setLevel(_get_default_logging_level())
library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
global _default_handler
with _lock:
if not _default_handler:
return
library_root_logger = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler)
library_root_logger.setLevel(logging.NOTSET)
_default_handler = None
def get_log_levels_dict():
return log_levels
def get_logger(name: Optional[str] = None) -> logging.Logger:
"""
Return a logger with the specified name.
This function is not supposed to be directly accessed unless you are writing a custom transformers module.
"""
if name is None:
name = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(name)
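# A minimal usage sketch (the module name is illustrative): loggers obtained this way
# share the library root logger's handler and verbosity.
def _demo_get_logger():
    logger = get_logger("transformers.modeling_utils")
    logger.warning("shown at the default WARNING verbosity")
    logger.info("hidden unless the verbosity is lowered to INFO or below")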
def get_verbosity() -> int:
"""
Return the current level for the 🤗 Transformers's root logger as an int.
Returns:
`int`: The logging level.
<Tip>
🤗 Transformers has the following logging levels:
- 50: `transformers.logging.CRITICAL` or `transformers.logging.FATAL`
- 40: `transformers.logging.ERROR`
- 30: `transformers.logging.WARNING` or `transformers.logging.WARN`
- 20: `transformers.logging.INFO`
- 10: `transformers.logging.DEBUG`
</Tip>"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
"""
Set the verbosity level for the 🤗 Transformers's root logger.
Args:
verbosity (`int`):
Logging level, e.g., one of:
- `transformers.logging.CRITICAL` or `transformers.logging.FATAL`
- `transformers.logging.ERROR`
- `transformers.logging.WARNING` or `transformers.logging.WARN`
- `transformers.logging.INFO`
- `transformers.logging.DEBUG`
"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
"""Set the verbosity to the `INFO` level."""
return set_verbosity(INFO)
def set_verbosity_warning():
"""Set the verbosity to the `WARNING` level."""
return set_verbosity(WARNING)
def set_verbosity_debug():
"""Set the verbosity to the `DEBUG` level."""
return set_verbosity(DEBUG)
def set_verbosity_error():
"""Set the verbosity to the `ERROR` level."""
return set_verbosity(ERROR)
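# A short sketch of the helpers above: each setter is a thin wrapper around
# `set_verbosity` with the matching stdlib level.
def _demo_verbosity():
    set_verbosity_debug()
    assert get_verbosity() == DEBUG
    set_verbosity_warning()  # restore the library default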
def disable_default_handler() -> None:
"""Disable the default handler of the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler)
def enable_default_handler() -> None:
"""Enable the default handler of the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler)
def add_handler(handler: logging.Handler) -> None:
"""adds a handler to the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
"""removes given handler from the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
"""
Disable propagation of the library log outputs. Note that log propagation is disabled by default.
"""
_configure_library_root_logger()
_get_library_root_logger().propagate = False
def enable_propagation() -> None:
"""
Enable propagation of the library log outputs. Please disable the HuggingFace Transformers's default handler to
prevent double logging if the root logger has been configured.
"""
_configure_library_root_logger()
_get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
"""
Enable explicit formatting for every HuggingFace Transformers's logger. The explicit formatter is as follows:
```
[LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE
```
All handlers currently bound to the root logger are affected by this method.
"""
handlers = _get_library_root_logger().handlers
for handler in handlers:
formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
handler.setFormatter(formatter)
def reset_format() -> None:
"""
Resets the formatting for HuggingFace Transformers's loggers.
All handlers currently bound to the root logger are affected by this method.
"""
handlers = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
"""
This method is identical to `logger.warning()`, but if env var TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set, this
warning will not be printed
"""
no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
if no_advisory_warnings:
return
self.warning(*args, **kwargs)
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
"""
This method is identical to `logger.warning()`, but will emit the warning with the same message only once.
Note: The cache is keyed on the function arguments, so two different callers using the same arguments will hit the
cache. The assumption here is that all warning messages are unique across the code. If they aren't, we need to
switch to another type of cache that includes the caller frame information in the hashing function.
"""
self.warning(*args, **kwargs)
logging.Logger.warning_once = warning_once
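# A minimal sketch of the deduplication above: `functools.lru_cache` keys on the
# (logger, message) pair, so the second identical call is a silent cache hit.
def _demo_warning_once():
    logger = get_logger(__name__)
    logger.warning_once("this message is emitted once")
    logger.warning_once("this message is emitted once")  # cache hit, nothing logged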
class EmptyTqdm:
"""Dummy tqdm which doesn't do anything."""
def __init__(self, *args, **kwargs): # pylint: disable=unused-argument
self._iterator = args[0] if args else None
def __iter__(self):
return iter(self._iterator)
def __getattr__(self, _):
"""Return empty function."""
def empty_fn(*args, **kwargs): # pylint: disable=unused-argument
return
return empty_fn
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
return
class _tqdm_cls:
def __call__(self, *args, **kwargs):
if _tqdm_active:
return tqdm_lib.tqdm(*args, **kwargs)
else:
return EmptyTqdm(*args, **kwargs)
def set_lock(self, *args, **kwargs):
self._lock = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*args, **kwargs)
def get_lock(self):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
"""Return a boolean indicating whether tqdm progress bars are enabled."""
global _tqdm_active
return bool(_tqdm_active)
def enable_progress_bar():
"""Enable tqdm progress bar."""
global _tqdm_active
_tqdm_active = True
hf_hub_utils.enable_progress_bars()
def disable_progress_bar():
"""Disable tqdm progress bar."""
global _tqdm_active
_tqdm_active = False
hf_hub_utils.disable_progress_bars()
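# A usage sketch: the module-level `tqdm` proxy defined above honors these toggles,
# so library code can call it unconditionally.
def _demo_progress_bars():
    disable_progress_bar()
    for _ in tqdm(range(3)):  # EmptyTqdm, produces no output
        pass
    enable_progress_bar()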
|
27182812/ChatGLM-LLaMA-chinese-insturct | 5,401 | src/transformers/utils/quantization_config.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
@dataclass
class BitsAndBytesConfig:
"""
This is a wrapper class for all possible attributes and features that you can play with for a model that has
been loaded using `bitsandbytes`.
This replaces `load_in_8bit`, therefore both options are mutually exclusive.
For now, only arguments that are relevant to `LLM.int8()` are supported, therefore the arguments are all named
`llm_int8_*`. If more methods are added to `bitsandbytes`, then more arguments will be added to this class.
Args:
load_in_8bit (`bool`, *optional*, defaults to `False`):
This flag is used to enable 8-bit quantization with LLM.int8().
llm_int8_threshold (`float`, *optional*, defaults to 6.0):
This corresponds to the outlier threshold for outlier detection as described in `LLM.int8() : 8-bit Matrix
Multiplication for Transformers at Scale` paper: https://arxiv.org/abs/2208.07339 Any hidden states value
that is above this threshold will be considered an outlier and the operation on those values will be done
in fp16. Values are usually normally distributed, that is, most values are in the range [-3.5, 3.5], but
there are some exceptional systematic outliers that are very differently distributed for large models.
These outliers are often in the interval [-60, -6] or [6, 60]. Int8 quantization works well for values of
magnitude ~5, but beyond that, there is a significant performance penalty. A good default threshold is 6,
but a lower threshold might be needed for more unstable models (small models, fine-tuning).
llm_int8_skip_modules (`List[str]`, *optional*):
An explicit list of the modules that we do not want to convert to 8-bit. This is useful for models such as
Jukebox that have several heads in different places and not necessarily at the last position. For example,
for `CausalLM` models, the last `lm_head` is kept in its original `dtype`.
llm_int8_enable_fp32_cpu_offload (`bool`, *optional*, defaults to `False`):
This flag is used for advanced use cases and users who are aware of this feature. If you want to split
your model in different parts and run some parts in int8 on GPU and some parts in fp32 on CPU, you can use
this flag. This is useful for offloading large models such as `google/flan-t5-xxl`. Note that the int8
operations will not be run on CPU.
"""
def __init__(
self,
load_in_8bit=False,
llm_int8_threshold=6.0,
llm_int8_skip_modules=None,
llm_int8_enable_fp32_cpu_offload=False,
):
self.load_in_8bit = load_in_8bit
self.llm_int8_threshold = llm_int8_threshold
self.llm_int8_skip_modules = llm_int8_skip_modules
self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
self.post_init()
def post_init(self):
r"""
Safety checker to ensure that arguments are correct; also replaces some `NoneType` arguments with their default values.
"""
if not isinstance(self.llm_int8_threshold, float):
raise ValueError("llm_int8_threshold must be a float")
if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
raise ValueError("llm_int8_skip_modules must be a list of strings")
if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
@classmethod
def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
"""
Instantiates a [`BitsAndBytesConfig`] from a Python dictionary of parameters.
Args:
config_dict (`Dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the [`~PretrainedConfig.get_config_dict`] method.
return_unused_kwargs (`bool`):
Whether to also return the kwargs that were not used to update the configuration object.
kwargs (`Dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
[`BitsAndBytesConfig`]: The configuration object instantiated from those parameters.
"""
config = cls(**config_dict)
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
if return_unused_kwargs:
return config, kwargs
else:
return config
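# A minimal usage sketch. Constructing the config is local; actually loading an
# 8-bit model additionally requires `bitsandbytes` and a CUDA device, e.g. via
# `AutoModelForCausalLM.from_pretrained(..., quantization_config=config)`.
def _demo_bitsandbytes_config():
    config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    assert config.load_in_8bit
    # `from_dict` builds a config from a plain dict and can report unused kwargs:
    config2, unused = BitsAndBytesConfig.from_dict(
        {"load_in_8bit": True}, return_unused_kwargs=True, llm_int8_threshold=4.0
    )
    assert config2.llm_int8_threshold == 4.0 and unused == {}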
|
27182812/ChatGLM-LLaMA-chinese-insturct | 2,955 | src/transformers/utils/dummy_timm_and_vision_objects.py | # This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ConditionalDetrForObjectDetection(metaclass=DummyObject):
_backends = ["timm", "vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["timm", "vision"])
class ConditionalDetrForSegmentation(metaclass=DummyObject):
_backends = ["timm", "vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["timm", "vision"])
class ConditionalDetrModel(metaclass=DummyObject):
_backends = ["timm", "vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["timm", "vision"])
class ConditionalDetrPreTrainedModel(metaclass=DummyObject):
_backends = ["timm", "vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["timm", "vision"])
DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DeformableDetrForObjectDetection(metaclass=DummyObject):
_backends = ["timm", "vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["timm", "vision"])
class DeformableDetrModel(metaclass=DummyObject):
_backends = ["timm", "vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["timm", "vision"])
class DeformableDetrPreTrainedModel(metaclass=DummyObject):
_backends = ["timm", "vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["timm", "vision"])
DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DetrForObjectDetection(metaclass=DummyObject):
_backends = ["timm", "vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["timm", "vision"])
class DetrForSegmentation(metaclass=DummyObject):
_backends = ["timm", "vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["timm", "vision"])
class DetrModel(metaclass=DummyObject):
_backends = ["timm", "vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["timm", "vision"])
class DetrPreTrainedModel(metaclass=DummyObject):
_backends = ["timm", "vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["timm", "vision"])
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TableTransformerForObjectDetection(metaclass=DummyObject):
_backends = ["timm", "vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["timm", "vision"])
class TableTransformerModel(metaclass=DummyObject):
_backends = ["timm", "vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["timm", "vision"])
class TableTransformerPreTrainedModel(metaclass=DummyObject):
_backends = ["timm", "vision"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["timm", "vision"])
|
27182812/ChatGLM-LLaMA-chinese-insturct | 5,575 | src/transformers/utils/dummy_sentencepiece_objects.py | # This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
class AlbertTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class BarthezTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class BartphoTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class BertGenerationTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class BigBirdTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class CamembertTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class CpmTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class DebertaV2Tokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class ErnieMTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class FNetTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class GPTSw3Tokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class LayoutXLMTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class LlamaTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class M2M100Tokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class MarianTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class MBart50Tokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class MBartTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class MLukeTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class MT5Tokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class NllbTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class PegasusTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class PLBartTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class ReformerTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class RemBertTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class Speech2TextTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class SpeechT5Tokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class T5Tokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class XGLMTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class XLMProphetNetTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class XLMRobertaTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
class XLNetTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
|
27182812/ChatGLM-LLaMA-chinese-insturct | 40,011 | src/transformers/utils/doc.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Doc utilities: Utilities related to documentation
"""
import functools
import re
import types
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
return fn
return docstring_decorator
def add_start_docstrings_to_model_forward(*docstr):
def docstring_decorator(fn):
docstring = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
class_name = f"[`{fn.__qualname__.split('.')[0]}`]"
intro = f" The {class_name} forward method, overrides the `__call__` special method."
note = r"""
<Tip>
Although the recipe for the forward pass needs to be defined within this function, one should call the [`Module`]
instance afterwards instead of this since the former takes care of running the pre- and post-processing steps while
the latter silently ignores them.
</Tip>
"""
fn.__doc__ = intro + note + docstring
return fn
return docstring_decorator
def add_end_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "".join(docstr)
return fn
return docstring_decorator
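# Hedged usage sketch for the three decorators above (the function and
# docstrings are illustrative):
#
#   @add_start_docstrings("Shared intro. ")
#   @add_end_docstrings(" Shared outro.")
#   def f():
#       """Core description."""
#
#   # Decorators apply bottom-up, so f.__doc__ becomes:
#   # "Shared intro. Core description. Shared outro."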
PT_RETURN_INTRODUCTION = r"""
Returns:
[`{full_output_type}`] or `tuple(torch.FloatTensor)`: A [`{full_output_type}`] or a tuple of
`torch.FloatTensor` (if `return_dict=False` is passed or when `config.return_dict=False`) comprising various
elements depending on the configuration ([`{config_class}`]) and inputs.
"""
TF_RETURN_INTRODUCTION = r"""
Returns:
[`{full_output_type}`] or `tuple(tf.Tensor)`: A [`{full_output_type}`] or a tuple of `tf.Tensor` (if
`return_dict=False` is passed or when `config.return_dict=False`) comprising various elements depending on the
configuration ([`{config_class}`]) and inputs.
"""
def _get_indent(t):
"""Returns the indentation in the first line of t"""
search = re.search(r"^(\s*)\S", t)
return "" if search is None else search.groups()[0]
def _convert_output_args_doc(output_args_doc):
"""Convert output_args_doc to display properly."""
# Split output_args_doc into argument/description blocks
indent = _get_indent(output_args_doc)
blocks = []
current_block = ""
for line in output_args_doc.split("\n"):
# If the indent is the same as at the beginning, the line is the name of a new arg.
if _get_indent(line) == indent:
if len(current_block) > 0:
blocks.append(current_block[:-1])
current_block = f"{line}\n"
else:
# Otherwise it's part of the description of the current arg.
# We need to remove 2 spaces from the indentation.
current_block += f"{line[2:]}\n"
blocks.append(current_block[:-1])
# Format each block for proper rendering
for i in range(len(blocks)):
blocks[i] = re.sub(r"^(\s+)(\S+)(\s+)", r"\1- **\2**\3", blocks[i])
blocks[i] = re.sub(r":\s*\n\s*(\S)", r" -- \1", blocks[i])
return "\n".join(blocks)
def _prepare_output_docstrings(output_type, config_class, min_indent=None):
"""
Prepares the return part of the docstring using `output_type`.
"""
output_docstring = output_type.__doc__
# Remove the head of the docstring to keep the list of args only
lines = output_docstring.split("\n")
i = 0
while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None:
i += 1
if i < len(lines):
params_docstring = "\n".join(lines[(i + 1) :])
params_docstring = _convert_output_args_doc(params_docstring)
# Add the return introduction
full_output_type = f"{output_type.__module__}.{output_type.__name__}"
intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION
intro = intro.format(full_output_type=full_output_type, config_class=config_class)
result = intro + params_docstring
# Apply minimum indent if necessary
if min_indent is not None:
lines = result.split("\n")
# Find the indent of the first nonempty line
i = 0
while len(lines[i]) == 0:
i += 1
indent = len(_get_indent(lines[i]))
# If too small, add indentation to all nonempty lines
if indent < min_indent:
to_add = " " * (min_indent - indent)
lines = [(f"{to_add}{line}" if len(line) > 0 else line) for line in lines]
result = "\n".join(lines)
return result
FAKE_MODEL_DISCLAIMER = """
<Tip warning={true}>
This example uses a random model because the real ones are all very big. To get proper results, you should use
{real_checkpoint} instead of {fake_checkpoint}. If you run out of memory when loading that checkpoint, you can try
adding `device_map="auto"` in the `from_pretrained` call.
</Tip>
"""
PT_TOKEN_CLASSIFICATION_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoTokenizer, {model_class}
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer(
... "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="pt"
... )
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> predicted_token_class_ids = logits.argmax(-1)
>>> # Note that tokens are classified rather then input words which means that
>>> # there might be more predicted token classes than words.
>>> # Multiple token classes might account for the same word
>>> predicted_tokens_classes = [model.config.id2label[t.item()] for t in predicted_token_class_ids[0]]
>>> predicted_tokens_classes
{expected_output}
>>> labels = predicted_token_class_ids
>>> loss = model(**inputs, labels=labels).loss
>>> round(loss.item(), 2)
{expected_loss}
```
"""
PT_QUESTION_ANSWERING_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoTokenizer, {model_class}
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
>>> inputs = tokenizer(question, text, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> answer_start_index = outputs.start_logits.argmax()
>>> answer_end_index = outputs.end_logits.argmax()
>>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
>>> tokenizer.decode(predict_answer_tokens, skip_special_tokens=True)
{expected_output}
>>> # target is "nice puppet"
>>> target_start_index = torch.tensor([{qa_target_start_index}])
>>> target_end_index = torch.tensor([{qa_target_end_index}])
>>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index)
>>> loss = outputs.loss
>>> round(loss.item(), 2)
{expected_loss}
```
"""
PT_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
Example of single-label classification:
```python
>>> import torch
>>> from transformers import AutoTokenizer, {model_class}
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> predicted_class_id = logits.argmax().item()
>>> model.config.id2label[predicted_class_id]
{expected_output}
>>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
>>> num_labels = len(model.config.id2label)
>>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels)
>>> labels = torch.tensor([1])
>>> loss = model(**inputs, labels=labels).loss
>>> round(loss.item(), 2)
{expected_loss}
```
Example of multi-label classification:
```python
>>> import torch
>>> from transformers import AutoTokenizer, {model_class}
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}", problem_type="multi_label_classification")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> predicted_class_ids = torch.arange(0, logits.shape[-1])[torch.sigmoid(logits).squeeze(dim=0) > 0.5]
>>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
>>> num_labels = len(model.config.id2label)
>>> model = {model_class}.from_pretrained(
... "{checkpoint}", num_labels=num_labels, problem_type="multi_label_classification"
... )
>>> labels = torch.sum(
... torch.nn.functional.one_hot(predicted_class_ids[None, :].clone(), num_classes=num_labels), dim=1
... ).to(torch.float)
>>> loss = model(**inputs, labels=labels).loss
```
"""
PT_MASKED_LM_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoTokenizer, {model_class}
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="pt")
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> # retrieve index of {mask}
>>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
>>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1)
>>> tokenizer.decode(predicted_token_id)
{expected_output}
>>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"]
>>> # mask labels of non-{mask} tokens
>>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100)
>>> outputs = model(**inputs, labels=labels)
>>> round(outputs.loss.item(), 2)
{expected_loss}
```
"""
PT_BASE_MODEL_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoTokenizer, {model_class}
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```
"""
PT_MULTIPLE_CHOICE_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoTokenizer, {model_class}
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True)
>>> outputs = model(**{{k: v.unsqueeze(0) for k, v in encoding.items()}}, labels=labels) # batch size is 1
>>> # the linear classifier still needs to be trained
>>> loss = outputs.loss
>>> logits = outputs.logits
```
"""
PT_CAUSAL_LM_SAMPLE = r"""
Example:
```python
>>> import torch
>>> from transformers import AutoTokenizer, {model_class}
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs, labels=inputs["input_ids"])
>>> loss = outputs.loss
>>> logits = outputs.logits
```
"""
PT_SPEECH_BASE_MODEL_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoProcessor, {model_class}
>>> import torch
>>> from datasets import load_dataset
>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> processor = AutoProcessor.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> # audio file is decoded on the fly
>>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
{expected_output}
```
"""
PT_SPEECH_CTC_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoProcessor, {model_class}
>>> from datasets import load_dataset
>>> import torch
>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> processor = AutoProcessor.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> # audio file is decoded on the fly
>>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> predicted_ids = torch.argmax(logits, dim=-1)
>>> # transcribe speech
>>> transcription = processor.batch_decode(predicted_ids)
>>> transcription[0]
{expected_output}
>>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="pt").input_ids
>>> # compute loss
>>> loss = model(**inputs).loss
>>> round(loss.item(), 2)
{expected_loss}
```
"""
PT_SPEECH_SEQ_CLASS_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoFeatureExtractor, {model_class}
>>> from datasets import load_dataset
>>> import torch
>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> # audio file is decoded on the fly
>>> inputs = feature_extractor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> predicted_class_ids = torch.argmax(logits, dim=-1).item()
>>> predicted_label = model.config.id2label[predicted_class_ids]
>>> predicted_label
{expected_output}
>>> # compute loss - target_label is e.g. "down"
>>> target_label = model.config.id2label[0]
>>> inputs["labels"] = torch.tensor([model.config.label2id[target_label]])
>>> loss = model(**inputs).loss
>>> round(loss.item(), 2)
{expected_loss}
```
"""
PT_SPEECH_FRAME_CLASS_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoFeatureExtractor, {model_class}
>>> from datasets import load_dataset
>>> import torch
>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> # audio file is decoded on the fly
>>> inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt", sampling_rate=sampling_rate)
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> probabilities = torch.sigmoid(logits[0])
>>> # labels is a one-hot array of shape (num_frames, num_speakers)
>>> labels = (probabilities > 0.5).long()
>>> labels[0].tolist()
{expected_output}
```
"""
PT_SPEECH_XVECTOR_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoFeatureExtractor, {model_class}
>>> from datasets import load_dataset
>>> import torch
>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> # audio file is decoded on the fly
>>> inputs = feature_extractor(
... [d["array"] for d in dataset[:2]["audio"]], sampling_rate=sampling_rate, return_tensors="pt", padding=True
... )
>>> with torch.no_grad():
... embeddings = model(**inputs).embeddings
>>> embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu()
>>> # the resulting embeddings can be used for cosine similarity-based retrieval
>>> cosine_sim = torch.nn.CosineSimilarity(dim=-1)
>>> similarity = cosine_sim(embeddings[0], embeddings[1])
>>> threshold = 0.7 # the optimal threshold is dataset-dependent
>>> if similarity < threshold:
... print("Speakers are not the same!")
>>> round(similarity.item(), 2)
{expected_output}
```
"""
PT_VISION_BASE_MODEL_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoImageProcessor, {model_class}
>>> import torch
>>> from datasets import load_dataset
>>> dataset = load_dataset("huggingface/cats-image")
>>> image = dataset["test"]["image"][0]
>>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = image_processor(image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
{expected_output}
```
"""
PT_VISION_SEQ_CLASS_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoImageProcessor, {model_class}
>>> import torch
>>> from datasets import load_dataset
>>> dataset = load_dataset("huggingface/cats-image")
>>> image = dataset["test"]["image"][0]
>>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = image_processor(image, return_tensors="pt")
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_label = logits.argmax(-1).item()
>>> print(model.config.id2label[predicted_label])
{expected_output}
```
"""
PT_SAMPLE_DOCSTRINGS = {
"SequenceClassification": PT_SEQUENCE_CLASSIFICATION_SAMPLE,
"QuestionAnswering": PT_QUESTION_ANSWERING_SAMPLE,
"TokenClassification": PT_TOKEN_CLASSIFICATION_SAMPLE,
"MultipleChoice": PT_MULTIPLE_CHOICE_SAMPLE,
"MaskedLM": PT_MASKED_LM_SAMPLE,
"LMHead": PT_CAUSAL_LM_SAMPLE,
"BaseModel": PT_BASE_MODEL_SAMPLE,
"SpeechBaseModel": PT_SPEECH_BASE_MODEL_SAMPLE,
"CTC": PT_SPEECH_CTC_SAMPLE,
"AudioClassification": PT_SPEECH_SEQ_CLASS_SAMPLE,
"AudioFrameClassification": PT_SPEECH_FRAME_CLASS_SAMPLE,
"AudioXVector": PT_SPEECH_XVECTOR_SAMPLE,
"VisionBaseModel": PT_VISION_BASE_MODEL_SAMPLE,
"ImageClassification": PT_VISION_SEQ_CLASS_SAMPLE,
}
TF_TOKEN_CLASSIFICATION_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoTokenizer, {model_class}
>>> import tensorflow as tf
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer(
... "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="tf"
... )
>>> logits = model(**inputs).logits
>>> predicted_token_class_ids = tf.math.argmax(logits, axis=-1)
>>> # Note that tokens are classified rather then input words which means that
>>> # there might be more predicted token classes than words.
>>> # Multiple token classes might account for the same word
>>> predicted_tokens_classes = [model.config.id2label[t] for t in predicted_token_class_ids[0].numpy().tolist()]
>>> predicted_tokens_classes
{expected_output}
```
```python
>>> labels = predicted_token_class_ids
>>> loss = tf.math.reduce_mean(model(**inputs, labels=labels).loss)
>>> round(float(loss), 2)
{expected_loss}
```
"""
TF_QUESTION_ANSWERING_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoTokenizer, {model_class}
>>> import tensorflow as tf
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
>>> inputs = tokenizer(question, text, return_tensors="tf")
>>> outputs = model(**inputs)
>>> answer_start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
>>> answer_end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
>>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
>>> tokenizer.decode(predict_answer_tokens)
{expected_output}
```
```python
>>> # target is "nice puppet"
>>> target_start_index = tf.constant([{qa_target_start_index}])
>>> target_end_index = tf.constant([{qa_target_end_index}])
>>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index)
>>> loss = tf.math.reduce_mean(outputs.loss)
>>> round(float(loss), 2)
{expected_loss}
```
"""
TF_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoTokenizer, {model_class}
>>> import tensorflow as tf
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> logits = model(**inputs).logits
>>> predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0])
>>> model.config.id2label[predicted_class_id]
{expected_output}
```
```python
>>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
>>> num_labels = len(model.config.id2label)
>>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels)
>>> labels = tf.constant(1)
>>> loss = model(**inputs, labels=labels).loss
>>> round(float(loss), 2)
{expected_loss}
```
"""
TF_MASKED_LM_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoTokenizer, {model_class}
>>> import tensorflow as tf
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="tf")
>>> logits = model(**inputs).logits
>>> # retrieve index of {mask}
>>> mask_token_index = tf.where((inputs.input_ids == tokenizer.mask_token_id)[0])
>>> selected_logits = tf.gather_nd(logits[0], indices=mask_token_index)
>>> predicted_token_id = tf.math.argmax(selected_logits, axis=-1)
>>> tokenizer.decode(predicted_token_id)
{expected_output}
```
```python
>>> labels = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"]
>>> # mask labels of non-{mask} tokens
>>> labels = tf.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100)
>>> outputs = model(**inputs, labels=labels)
>>> round(float(outputs.loss), 2)
{expected_loss}
```
"""
TF_BASE_MODEL_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoTokenizer, {model_class}
>>> import tensorflow as tf
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> outputs = model(inputs)
>>> last_hidden_states = outputs.last_hidden_state
```
"""
TF_MULTIPLE_CHOICE_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoTokenizer, {model_class}
>>> import tensorflow as tf
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="tf", padding=True)
>>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}}
>>> outputs = model(inputs) # batch size is 1
>>> # the linear classifier still needs to be trained
>>> logits = outputs.logits
```
"""
TF_CAUSAL_LM_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoTokenizer, {model_class}
>>> import tensorflow as tf
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> outputs = model(inputs)
>>> logits = outputs.logits
```
"""
TF_SPEECH_BASE_MODEL_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoProcessor, {model_class}
>>> from datasets import load_dataset
>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> processor = AutoProcessor.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> # audio file is decoded on the fly
>>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
{expected_output}
```
"""
TF_SPEECH_CTC_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoProcessor, {model_class}
>>> from datasets import load_dataset
>>> import tensorflow as tf
>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> processor = AutoProcessor.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> # audio file is decoded on the fly
>>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf")
>>> logits = model(**inputs).logits
>>> predicted_ids = tf.math.argmax(logits, axis=-1)
>>> # transcribe speech
>>> transcription = processor.batch_decode(predicted_ids)
>>> transcription[0]
{expected_output}
```
```python
>>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="tf").input_ids
>>> # compute loss
>>> loss = model(**inputs).loss
>>> round(float(loss), 2)
{expected_loss}
```
"""
TF_VISION_BASE_MODEL_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoImageProcessor, {model_class}
>>> from datasets import load_dataset
>>> dataset = load_dataset("huggingface/cats-image")
>>> image = dataset["test"]["image"][0]
>>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = image_processor(image, return_tensors="tf")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
{expected_output}
```
"""
TF_VISION_SEQ_CLASS_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoImageProcessor, {model_class}
>>> import tensorflow as tf
>>> from datasets import load_dataset
>>> dataset = load_dataset("huggingface/cats-image")
>>> image = dataset["test"]["image"][0]
>>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = image_processor(image, return_tensors="tf")
>>> logits = model(**inputs).logits
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_label = int(tf.math.argmax(logits, axis=-1))
>>> print(model.config.id2label[predicted_label])
{expected_output}
```
"""
TF_SAMPLE_DOCSTRINGS = {
"SequenceClassification": TF_SEQUENCE_CLASSIFICATION_SAMPLE,
"QuestionAnswering": TF_QUESTION_ANSWERING_SAMPLE,
"TokenClassification": TF_TOKEN_CLASSIFICATION_SAMPLE,
"MultipleChoice": TF_MULTIPLE_CHOICE_SAMPLE,
"MaskedLM": TF_MASKED_LM_SAMPLE,
"LMHead": TF_CAUSAL_LM_SAMPLE,
"BaseModel": TF_BASE_MODEL_SAMPLE,
"SpeechBaseModel": TF_SPEECH_BASE_MODEL_SAMPLE,
"CTC": TF_SPEECH_CTC_SAMPLE,
"VisionBaseModel": TF_VISION_BASE_MODEL_SAMPLE,
"ImageClassification": TF_VISION_SEQ_CLASS_SAMPLE,
}
FLAX_TOKEN_CLASSIFICATION_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoTokenizer, {model_class}
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
```
"""
FLAX_QUESTION_ANSWERING_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoTokenizer, {model_class}
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
>>> inputs = tokenizer(question, text, return_tensors="jax")
>>> outputs = model(**inputs)
>>> start_scores = outputs.start_logits
>>> end_scores = outputs.end_logits
```
"""
FLAX_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoTokenizer, {model_class}
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
```
"""
FLAX_MASKED_LM_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoTokenizer, {model_class}
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="jax")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
```
"""
FLAX_BASE_MODEL_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoTokenizer, {model_class}
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```
"""
FLAX_MULTIPLE_CHOICE_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoTokenizer, {model_class}
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="jax", padding=True)
>>> outputs = model(**{{k: v[None, :] for k, v in encoding.items()}})
>>> logits = outputs.logits
```
"""
FLAX_CAUSAL_LM_SAMPLE = r"""
Example:
```python
>>> from transformers import AutoTokenizer, {model_class}
>>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
>>> outputs = model(**inputs)
>>> # retrieve logts for next token
>>> next_token_logits = outputs.logits[:, -1]
```
"""
FLAX_SAMPLE_DOCSTRINGS = {
"SequenceClassification": FLAX_SEQUENCE_CLASSIFICATION_SAMPLE,
"QuestionAnswering": FLAX_QUESTION_ANSWERING_SAMPLE,
"TokenClassification": FLAX_TOKEN_CLASSIFICATION_SAMPLE,
"MultipleChoice": FLAX_MULTIPLE_CHOICE_SAMPLE,
"MaskedLM": FLAX_MASKED_LM_SAMPLE,
"BaseModel": FLAX_BASE_MODEL_SAMPLE,
"LMHead": FLAX_CAUSAL_LM_SAMPLE,
}
def filter_outputs_from_example(docstring, **kwargs):
"""
Removes the lines that test an output with the doctest syntax from a code sample when the corresponding value is set to `None`.
"""
for key, value in kwargs.items():
if value is not None:
continue
doc_key = "{" + key + "}"
docstring = re.sub(rf"\n([^\n]+)\n\s+{doc_key}\n", "\n", docstring)
return docstring
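# Hedged sketch of the filtering above: with `expected_output=None`, a sample
# fragment like
#
#   >>> predicted_tokens_classes
#   {expected_output}
#
# is removed entirely, so the rendered doctest no longer checks that output.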
def add_code_sample_docstrings(
*docstr,
processor_class=None,
checkpoint=None,
output_type=None,
config_class=None,
mask="[MASK]",
qa_target_start_index=14,
qa_target_end_index=15,
model_cls=None,
modality=None,
expected_output=None,
expected_loss=None,
real_checkpoint=None,
):
def docstring_decorator(fn):
# model_class defaults to the function's owning class if not specified otherwise
model_class = fn.__qualname__.split(".")[0] if model_cls is None else model_cls
if model_class[:2] == "TF":
sample_docstrings = TF_SAMPLE_DOCSTRINGS
elif model_class[:4] == "Flax":
sample_docstrings = FLAX_SAMPLE_DOCSTRINGS
else:
sample_docstrings = PT_SAMPLE_DOCSTRINGS
# putting all kwargs for docstrings in a dict to be used
# with `.format(**doc_kwargs)`. Note that the dict can contain
# keys that don't appear in the string, which is fine.
doc_kwargs = {
"model_class": model_class,
"processor_class": processor_class,
"checkpoint": checkpoint,
"mask": mask,
"qa_target_start_index": qa_target_start_index,
"qa_target_end_index": qa_target_end_index,
"expected_output": expected_output,
"expected_loss": expected_loss,
"real_checkpoint": real_checkpoint,
"fake_checkpoint": checkpoint,
"true": "{true}", # For <Tip warning={true}> syntax that conflicts with formatting.
}
if ("SequenceClassification" in model_class or "AudioClassification" in model_class) and modality == "audio":
code_sample = sample_docstrings["AudioClassification"]
elif "SequenceClassification" in model_class:
code_sample = sample_docstrings["SequenceClassification"]
elif "QuestionAnswering" in model_class:
code_sample = sample_docstrings["QuestionAnswering"]
elif "TokenClassification" in model_class:
code_sample = sample_docstrings["TokenClassification"]
elif "MultipleChoice" in model_class:
code_sample = sample_docstrings["MultipleChoice"]
elif "MaskedLM" in model_class or model_class in ["FlaubertWithLMHeadModel", "XLMWithLMHeadModel"]:
code_sample = sample_docstrings["MaskedLM"]
elif "LMHead" in model_class or "CausalLM" in model_class:
code_sample = sample_docstrings["LMHead"]
elif "CTC" in model_class:
code_sample = sample_docstrings["CTC"]
elif "AudioFrameClassification" in model_class:
code_sample = sample_docstrings["AudioFrameClassification"]
elif "XVector" in model_class and modality == "audio":
code_sample = sample_docstrings["AudioXVector"]
elif "Model" in model_class and modality == "audio":
code_sample = sample_docstrings["SpeechBaseModel"]
elif "Model" in model_class and modality == "vision":
code_sample = sample_docstrings["VisionBaseModel"]
elif "Model" in model_class or "Encoder" in model_class:
code_sample = sample_docstrings["BaseModel"]
elif "ImageClassification" in model_class:
code_sample = sample_docstrings["ImageClassification"]
else:
raise ValueError(f"Docstring can't be built for model {model_class}")
code_sample = filter_outputs_from_example(
code_sample, expected_output=expected_output, expected_loss=expected_loss
)
if real_checkpoint is not None:
code_sample = FAKE_MODEL_DISCLAIMER + code_sample
func_doc = (fn.__doc__ or "") + "".join(docstr)
output_doc = "" if output_type is None else _prepare_output_docstrings(output_type, config_class)
built_doc = code_sample.format(**doc_kwargs)
fn.__doc__ = func_doc + output_doc + built_doc
return fn
return docstring_decorator
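# Hedged usage sketch (checkpoint, output type, and config class are
# illustrative; the decorator is normally applied to a model's `forward`):
#
#   @add_code_sample_docstrings(
#       checkpoint="bert-base-uncased",
#       output_type=SequenceClassifierOutput,
#       config_class="BertConfig",
#   )
#   def forward(self, input_ids=None): ...
#
#   # Because the owning class name contains "SequenceClassification", the
#   # PT_SEQUENCE_CLASSIFICATION_SAMPLE template is selected, formatted with
#   # the checkpoint, and appended to `forward.__doc__`.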
def replace_return_docstrings(output_type=None, config_class=None):
def docstring_decorator(fn):
func_doc = fn.__doc__
lines = func_doc.split("\n")
i = 0
while i < len(lines) and re.search(r"^\s*Returns?:\s*$", lines[i]) is None:
i += 1
if i < len(lines):
indent = len(_get_indent(lines[i]))
lines[i] = _prepare_output_docstrings(output_type, config_class, min_indent=indent)
func_doc = "\n".join(lines)
else:
raise ValueError(
f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, "
f"current docstring is:\n{func_doc}"
)
fn.__doc__ = func_doc
return fn
return docstring_decorator
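# Hedged usage sketch: the decorated function must already contain a bare
# "Returns:" line, which is replaced by the generated return section.
#
#   @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class="BertConfig")
#   def forward(self, input_ids=None):
#       r"""
#       Args:
#           input_ids: ...
#
#       Returns:
#       """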
def copy_func(f):
"""Returns a copy of a function f."""
# Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)
g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__, argdefs=f.__defaults__, closure=f.__closure__)
g = functools.update_wrapper(g, f)
g.__kwdefaults__ = f.__kwdefaults__
return g
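# Hedged sketch: the copy shares f's code object but its metadata is
# independent, so it can be re-documented without mutating the original.
#
#   g = copy_func(f)
#   g.__doc__ = "New docstring."
#   assert f.__doc__ != g.__doc__  # assuming f's docstring differed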
|
233zzh/TitanDataOperationSystem | 4,544 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/jquery-sessiontimeout/idle/jquery.idletimer.js | /*
* jQuery idleTimer plugin
* version 0.8.092209
* by Paul Irish.
* http://github.com/paulirish/yui-misc/tree/
* MIT license
* adapted from YUI idle timer by nzakas:
* http://github.com/nzakas/yui-misc/
* Copyright (c) 2009 Nicholas C. Zakas
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
(function($){
$.idleTimer = function f(newTimeout){
//$.idleTimer.tId = -1 //timeout ID
var idle = false, //indicates if the user is idle
enabled = true, //indicates if the idle timer is enabled
timeout = 30000, //the amount of time (ms) before the user is considered idle
events = 'mousemove keydown DOMMouseScroll mousewheel mousedown', // activity is one of these events
//f.olddate = undefined, // olddate used for getElapsedTime. stored on the function
/* (intentionally not documented)
* Toggles the idle state and fires an appropriate event.
* @return {void}
*/
toggleIdleState = function(){
//toggle the state
idle = !idle;
// reset timeout counter
f.olddate = +new Date;
//fire appropriate event
$(document).trigger( $.data(document,'idleTimer', idle ? "idle" : "active" ) + '.idleTimer');
},
/**
* Stops the idle timer. This removes appropriate event handlers
* and cancels any pending timeouts.
* @return {void}
* @method stop
* @static
*/
stop = function(){
//set to disabled
enabled = false;
//clear any pending timeouts
clearTimeout($.idleTimer.tId);
//detach the event handlers
$(document).unbind('.idleTimer');
},
/* (intentionally not documented)
* Handles a user event indicating that the user isn't idle.
* @param {Event} event A DOM2-normalized event object.
* @return {void}
*/
handleUserEvent = function(){
//clear any existing timeout
clearTimeout($.idleTimer.tId);
//if the idle timer is enabled
if (enabled){
//if it's idle, that means the user is no longer idle
if (idle){
toggleIdleState();
}
//set a new timeout
$.idleTimer.tId = setTimeout(toggleIdleState, timeout);
}
};
/**
* Starts the idle timer. This adds appropriate event handlers
* and starts the first timeout.
* @param {int} newTimeout (Optional) A new value for the timeout period in ms.
* @return {void}
* @method $.idleTimer
* @static
*/
f.olddate = f.olddate || +new Date;
//assign a new timeout if necessary
if (typeof newTimeout == "number"){
timeout = newTimeout;
} else if (newTimeout === 'destroy') {
stop();
return this;
} else if (newTimeout === 'getElapsedTime'){
return (+new Date) - f.olddate;
}
//assign appropriate event handlers
$(document).bind($.trim((events+' ').split(' ').join('.idleTimer ')),handleUserEvent);
//set a timeout to toggle state
$.idleTimer.tId = setTimeout(toggleIdleState, timeout);
// assume the user is active until the first timeout fires.
$.data(document,'idleTimer',"active");
}; // end of $.idleTimer()
})(jQuery);
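/*
 * Hedged usage sketch of the API implemented above (handler names are
 * illustrative):
 *
 *   $.idleTimer(10000);                           // user is idle after 10s of inactivity
 *   $(document).bind("idle.idleTimer", onIdle);   // fired when the user goes idle
 *   $(document).bind("active.idleTimer", onBack); // fired when activity resumes
 *   $.idleTimer("getElapsedTime");                // ms since the last state toggle
 *   $.idleTimer("destroy");                       // stop the timer and unbind handlers
 */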
|
27182812/ChatGLM-LLaMA-chinese-insturct | 50,671 | src/transformers/utils/sentencepiece_model_pb2.py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: sentencepiece_model.proto
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="sentencepiece_model.proto",
package="sentencepiece",
syntax="proto2",
serialized_options=b"H\003",
create_key=_descriptor._internal_create_key,
serialized_pb=(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\xa1\n\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01'
b" \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02"
b" \x01(\t\x12\x41\n\nmodel_type\x18\x03"
b" \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04"
b" \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12"
b' \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n'
b" \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b"
b" \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12"
b' \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r'
b" \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e"
b" \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f"
b" \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12"
b" \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10"
b" \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11"
b" \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14"
b" \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15"
b" \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17"
b" \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16"
b" \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18"
b" \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19"
b" \x01(\x08:\x05\x66\x61lse\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e"
b" \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$"
b" \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18"
b' \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18"'
b" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18)"
b" \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+"
b" \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18."
b" \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30"
b" \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87"
b" \x12+\n\x1ctrain_extremely_large_corpus\x18\x31"
b' \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01'
b" \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03"
b" \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12"
b" \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06"
b' \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01'
b' \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01'
b" \x01(\t\x12\x10\n\x08\x65xpected\x18\x02"
b' \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01'
b" \x03(\x0b\x32'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02"
b" \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03"
b" \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04"
b" \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05"
b" \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01"
b" \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03"
b' \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
),
)
_TRAINERSPEC_MODELTYPE = _descriptor.EnumDescriptor(
name="ModelType",
full_name="sentencepiece.TrainerSpec.ModelType",
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name="UNIGRAM",
index=0,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="BPE",
index=1,
number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="WORD",
index=2,
number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="CHAR",
index=3,
number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=1294,
serialized_end=1347,
)
_sym_db.RegisterEnumDescriptor(_TRAINERSPEC_MODELTYPE)
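# Hedged note: the descriptor above mirrors the proto2 enum
#
#   enum ModelType { UNIGRAM = 1; BPE = 2; WORD = 3; CHAR = 4; }
#
# from sentencepiece_model.proto, with UNIGRAM as the default model type.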
_MODELPROTO_SENTENCEPIECE_TYPE = _descriptor.EnumDescriptor(
name="Type",
full_name="sentencepiece.ModelProto.SentencePiece.Type",
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name="NORMAL",
index=0,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="UNKNOWN",
index=1,
number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="CONTROL",
index=2,
number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="USER_DEFINED",
index=3,
number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="BYTE",
index=4,
number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="UNUSED",
index=5,
number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=2100,
serialized_end=2184,
)
_sym_db.RegisterEnumDescriptor(_MODELPROTO_SENTENCEPIECE_TYPE)
_TRAINERSPEC = _descriptor.Descriptor(
name="TrainerSpec",
full_name="sentencepiece.TrainerSpec",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="input",
full_name="sentencepiece.TrainerSpec.input",
index=0,
number=1,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="input_format",
full_name="sentencepiece.TrainerSpec.input_format",
index=1,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="model_prefix",
full_name="sentencepiece.TrainerSpec.model_prefix",
index=2,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="model_type",
full_name="sentencepiece.TrainerSpec.model_type",
index=3,
number=3,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="vocab_size",
full_name="sentencepiece.TrainerSpec.vocab_size",
index=4,
number=4,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=8000,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="accept_language",
full_name="sentencepiece.TrainerSpec.accept_language",
index=5,
number=5,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="self_test_sample_size",
full_name="sentencepiece.TrainerSpec.self_test_sample_size",
index=6,
number=6,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="character_coverage",
full_name="sentencepiece.TrainerSpec.character_coverage",
index=7,
number=10,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=float(0.9995),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="input_sentence_size",
full_name="sentencepiece.TrainerSpec.input_sentence_size",
index=8,
number=11,
type=4,
cpp_type=4,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="shuffle_input_sentence",
full_name="sentencepiece.TrainerSpec.shuffle_input_sentence",
index=9,
number=19,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="mining_sentence_size",
full_name="sentencepiece.TrainerSpec.mining_sentence_size",
index=10,
number=12,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\030\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="training_sentence_size",
full_name="sentencepiece.TrainerSpec.training_sentence_size",
index=11,
number=13,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\030\001",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="seed_sentencepiece_size",
full_name="sentencepiece.TrainerSpec.seed_sentencepiece_size",
index=12,
number=14,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=1000000,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="shrinking_factor",
full_name="sentencepiece.TrainerSpec.shrinking_factor",
index=13,
number=15,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=float(0.75),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="max_sentence_length",
full_name="sentencepiece.TrainerSpec.max_sentence_length",
index=14,
number=18,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=4192,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="num_threads",
full_name="sentencepiece.TrainerSpec.num_threads",
index=15,
number=16,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=16,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="num_sub_iterations",
full_name="sentencepiece.TrainerSpec.num_sub_iterations",
index=16,
number=17,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=2,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="max_sentencepiece_length",
full_name="sentencepiece.TrainerSpec.max_sentencepiece_length",
index=17,
number=20,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=16,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="split_by_unicode_script",
full_name="sentencepiece.TrainerSpec.split_by_unicode_script",
index=18,
number=21,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="split_by_number",
full_name="sentencepiece.TrainerSpec.split_by_number",
index=19,
number=23,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="split_by_whitespace",
full_name="sentencepiece.TrainerSpec.split_by_whitespace",
index=20,
number=22,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="treat_whitespace_as_suffix",
full_name="sentencepiece.TrainerSpec.treat_whitespace_as_suffix",
index=21,
number=24,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="split_digits",
full_name="sentencepiece.TrainerSpec.split_digits",
index=22,
number=25,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="control_symbols",
full_name="sentencepiece.TrainerSpec.control_symbols",
index=23,
number=30,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="user_defined_symbols",
full_name="sentencepiece.TrainerSpec.user_defined_symbols",
index=24,
number=31,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="required_chars",
full_name="sentencepiece.TrainerSpec.required_chars",
index=25,
number=36,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="byte_fallback",
full_name="sentencepiece.TrainerSpec.byte_fallback",
index=26,
number=35,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="vocabulary_output_piece_score",
full_name="sentencepiece.TrainerSpec.vocabulary_output_piece_score",
index=27,
number=32,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="hard_vocab_limit",
full_name="sentencepiece.TrainerSpec.hard_vocab_limit",
index=28,
number=33,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="use_all_vocab",
full_name="sentencepiece.TrainerSpec.use_all_vocab",
index=29,
number=34,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="unk_id",
full_name="sentencepiece.TrainerSpec.unk_id",
index=30,
number=40,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="bos_id",
full_name="sentencepiece.TrainerSpec.bos_id",
index=31,
number=41,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="eos_id",
full_name="sentencepiece.TrainerSpec.eos_id",
index=32,
number=42,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=2,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="pad_id",
full_name="sentencepiece.TrainerSpec.pad_id",
index=33,
number=43,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=-1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="unk_piece",
full_name="sentencepiece.TrainerSpec.unk_piece",
index=34,
number=45,
type=9,
cpp_type=9,
label=1,
has_default_value=True,
default_value=b"<unk>".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="bos_piece",
full_name="sentencepiece.TrainerSpec.bos_piece",
index=35,
number=46,
type=9,
cpp_type=9,
label=1,
has_default_value=True,
default_value=b"<s>".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="eos_piece",
full_name="sentencepiece.TrainerSpec.eos_piece",
index=36,
number=47,
type=9,
cpp_type=9,
label=1,
has_default_value=True,
default_value=b"</s>".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="pad_piece",
full_name="sentencepiece.TrainerSpec.pad_piece",
index=37,
number=48,
type=9,
cpp_type=9,
label=1,
has_default_value=True,
default_value=b"<pad>".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="unk_surface",
full_name="sentencepiece.TrainerSpec.unk_surface",
index=38,
number=44,
type=9,
cpp_type=9,
label=1,
has_default_value=True,
default_value=b" \342\201\207 ".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="train_extremely_large_corpus",
full_name="sentencepiece.TrainerSpec.train_extremely_large_corpus",
index=39,
number=49,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[
_TRAINERSPEC_MODELTYPE,
],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[
(200, 536870912),
],
oneofs=[],
serialized_start=45,
serialized_end=1358,
)
_NORMALIZERSPEC = _descriptor.Descriptor(
name="NormalizerSpec",
full_name="sentencepiece.NormalizerSpec",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="sentencepiece.NormalizerSpec.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="precompiled_charsmap",
full_name="sentencepiece.NormalizerSpec.precompiled_charsmap",
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="add_dummy_prefix",
full_name="sentencepiece.NormalizerSpec.add_dummy_prefix",
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="remove_extra_whitespaces",
full_name="sentencepiece.NormalizerSpec.remove_extra_whitespaces",
index=3,
number=4,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="escape_whitespaces",
full_name="sentencepiece.NormalizerSpec.escape_whitespaces",
index=4,
number=5,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="normalization_rule_tsv",
full_name="sentencepiece.NormalizerSpec.normalization_rule_tsv",
index=5,
number=6,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[
(200, 536870912),
],
oneofs=[],
serialized_start=1361,
serialized_end=1570,
)
_SELFTESTDATA_SAMPLE = _descriptor.Descriptor(
name="Sample",
full_name="sentencepiece.SelfTestData.Sample",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="input",
full_name="sentencepiece.SelfTestData.Sample.input",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="expected",
full_name="sentencepiece.SelfTestData.Sample.expected",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto2",
extension_ranges=[],
oneofs=[],
serialized_start=1641,
serialized_end=1682,
)
_SELFTESTDATA = _descriptor.Descriptor(
name="SelfTestData",
full_name="sentencepiece.SelfTestData",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="samples",
full_name="sentencepiece.SelfTestData.samples",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[
_SELFTESTDATA_SAMPLE,
],
enum_types=[],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[
(200, 536870912),
],
oneofs=[],
serialized_start=1572,
serialized_end=1693,
)
_MODELPROTO_SENTENCEPIECE = _descriptor.Descriptor(
name="SentencePiece",
full_name="sentencepiece.ModelProto.SentencePiece",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="piece",
full_name="sentencepiece.ModelProto.SentencePiece.piece",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="score",
full_name="sentencepiece.ModelProto.SentencePiece.score",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="type",
full_name="sentencepiece.ModelProto.SentencePiece.type",
index=2,
number=3,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[
_MODELPROTO_SENTENCEPIECE_TYPE,
],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[
(200, 536870912),
],
oneofs=[],
serialized_start=1985,
serialized_end=2195,
)
_MODELPROTO = _descriptor.Descriptor(
name="ModelProto",
full_name="sentencepiece.ModelProto",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="pieces",
full_name="sentencepiece.ModelProto.pieces",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="trainer_spec",
full_name="sentencepiece.ModelProto.trainer_spec",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="normalizer_spec",
full_name="sentencepiece.ModelProto.normalizer_spec",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="self_test_data",
full_name="sentencepiece.ModelProto.self_test_data",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="denormalizer_spec",
full_name="sentencepiece.ModelProto.denormalizer_spec",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[
_MODELPROTO_SENTENCEPIECE,
],
enum_types=[],
serialized_options=None,
is_extendable=True,
syntax="proto2",
extension_ranges=[
(200, 536870912),
],
oneofs=[],
serialized_start=1696,
serialized_end=2206,
)
_TRAINERSPEC.fields_by_name["model_type"].enum_type = _TRAINERSPEC_MODELTYPE
_TRAINERSPEC_MODELTYPE.containing_type = _TRAINERSPEC
_SELFTESTDATA_SAMPLE.containing_type = _SELFTESTDATA
_SELFTESTDATA.fields_by_name["samples"].message_type = _SELFTESTDATA_SAMPLE
_MODELPROTO_SENTENCEPIECE.fields_by_name["type"].enum_type = _MODELPROTO_SENTENCEPIECE_TYPE
_MODELPROTO_SENTENCEPIECE.containing_type = _MODELPROTO
_MODELPROTO_SENTENCEPIECE_TYPE.containing_type = _MODELPROTO_SENTENCEPIECE
_MODELPROTO.fields_by_name["pieces"].message_type = _MODELPROTO_SENTENCEPIECE
_MODELPROTO.fields_by_name["trainer_spec"].message_type = _TRAINERSPEC
_MODELPROTO.fields_by_name["normalizer_spec"].message_type = _NORMALIZERSPEC
_MODELPROTO.fields_by_name["self_test_data"].message_type = _SELFTESTDATA
_MODELPROTO.fields_by_name["denormalizer_spec"].message_type = _NORMALIZERSPEC
DESCRIPTOR.message_types_by_name["TrainerSpec"] = _TRAINERSPEC
DESCRIPTOR.message_types_by_name["NormalizerSpec"] = _NORMALIZERSPEC
DESCRIPTOR.message_types_by_name["SelfTestData"] = _SELFTESTDATA
DESCRIPTOR.message_types_by_name["ModelProto"] = _MODELPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TrainerSpec = _reflection.GeneratedProtocolMessageType(
"TrainerSpec",
(_message.Message,),
{
"DESCRIPTOR": _TRAINERSPEC,
"__module__": "sentencepiece_model_pb2"
# @@protoc_insertion_point(class_scope:sentencepiece.TrainerSpec)
},
)
_sym_db.RegisterMessage(TrainerSpec)
NormalizerSpec = _reflection.GeneratedProtocolMessageType(
"NormalizerSpec",
(_message.Message,),
{
"DESCRIPTOR": _NORMALIZERSPEC,
"__module__": "sentencepiece_model_pb2"
# @@protoc_insertion_point(class_scope:sentencepiece.NormalizerSpec)
},
)
_sym_db.RegisterMessage(NormalizerSpec)
SelfTestData = _reflection.GeneratedProtocolMessageType(
"SelfTestData",
(_message.Message,),
{
"Sample": _reflection.GeneratedProtocolMessageType(
"Sample",
(_message.Message,),
{
"DESCRIPTOR": _SELFTESTDATA_SAMPLE,
"__module__": "sentencepiece_model_pb2"
# @@protoc_insertion_point(class_scope:sentencepiece.SelfTestData.Sample)
},
),
"DESCRIPTOR": _SELFTESTDATA,
"__module__": "sentencepiece_model_pb2"
# @@protoc_insertion_point(class_scope:sentencepiece.SelfTestData)
},
)
_sym_db.RegisterMessage(SelfTestData)
_sym_db.RegisterMessage(SelfTestData.Sample)
ModelProto = _reflection.GeneratedProtocolMessageType(
"ModelProto",
(_message.Message,),
{
"SentencePiece": _reflection.GeneratedProtocolMessageType(
"SentencePiece",
(_message.Message,),
{
"DESCRIPTOR": _MODELPROTO_SENTENCEPIECE,
"__module__": "sentencepiece_model_pb2"
# @@protoc_insertion_point(class_scope:sentencepiece.ModelProto.SentencePiece)
},
),
"DESCRIPTOR": _MODELPROTO,
"__module__": "sentencepiece_model_pb2"
# @@protoc_insertion_point(class_scope:sentencepiece.ModelProto)
},
)
_sym_db.RegisterMessage(ModelProto)
_sym_db.RegisterMessage(ModelProto.SentencePiece)
DESCRIPTOR._options = None
_TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
_TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# @@protoc_insertion_point(module_scope)
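# Hedged usage sketch (not part of the generated protobuf output): the message
# classes registered above can deserialize a trained SentencePiece model. The
# path "tokenizer.model" below is a placeholder assumption.
#
#   m = ModelProto()
#   with open("tokenizer.model", "rb") as f:
#       m.ParseFromString(f.read())
#   print(m.trainer_spec.vocab_size, m.normalizer_spec.name, len(m.pieces))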
|
27182812/ChatGLM-LLaMA-chinese-insturct | 1,423 | src/transformers/data/__init__.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
|
233zzh/TitanDataOperationSystem | 1,533 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/jquery-sessiontimeout/idle/session-timeout-idle-init.js | var UIIdleTimeout = function() {
return {
init: function() {
var o;
$("body").append(""), $.idleTimeout("#idle-timeout-dialog", ".modal-content button:last", {
idleAfter: 5,
timeout: 3e4,
pollingInterval: 5,
keepAliveURL: "/keep-alive",
serverResponseEquals: "OK",
onTimeout: function() {
window.location = "authentication-lockscreen.html"
},
onIdle: function() {
$("#idle-timeout-dialog").modal("show"), o = $("#idle-timeout-counter"), $("#idle-timeout-dialog-keepalive").on("click", function() {
$("#idle-timeout-dialog").modal("hide")
})
},
onCountdown: function(e) {
o.html(e)
}
})
}
}
}();
jQuery(function() {
UIIdleTimeout.init()
});
/*$.idleTimeout('#idletimeout', '#idletimeout a', {
idleAfter: 5,
pollingInterval: 2,
keepAliveURL: 'keep.php',
serverResponseEquals: 'OK',
onTimeout: function(){
$(this).slideUp();
window.location = "lock-screen.html";
},
onIdle: function(){
$(this).slideDown(); // show the warning bar
},
onCountdown: function( counter ){
$(this).find("span").html( counter ); // update the counter
},
onResume: function(){
$(this).slideUp(); // hide the warning bar
}
});*/ |
27182812/ChatGLM-LLaMA-chinese-insturct | 76,721 | src/transformers/data/data_collator.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import warnings
from collections.abc import Mapping
from dataclasses import dataclass
from random import randint
from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
import numpy as np
from ..models.bert import BertTokenizer, BertTokenizerFast
from ..tokenization_utils_base import PreTrainedTokenizerBase
from ..utils import PaddingStrategy
InputDataClass = NewType("InputDataClass", Any)
"""
A DataCollator is a function that takes a list of samples from a Dataset and collates them into a batch, as a dictionary
of PyTorch/TensorFlow tensors or NumPy arrays.
"""
DataCollator = NewType("DataCollator", Callable[[List[InputDataClass]], Dict[str, Any]])
class DataCollatorMixin:
def __call__(self, features, return_tensors=None):
if return_tensors is None:
return_tensors = self.return_tensors
if return_tensors == "tf":
return self.tf_call(features)
elif return_tensors == "pt":
return self.torch_call(features)
elif return_tensors == "np":
return self.numpy_call(features)
else:
raise ValueError(f"Framework '{return_tensors}' not recognized!")
def default_data_collator(features: List[InputDataClass], return_tensors="pt") -> Dict[str, Any]:
"""
Very simple data collator that simply collates batches of dict-like objects and performs special handling for
potential keys named:
- `label`: handles a single value (int or float) per object
- `label_ids`: handles a list of values per object
Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
    to the model. See glue and ner for examples of how it's useful.
"""
# In this function we'll make the assumption that all `features` in the batch
# have the same attributes.
# So we will look at the first element as a proxy for what attributes exist
# on the whole batch.
if return_tensors == "pt":
return torch_default_data_collator(features)
elif return_tensors == "tf":
return tf_default_data_collator(features)
elif return_tensors == "np":
return numpy_default_data_collator(features)
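# Illustrative sketch of the dispatch above; the feature dicts are assumptions:
#
#   features = [{"input_ids": [1, 2], "label": 0}, {"input_ids": [3, 4], "label": 1}]
#   batch = default_data_collator(features, return_tensors="np")
#   # batch["labels"] -> array([0, 1]); batch["input_ids"].shape -> (2, 2)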
@dataclass
class DefaultDataCollator(DataCollatorMixin):
"""
Very simple data collator that simply collates batches of dict-like objects and performs special handling for
potential keys named:
- `label`: handles a single value (int or float) per object
- `label_ids`: handles a list of values per object
Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
    to the model. See glue and ner for examples of how it's useful.
This is an object (like other data collators) rather than a pure function like default_data_collator. This can be
helpful if you need to set a return_tensors value at initialization.
Args:
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", "pt" and "tf".
"""
return_tensors: str = "pt"
def __call__(self, features: List[Dict[str, Any]], return_tensors=None) -> Dict[str, Any]:
if return_tensors is None:
return_tensors = self.return_tensors
return default_data_collator(features, return_tensors)
def torch_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
import torch
if not isinstance(features[0], Mapping):
features = [vars(f) for f in features]
first = features[0]
batch = {}
# Special handling for labels.
# Ensure that tensor is created with the correct type
# (it should be automatically the case, but let's make sure of it.)
if "label" in first and first["label"] is not None:
label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
dtype = torch.long if isinstance(label, int) else torch.float
batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
elif "label_ids" in first and first["label_ids"] is not None:
if isinstance(first["label_ids"], torch.Tensor):
batch["labels"] = torch.stack([f["label_ids"] for f in features])
else:
dtype = torch.long if type(first["label_ids"][0]) is int else torch.float
batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)
# Handling of all other possible keys.
# Again, we will use the first element to figure out which key/values are not None for this model.
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([f[k] for f in features])
elif isinstance(v, np.ndarray):
batch[k] = torch.tensor(np.stack([f[k] for f in features]))
else:
batch[k] = torch.tensor([f[k] for f in features])
return batch
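# Note on the dtype inference above: an int label yields torch.long targets
# (classification) while a float label yields torch.float (regression), e.g.
#
#   torch_default_data_collator([{"x": [1], "label": 0.5}])["labels"].dtype
#   # -> torch.float32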
def tf_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
import tensorflow as tf
if not isinstance(features[0], Mapping):
features = [vars(f) for f in features]
first = features[0]
batch = {}
# Special handling for labels.
# Ensure that tensor is created with the correct type
# (it should be automatically the case, but let's make sure of it.)
if "label" in first and first["label"] is not None:
label_col_name = "label"
elif "label_ids" in first and first["label_ids"] is not None:
label_col_name = "label_ids"
elif "labels" in first and first["labels"] is not None:
label_col_name = "labels"
else:
label_col_name = None
if label_col_name is not None:
if isinstance(first[label_col_name], tf.Tensor):
dtype = tf.int64 if first[label_col_name].dtype.is_integer else tf.float32
elif isinstance(first[label_col_name], np.ndarray) or isinstance(first[label_col_name], np.generic):
dtype = tf.int64 if np.issubdtype(first[label_col_name].dtype, np.integer) else tf.float32
elif isinstance(first[label_col_name], (tuple, list)):
dtype = tf.int64 if isinstance(first[label_col_name][0], int) else tf.float32
else:
dtype = tf.int64 if isinstance(first[label_col_name], int) else tf.float32
batch["labels"] = tf.convert_to_tensor([f[label_col_name] for f in features], dtype=dtype)
# Handling of all other possible keys.
# Again, we will use the first element to figure out which key/values are not None for this model.
for k, v in first.items():
if k not in ("label", "label_ids", "labels") and v is not None and not isinstance(v, str):
if isinstance(v, (tf.Tensor, np.ndarray)):
batch[k] = tf.stack([f[k] for f in features])
else:
batch[k] = tf.convert_to_tensor([f[k] for f in features])
return batch
def numpy_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
if not isinstance(features[0], Mapping):
features = [vars(f) for f in features]
first = features[0]
batch = {}
# Special handling for labels.
# Ensure that tensor is created with the correct type
# (it should be automatically the case, but let's make sure of it.)
if "label" in first and first["label"] is not None:
label = first["label"].item() if isinstance(first["label"], np.ndarray) else first["label"]
dtype = np.int64 if isinstance(label, int) else np.float32
batch["labels"] = np.array([f["label"] for f in features], dtype=dtype)
elif "label_ids" in first and first["label_ids"] is not None:
if isinstance(first["label_ids"], np.ndarray):
batch["labels"] = np.stack([f["label_ids"] for f in features])
else:
dtype = np.int64 if type(first["label_ids"][0]) is int else np.float32
batch["labels"] = np.array([f["label_ids"] for f in features], dtype=dtype)
# Handling of all other possible keys.
# Again, we will use the first element to figure out which key/values are not None for this model.
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, np.ndarray):
batch[k] = np.stack([f[k] for f in features])
else:
batch[k] = np.array([f[k] for f in features])
return batch
@dataclass
class DataCollatorWithPadding:
"""
Data collator that will dynamically pad the inputs received.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.0 (Volta).
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", "pt" and "tf".
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
return_tensors: str = "pt"
def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=self.return_tensors,
)
if "label" in batch:
batch["labels"] = batch["label"]
del batch["label"]
if "label_ids" in batch:
batch["labels"] = batch["label_ids"]
del batch["label_ids"]
return batch
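# Minimal usage sketch, assuming a cached "bert-base-uncased" checkpoint:
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("bert-base-uncased")
#   collator = DataCollatorWithPadding(tok, padding="longest", return_tensors="pt")
#   batch = collator([tok("short"), tok("a much longer sentence")])
#   # batch["input_ids"] is rectangular, padded with tok.pad_token_id.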
@dataclass
class DataCollatorForTokenClassification(DataCollatorMixin):
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.0 (Volta).
label_pad_token_id (`int`, *optional*, defaults to -100):
            The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", "pt" and "tf".
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
return_tensors: str = "pt"
def torch_call(self, features):
import torch
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
no_labels_features = [{k: v for k, v in feature.items() if k != label_name} for feature in features]
batch = self.tokenizer.pad(
no_labels_features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
)
if labels is None:
return batch
sequence_length = batch["input_ids"].shape[1]
padding_side = self.tokenizer.padding_side
def to_list(tensor_or_iterable):
if isinstance(tensor_or_iterable, torch.Tensor):
return tensor_or_iterable.tolist()
return list(tensor_or_iterable)
if padding_side == "right":
batch[label_name] = [
to_list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
]
else:
batch[label_name] = [
[self.label_pad_token_id] * (sequence_length - len(label)) + to_list(label) for label in labels
]
batch[label_name] = torch.tensor(batch[label_name], dtype=torch.int64)
return batch
def tf_call(self, features):
import tensorflow as tf
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
# Conversion to tensors will fail if we have labels as they are not of the same length yet.
return_tensors="tf" if labels is None else None,
)
if labels is None:
return batch
sequence_length = tf.convert_to_tensor(batch["input_ids"]).shape[1]
padding_side = self.tokenizer.padding_side
if padding_side == "right":
batch["labels"] = [
list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
]
else:
batch["labels"] = [
[self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
]
batch = {k: tf.convert_to_tensor(v, dtype=tf.int64) for k, v in batch.items()}
return batch
def numpy_call(self, features):
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
# Conversion to tensors will fail if we have labels as they are not of the same length yet.
return_tensors="np" if labels is None else None,
)
if labels is None:
return batch
sequence_length = np.array(batch["input_ids"]).shape[1]
padding_side = self.tokenizer.padding_side
if padding_side == "right":
batch["labels"] = [
list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
]
else:
batch["labels"] = [
[self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
]
batch = {k: np.array(v, dtype=np.int64) for k, v in batch.items()}
return batch
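# Hedged example of the label padding implemented above (values are assumptions):
# with padding_side == "right" and a longest sequence of length 3, labels
# [0, 1, 0] stay as-is while labels [0, 0] become [0, 0, -100], so the padded
# position is excluded from the loss.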
def _torch_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
"""Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
import torch
# Tensorize if necessary.
if isinstance(examples[0], (list, tuple, np.ndarray)):
examples = [torch.tensor(e, dtype=torch.long) for e in examples]
length_of_first = examples[0].size(0)
# Check if padding is necessary.
are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
return torch.stack(examples, dim=0)
# If yes, check if we have a `pad_token`.
if tokenizer._pad_token is None:
raise ValueError(
"You are attempting to pad samples but the tokenizer you are using"
f" ({tokenizer.__class__.__name__}) does not have a pad token."
)
# Creating the full tensor and filling it with our data.
max_length = max(x.size(0) for x in examples)
if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
for i, example in enumerate(examples):
if tokenizer.padding_side == "right":
result[i, : example.shape[0]] = example
else:
result[i, -example.shape[0] :] = example
return result
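# Worked numbers for the rounding above: with max_length == 10 and
# pad_to_multiple_of == 8, ((10 // 8) + 1) * 8 == 16, so every example is
# padded with tokenizer.pad_token_id out to length 16 on the tokenizer's
# padding side.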
def _tf_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
    """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
    import tensorflow as tf
# Tensorize if necessary.
if isinstance(examples[0], (list, tuple)):
examples = [tf.convert_to_tensor(e, dtype=tf.int64) for e in examples]
# Check if padding is necessary.
length_of_first = len(examples[0])
are_tensors_same_length = all(len(x) == length_of_first for x in examples)
if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
return tf.stack(examples, axis=0)
# If yes, check if we have a `pad_token`.
if tokenizer._pad_token is None:
raise ValueError(
"You are attempting to pad samples but the tokenizer you are using"
f" ({tokenizer.__class__.__name__}) does not have a pad token."
)
# Creating the full tensor and filling it with our data.
max_length = max(len(x) for x in examples)
if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
# result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
result = []
rank = tf.rank(examples[0])
paddings = np.zeros((rank, 2), dtype=np.int32)
for example in examples:
if tokenizer.padding_side == "right":
paddings[0, 1] = max_length - len(example)
else:
paddings[0, 0] = max_length - len(example)
result.append(tf.pad(example, paddings, constant_values=tokenizer.pad_token_id))
return tf.stack(result, axis=0)
def _numpy_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
"""Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
# Tensorize if necessary.
if isinstance(examples[0], (list, tuple)):
examples = [np.array(e, dtype=np.int64) for e in examples]
# Check if padding is necessary.
length_of_first = len(examples[0])
are_tensors_same_length = all(len(x) == length_of_first for x in examples)
if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
return np.stack(examples, axis=0)
# If yes, check if we have a `pad_token`.
if tokenizer._pad_token is None:
raise ValueError(
"You are attempting to pad samples but the tokenizer you are using"
f" ({tokenizer.__class__.__name__}) does not have a pad token."
)
# Creating the full tensor and filling it with our data.
max_length = max(len(x) for x in examples)
if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
result = np.full(shape=(len(examples), max_length), fill_value=tokenizer.pad_token_id, dtype=examples[0].dtype)
for i, example in enumerate(examples):
if tokenizer.padding_side == "right":
result[i, : example.shape[0]] = example
else:
result[i, -example.shape[0] :] = example
return result
def tolist(x):
if isinstance(x, list):
return x
elif hasattr(x, "numpy"): # Checks for TF tensors without needing the import
x = x.numpy()
return x.tolist()
@dataclass
class DataCollatorForSeq2Seq:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
model ([`PreTrainedModel`]):
The model that is being trained. If set and has the *prepare_decoder_input_ids_from_labels*, use it to
prepare the *decoder_input_ids*
This is useful when using *label_smoothing* to avoid calculating loss twice.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.0 (Volta).
label_pad_token_id (`int`, *optional*, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", "pt" and "tf".
"""
tokenizer: PreTrainedTokenizerBase
model: Optional[Any] = None
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
return_tensors: str = "pt"
def __call__(self, features, return_tensors=None):
if return_tensors is None:
return_tensors = self.return_tensors
labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None
# We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the
# same length to return tensors.
if labels is not None:
max_label_length = max(len(l) for l in labels)
if self.pad_to_multiple_of is not None:
max_label_length = (
(max_label_length + self.pad_to_multiple_of - 1)
// self.pad_to_multiple_of
* self.pad_to_multiple_of
)
padding_side = self.tokenizer.padding_side
for feature in features:
remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"]))
if isinstance(feature["labels"], list):
feature["labels"] = (
feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"]
)
elif padding_side == "right":
feature["labels"] = np.concatenate([feature["labels"], remainder]).astype(np.int64)
else:
feature["labels"] = np.concatenate([remainder, feature["labels"]]).astype(np.int64)
features = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=return_tensors,
)
# prepare decoder_input_ids
if (
labels is not None
and self.model is not None
and hasattr(self.model, "prepare_decoder_input_ids_from_labels")
):
decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features["labels"])
features["decoder_input_ids"] = decoder_input_ids
return features
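# Sketch of typical use; the "t5-small" checkpoint name is an assumption:
#
#   from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
#   tok = AutoTokenizer.from_pretrained("t5-small")
#   model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#   collator = DataCollatorForSeq2Seq(tok, model=model, label_pad_token_id=-100)
#   # Labels are pre-padded with -100 above because `tokenizer.pad` only pads
#   # encoder-side keys; the model then derives `decoder_input_ids` by shifting
#   # the padded labels to the right.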
@dataclass
class DataCollatorForLanguageModeling(DataCollatorMixin):
"""
Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they
are not all of the same length.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
mlm (`bool`, *optional*, defaults to `True`):
Whether or not to use masked language modeling. If set to `False`, the labels are the same as the inputs
with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for non-masked
tokens and the value to predict for the masked token.
mlm_probability (`float`, *optional*, defaults to 0.15):
The probability with which to (randomly) mask tokens in the input, when `mlm` is set to `True`.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", "pt" and "tf".
<Tip>
For best performance, this data collator should be used with a dataset having items that are dictionaries or
BatchEncoding, with the `"special_tokens_mask"` key, as returned by a [`PreTrainedTokenizer`] or a
[`PreTrainedTokenizerFast`] with the argument `return_special_tokens_mask=True`.
</Tip>"""
tokenizer: PreTrainedTokenizerBase
mlm: bool = True
mlm_probability: float = 0.15
pad_to_multiple_of: Optional[int] = None
tf_experimental_compile: bool = False
return_tensors: str = "pt"
def __post_init__(self):
if self.mlm and self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. "
"You should pass `mlm=False` to train on causal language modeling instead."
)
if self.tf_experimental_compile:
import tensorflow as tf
self.tf_mask_tokens = tf.function(self.tf_mask_tokens, jit_compile=True)
@staticmethod
def tf_bernoulli(shape, probability):
import tensorflow as tf
prob_matrix = tf.fill(shape, probability)
return tf.cast(prob_matrix - tf.random.uniform(shape, 0, 1) >= 0, tf.bool)
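    # The helper above draws one independent Bernoulli(probability) sample per
    # position: `prob_matrix - U(0, 1) >= 0` holds with chance `probability`,
    # since P(U <= p) == p for U uniform on [0, 1).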
def tf_mask_tokens(
self, inputs: Any, vocab_size, mask_token_id, special_tokens_mask: Optional[Any] = None
) -> Tuple[Any, Any]:
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
import tensorflow as tf
input_shape = tf.shape(inputs)
# 1 for a special token, 0 for a normal token in the special tokens mask
# We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
masked_indices = self.tf_bernoulli(input_shape, self.mlm_probability) & ~special_tokens_mask
# Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens
labels = tf.where(masked_indices, inputs, -100)
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = self.tf_bernoulli(input_shape, 0.8) & masked_indices
inputs = tf.where(indices_replaced, mask_token_id, inputs)
# 10% of the time, we replace masked input tokens with random word
indices_random = self.tf_bernoulli(input_shape, 0.5) & masked_indices & ~indices_replaced
random_words = tf.random.uniform(input_shape, maxval=vocab_size, dtype=tf.int64)
inputs = tf.where(indices_random, random_words, inputs)
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
import tensorflow as tf
# Handle dict or lists with proper padding and conversion to tensor.
if isinstance(examples[0], Mapping):
batch = self.tokenizer.pad(examples, return_tensors="tf", pad_to_multiple_of=self.pad_to_multiple_of)
else:
batch = {
"input_ids": _tf_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
}
# If special token mask has been preprocessed, pop it from the dict.
special_tokens_mask = batch.pop("special_tokens_mask", None)
if self.mlm:
if special_tokens_mask is None:
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
for val in batch["input_ids"].numpy().tolist()
]
# Cannot directly create as bool
special_tokens_mask = tf.cast(tf.convert_to_tensor(special_tokens_mask, dtype=tf.int64), tf.bool)
else:
special_tokens_mask = tf.cast(special_tokens_mask, tf.bool)
batch["input_ids"], batch["labels"] = self.tf_mask_tokens(
tf.cast(batch["input_ids"], tf.int64),
special_tokens_mask=special_tokens_mask,
mask_token_id=self.tokenizer.mask_token_id,
vocab_size=len(self.tokenizer),
)
else:
labels = batch["input_ids"]
if self.tokenizer.pad_token_id is not None:
# Replace self.tokenizer.pad_token_id with -100
labels = tf.where(labels == self.tokenizer.pad_token_id, -100, labels)
else:
labels = tf.identity(labels) # Makes a copy, just in case
batch["labels"] = labels
return batch
def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
# Handle dict or lists with proper padding and conversion to tensor.
if isinstance(examples[0], Mapping):
batch = self.tokenizer.pad(examples, return_tensors="pt", pad_to_multiple_of=self.pad_to_multiple_of)
else:
batch = {
"input_ids": _torch_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
}
# If special token mask has been preprocessed, pop it from the dict.
special_tokens_mask = batch.pop("special_tokens_mask", None)
if self.mlm:
batch["input_ids"], batch["labels"] = self.torch_mask_tokens(
batch["input_ids"], special_tokens_mask=special_tokens_mask
)
else:
labels = batch["input_ids"].clone()
if self.tokenizer.pad_token_id is not None:
labels[labels == self.tokenizer.pad_token_id] = -100
batch["labels"] = labels
return batch
def torch_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
import torch
labels = inputs.clone()
# We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
probability_matrix = torch.full(labels.shape, self.mlm_probability)
if special_tokens_mask is None:
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
else:
special_tokens_mask = special_tokens_mask.bool()
probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
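    # Worked numbers for the scheme above: with mlm_probability == 0.15 each
    # non-special token is selected with p = 0.15; of the selected tokens, 80%
    # become [MASK], half of the remaining 20% (i.e. 10%) get a random vocab
    # id, and the final 10% keep their original id. Labels supervise all
    # selected positions and are -100 everywhere else.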
def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
# Handle dict or lists with proper padding and conversion to tensor.
if isinstance(examples[0], Mapping):
batch = self.tokenizer.pad(examples, return_tensors="np", pad_to_multiple_of=self.pad_to_multiple_of)
else:
batch = {
"input_ids": _numpy_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
}
# If special token mask has been preprocessed, pop it from the dict.
special_tokens_mask = batch.pop("special_tokens_mask", None)
if self.mlm:
batch["input_ids"], batch["labels"] = self.numpy_mask_tokens(
batch["input_ids"], special_tokens_mask=special_tokens_mask
)
else:
labels = np.copy(batch["input_ids"])
if self.tokenizer.pad_token_id is not None:
labels[labels == self.tokenizer.pad_token_id] = -100
batch["labels"] = labels
return batch
def numpy_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
labels = np.copy(inputs)
# We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
probability_matrix = np.full(labels.shape, self.mlm_probability)
if special_tokens_mask is None:
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
special_tokens_mask = np.array(special_tokens_mask, dtype=bool)
else:
special_tokens_mask = special_tokens_mask.astype(bool)
probability_matrix[special_tokens_mask] = 0
# Numpy doesn't have bernoulli, so we use a binomial with 1 trial
masked_indices = np.random.binomial(1, probability_matrix, size=probability_matrix.shape).astype(bool)
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = np.random.binomial(1, 0.8, size=labels.shape).astype(bool) & masked_indices
inputs[indices_replaced] = self.tokenizer.mask_token_id
        # 10% of the time, we replace masked input tokens with a random word
        # (numpy equivalent of the torch.bernoulli(p=0.5) draw used in torch_mask_tokens)
indices_random = (
np.random.binomial(1, 0.5, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
)
random_words = np.random.randint(
low=0, high=len(self.tokenizer), size=np.count_nonzero(indices_random), dtype=np.int64
)
inputs[indices_random] = random_words
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
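# Illustrative sketch (not part of the original file): the 80/10/10 split in the
# `*_mask_tokens` methods above is produced by two sequential draws. Among the
# positions selected for masking, P([MASK]) = 0.8, P(random token) = (1 - 0.8) * 0.5 = 0.1,
# and P(kept unchanged) = 0.1. The helper below (a hypothetical name, defined only
# for illustration) checks that arithmetic empirically with numpy.
def _sketch_mlm_replacement_fractions(num_samples: int = 1_000_000, seed: int = 0):
    rng = np.random.default_rng(seed)
    # First draw: 80% of masked positions become [MASK]
    replaced = rng.binomial(1, 0.8, size=num_samples).astype(bool)
    # Second draw: half of the remaining 20% become a random token
    random_word = rng.binomial(1, 0.5, size=num_samples).astype(bool) & ~replaced
    kept = ~replaced & ~random_word
    # Expected fractions are approximately (0.8, 0.1, 0.1).
    return replaced.mean(), random_word.mean(), kept.mean()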
@dataclass
class DataCollatorForWholeWordMask(DataCollatorForLanguageModeling):
"""
Data collator used for language modeling that masks entire words.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for masked language modeling
<Tip>
This collator relies on details of the implementation of subword tokenization by [`BertTokenizer`], specifically
that subword tokens are prefixed with *##*. For tokenizers that do not adhere to this scheme, this collator will
produce an output that is roughly equivalent to [`.DataCollatorForLanguageModeling`].
</Tip>"""
def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
if isinstance(examples[0], Mapping):
input_ids = [e["input_ids"] for e in examples]
else:
input_ids = examples
examples = [{"input_ids": e} for e in examples]
batch_input = _torch_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
mask_labels = []
for e in examples:
ref_tokens = []
for id in tolist(e["input_ids"]):
token = self.tokenizer._convert_id_to_token(id)
ref_tokens.append(token)
            # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
if "chinese_ref" in e:
ref_pos = tolist(e["chinese_ref"])
len_seq = len(e["input_ids"])
for i in range(len_seq):
if i in ref_pos:
ref_tokens[i] = "##" + ref_tokens[i]
mask_labels.append(self._whole_word_mask(ref_tokens))
batch_mask = _torch_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
inputs, labels = self.torch_mask_tokens(batch_input, batch_mask)
return {"input_ids": inputs, "labels": labels}
def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
if isinstance(examples[0], Mapping):
input_ids = [e["input_ids"] for e in examples]
else:
input_ids = examples
examples = [{"input_ids": e} for e in examples]
batch_input = _tf_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
mask_labels = []
for e in examples:
ref_tokens = []
for id in tolist(e["input_ids"]):
token = self.tokenizer._convert_id_to_token(id)
ref_tokens.append(token)
            # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
if "chinese_ref" in e:
ref_pos = tolist(e["chinese_ref"])
len_seq = len(e["input_ids"])
for i in range(len_seq):
if i in ref_pos:
ref_tokens[i] = "##" + ref_tokens[i]
mask_labels.append(self._whole_word_mask(ref_tokens))
batch_mask = _tf_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
inputs, labels = self.tf_mask_tokens(batch_input, batch_mask)
return {"input_ids": inputs, "labels": labels}
def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
if isinstance(examples[0], Mapping):
input_ids = [e["input_ids"] for e in examples]
else:
input_ids = examples
examples = [{"input_ids": e} for e in examples]
batch_input = _numpy_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
mask_labels = []
for e in examples:
ref_tokens = []
for id in tolist(e["input_ids"]):
token = self.tokenizer._convert_id_to_token(id)
ref_tokens.append(token)
            # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
if "chinese_ref" in e:
ref_pos = tolist(e["chinese_ref"])
len_seq = len(e["input_ids"])
for i in range(len_seq):
if i in ref_pos:
ref_tokens[i] = "##" + ref_tokens[i]
mask_labels.append(self._whole_word_mask(ref_tokens))
batch_mask = _numpy_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
inputs, labels = self.numpy_mask_tokens(batch_input, batch_mask)
return {"input_ids": inputs, "labels": labels}
def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):
"""
        Get 0/1 labels for masked tokens with a whole-word-mask proxy.
"""
if not isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):
warnings.warn(
"DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers. "
"Please refer to the documentation for more information."
)
cand_indexes = []
for i, token in enumerate(input_tokens):
if token == "[CLS]" or token == "[SEP]":
continue
if len(cand_indexes) >= 1 and token.startswith("##"):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
random.shuffle(cand_indexes)
num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))
masked_lms = []
covered_indexes = set()
for index_set in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_lms.append(index)
if len(covered_indexes) != len(masked_lms):
raise ValueError("Length of covered_indexes is not equal to length of masked_lms.")
mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
return mask_labels
def torch_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
"""
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Setting
        'mask_labels' means we use whole word masking (wwm); indices are masked directly according to the provided reference.
"""
import torch
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
" --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
        # We sample a few tokens in each sequence for masked-LM training (with probability `args.mlm_probability`, which defaults to 0.15 in BERT/RoBERTa)
probability_matrix = mask_labels
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = probability_matrix.bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
        # 10% of the time, we replace masked input tokens with a random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
def tf_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
"""
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Setting
        'mask_labels' means we use whole word masking (wwm); indices are masked directly according to the provided reference.
"""
import tensorflow as tf
input_shape = tf.shape(inputs)
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
" --mlm flag if you want to use this tokenizer."
)
labels = tf.identity(inputs)
        # We sample a few tokens in each sequence for masked-LM training (with probability `args.mlm_probability`, which defaults to 0.15 in BERT/RoBERTa)
masked_indices = tf.cast(mask_labels, tf.bool)
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels
]
masked_indices = masked_indices & ~tf.cast(special_tokens_mask, dtype=tf.bool)
if self.tokenizer._pad_token is not None:
padding_mask = inputs == self.tokenizer.pad_token_id
masked_indices = masked_indices & ~padding_mask
# Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens
labels = tf.where(masked_indices, inputs, -100)
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = self.tf_bernoulli(input_shape, 0.8) & masked_indices
inputs = tf.where(indices_replaced, self.tokenizer.mask_token_id, inputs)
        # 10% of the time, we replace masked input tokens with a random word
indices_random = self.tf_bernoulli(input_shape, 0.5) & masked_indices & ~indices_replaced
random_words = tf.random.uniform(input_shape, maxval=len(self.tokenizer), dtype=tf.int64)
inputs = tf.where(indices_random, random_words, inputs)
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
def numpy_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
"""
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Setting
        'mask_labels' means we use whole word masking (wwm); indices are masked directly according to the provided reference.
"""
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
" --mlm flag if you want to use this tokenizer."
)
labels = np.copy(inputs)
        # We sample a few tokens in each sequence for masked-LM training (with probability `args.mlm_probability`, which defaults to 0.15 in BERT/RoBERTa)
masked_indices = mask_labels.astype(bool)
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
masked_indices[np.array(special_tokens_mask, dtype=bool)] = 0
if self.tokenizer._pad_token is not None:
padding_mask = labels == self.tokenizer.pad_token_id
masked_indices[padding_mask] = 0
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = np.random.binomial(1, 0.8, size=labels.shape).astype(bool) & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
        # 10% of the time, we replace masked input tokens with a random word
        # (numpy equivalent of the torch.bernoulli(p=0.5) draw used in torch_mask_tokens)
indices_random = (
np.random.binomial(1, 0.5, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
)
random_words = np.random.randint(low=0, high=len(self.tokenizer), size=labels.shape, dtype=np.int64)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
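# Illustrative sketch (not part of the original file): how `_whole_word_mask`
# groups WordPiece sub-tokens into whole-word candidates before sampling.
# Sub-tokens prefixed with "##" join the preceding word's index list, so the
# word is masked as a unit. The function name is hypothetical.
def _sketch_whole_word_candidates(tokens):
    cand_indexes = []
    for i, token in enumerate(tokens):
        if token in ("[CLS]", "[SEP]"):
            continue
        if cand_indexes and token.startswith("##"):
            # Continuation piece: attach to the previous word's index list
            cand_indexes[-1].append(i)
        else:
            cand_indexes.append([i])
    return cand_indexes
# _sketch_whole_word_candidates(["[CLS]", "un", "##break", "##able", "[SEP]"])
# returns [[1, 2, 3]]: "unbreakable" is masked (or kept) as one word.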
@dataclass
class DataCollatorForSOP(DataCollatorForLanguageModeling):
"""
Data collator used for sentence order prediction task.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for both masked language modeling and sentence order prediction
"""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "DataCollatorForSOP is deprecated and will be removed in a future version, you can now use "
            "DataCollatorForLanguageModeling instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
def __call__(self, examples: List[Dict[str, Any]]) -> Dict[str, Any]:
import torch
from torch.nn.utils.rnn import pad_sequence
input_ids = [example["input_ids"] for example in examples]
input_ids = _torch_collate_batch(input_ids, self.tokenizer)
input_ids, labels, attention_mask = self.mask_tokens(input_ids)
token_type_ids = [example["token_type_ids"] for example in examples]
        # the size of segment_ids varies due to randomness; pad at the end as in the original implementation
token_type_ids = pad_sequence(token_type_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)
sop_label_list = [example["sentence_order_label"] for example in examples]
sentence_order_label = torch.stack(sop_label_list)
return {
"input_ids": input_ids,
"labels": labels,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
"sentence_order_label": sentence_order_label,
}
def mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any]:
"""
Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%
original. N-gram not applied yet.
"""
import torch
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
" --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
        # We sample a few tokens in each sequence for masked-LM training (with probability `args.mlm_probability`, which defaults to 0.15 in BERT/RoBERTa)
probability_matrix = torch.full(labels.shape, self.mlm_probability)
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
        # the probability is `1` for masked positions; however, in the ALBERT model an attention mask of `0` means masked, so invert the value
attention_mask = (~masked_indices).float()
if self.tokenizer._pad_token is not None:
attention_padding_mask = labels.eq(self.tokenizer.pad_token_id)
attention_mask.masked_fill_(attention_padding_mask, value=1.0)
labels[~masked_indices] = -100 # We only compute loss on masked tokens, -100 is default for CE compute
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
        # 10% of the time, we replace masked input tokens with a random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels, attention_mask
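# Illustrative sketch (not part of the original file): the attention-mask
# inversion performed in `DataCollatorForSOP.mask_tokens` above. In this
# (deprecated) recipe, `0` marks masked positions for the ALBERT model, so the
# boolean MLM mask is inverted and padding positions are forced back to 1.0.
# Toy values only; the function name is hypothetical.
def _sketch_sop_attention_mask():
    import torch
    masked_indices = torch.tensor([[False, True, False, False]])
    padding_mask = torch.tensor([[False, False, False, True]])
    attention_mask = (~masked_indices).float()  # 0 where masked, 1 elsewhere
    attention_mask.masked_fill_(padding_mask, value=1.0)  # padding stays 1.0
    return attention_mask  # tensor([[1., 0., 1., 1.]])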
@dataclass
class DataCollatorForPermutationLanguageModeling(DataCollatorMixin):
"""
Data collator used for permutation language modeling.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for permutation language modeling with procedures specific to XLNet
"""
tokenizer: PreTrainedTokenizerBase
plm_probability: float = 1 / 6
max_span_length: int = 5 # maximum length of a span of masked tokens
return_tensors: str = "pt"
def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
if isinstance(examples[0], Mapping):
examples = [e["input_ids"] for e in examples]
batch = _torch_collate_batch(examples, self.tokenizer)
inputs, perm_mask, target_mapping, labels = self.torch_mask_tokens(batch)
return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
if isinstance(examples[0], Mapping):
examples = [e["input_ids"] for e in examples]
batch = _tf_collate_batch(examples, self.tokenizer)
inputs, perm_mask, target_mapping, labels = self.tf_mask_tokens(batch)
return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
if isinstance(examples[0], Mapping):
examples = [e["input_ids"] for e in examples]
batch = _numpy_collate_batch(examples, self.tokenizer)
inputs, perm_mask, target_mapping, labels = self.numpy_mask_tokens(batch)
return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
def torch_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
"""
The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
masked
3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
span_length]` and mask tokens `start_index:start_index + span_length`
4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
sequence to be processed), repeat from Step 1.
"""
import torch
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for permutation language modeling."
" Please add a mask token if you want to use this tokenizer."
)
if inputs.size(1) % 2 != 0:
raise ValueError(
"This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
" relevant comments in source code for details."
)
labels = inputs.clone()
# Creating the mask and target_mapping tensors
masked_indices = torch.full(labels.shape, 0, dtype=torch.bool)
target_mapping = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
for i in range(labels.size(0)):
# Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
cur_len = 0
max_len = labels.size(1)
while cur_len < max_len:
# Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
span_length = torch.randint(1, self.max_span_length + 1, (1,)).item()
# Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
context_length = int(span_length / self.plm_probability)
# Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
start_index = cur_len + torch.randint(context_length - span_length + 1, (1,)).item()
masked_indices[i, start_index : start_index + span_length] = 1
# Set `cur_len = cur_len + context_length`
cur_len += context_length
# Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
            # the i-th prediction corresponds to the i-th token.
target_mapping[i] = torch.eye(labels.size(1))
special_tokens_mask = torch.tensor(
[self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
dtype=torch.bool,
)
masked_indices.masked_fill_(special_tokens_mask, value=0.0)
        if self.tokenizer._pad_token is not None:
            padding_mask = labels.eq(self.tokenizer.pad_token_id)
            masked_indices.masked_fill_(padding_mask, value=0.0)
        else:
            # No pad token: use an all-False mask so `non_func_mask` below stays well-defined
            padding_mask = torch.zeros_like(labels, dtype=torch.bool)
# Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
non_func_mask = ~(padding_mask | special_tokens_mask)
inputs[masked_indices] = self.tokenizer.mask_token_id
labels[~masked_indices] = -100 # We only compute loss on masked tokens
perm_mask = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
for i in range(labels.size(0)):
# Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
# determine which tokens a given token can attend to (encoded in `perm_mask`).
# Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
# (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
# we assume that reused length is half of sequence length and permutation length is equal to reused length.
# This requires that the sequence length be even.
# Create a linear factorisation order
perm_index = torch.arange(labels.size(1))
# Split this into two halves, assuming that half the sequence is reused each time
perm_index = perm_index.reshape((-1, labels.size(1) // 2)).transpose(0, 1)
# Permute the two halves such that they do not cross over
perm_index = perm_index[torch.randperm(labels.size(1) // 2)]
# Flatten this out into the desired permuted factorisation order
perm_index = torch.flatten(perm_index.transpose(0, 1))
# Set the permutation indices of non-masked (non-functional) tokens to the
# smallest index (-1) so that:
# (1) They can be seen by all other positions
# (2) They cannot see masked positions, so there won't be information leak
perm_index.masked_fill_(~masked_indices[i] & non_func_mask[i], -1)
# The logic for whether the i-th token can attend on the j-th token based on the factorisation order:
# 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
# 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
perm_mask[i] = (
perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1)))
) & masked_indices[i]
return inputs.long(), perm_mask, target_mapping, labels.long()
def tf_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
"""
The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
masked
3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
span_length]` and mask tokens `start_index:start_index + span_length`
4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
sequence to be processed), repeat from Step 1.
"""
import tensorflow as tf
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for permutation language modeling."
" Please add a mask token if you want to use this tokenizer."
)
if tf.shape(inputs)[1] % 2 != 0:
raise ValueError(
"This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
" relevant comments in source code for details."
)
labels = tf.identity(inputs)
# Creating the mask and target_mapping tensors
masked_indices = np.full(labels.shape.as_list(), 0, dtype=bool)
labels_shape = tf.shape(labels)
target_mapping = np.zeros((labels_shape[0], labels_shape[1], labels_shape[1]), dtype=np.float32)
for i in range(len(labels)):
# Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
cur_len = 0
max_len = tf.shape(labels)[1]
while cur_len < max_len:
# Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
                span_length = randint(1, self.max_span_length)  # random.randint is inclusive on both ends
# Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
context_length = int(span_length / self.plm_probability)
# Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
                start_index = cur_len + randint(0, context_length - span_length)
masked_indices[i, start_index : start_index + span_length] = 1
# Set `cur_len = cur_len + context_length`
cur_len += context_length
# Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
            # the i-th prediction corresponds to the i-th token.
target_mapping[i] = np.eye(labels_shape[1])
masked_indices = tf.cast(tf.convert_to_tensor(masked_indices), dtype=tf.bool)
target_mapping = tf.convert_to_tensor(target_mapping)
special_tokens_mask = tf.convert_to_tensor(
[
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
for val in labels.numpy().tolist()
],
)
special_tokens_mask = tf.cast(special_tokens_mask, dtype=tf.bool)
masked_indices = masked_indices & ~special_tokens_mask
        if self.tokenizer._pad_token is not None:
            padding_mask = labels == self.tokenizer.pad_token_id
            masked_indices = masked_indices & ~padding_mask
        else:
            # No pad token: use an all-False mask so `non_func_mask` below stays well-defined
            padding_mask = tf.zeros_like(labels, dtype=tf.bool)
# Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
non_func_mask = ~(padding_mask | special_tokens_mask)
inputs = tf.where(masked_indices, self.tokenizer.mask_token_id, inputs)
labels = tf.where(masked_indices, labels, -100) # We only compute loss on masked tokens
perm_mask = []
for i in range(len(labels)):
# Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
# determine which tokens a given token can attend to (encoded in `perm_mask`).
# Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
# (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
# we assume that reused length is half of sequence length and permutation length is equal to reused length.
# This requires that the sequence length be even.
# Create a linear factorisation order
# tf.range is the equivalent of torch.arange
perm_index = tf.range(labels_shape[1])
# Split this into two halves, assuming that half the sequence is reused each time
perm_index = tf.transpose(tf.reshape(perm_index, (-1, labels_shape[1] // 2)))
# Permute the two halves such that they do not cross over
perm_index = tf.random.shuffle(perm_index) # Shuffles along the first dimension
# Flatten this out into the desired permuted factorisation order
perm_index = tf.reshape(tf.transpose(perm_index), (-1,))
# Set the permutation indices of non-masked (non-functional) tokens to the
# smallest index (-1) so that:
# (1) They can be seen by all other positions
# (2) They cannot see masked positions, so there won't be information leak
perm_index = tf.where(~masked_indices[i] & non_func_mask[i], -1, perm_index)
# The logic for whether the i-th token can attend on the j-th token based on the factorisation order:
# 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
# 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
perm_mask.append(
(tf.reshape(perm_index, (labels_shape[1], 1)) <= tf.reshape(perm_index, (1, labels_shape[1])))
& masked_indices[i]
)
perm_mask = tf.stack(perm_mask, axis=0)
return tf.cast(inputs, tf.int64), tf.cast(perm_mask, tf.float32), target_mapping, tf.cast(labels, tf.int64)
def numpy_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
"""
The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
masked
3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
span_length]` and mask tokens `start_index:start_index + span_length`
4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
sequence to be processed), repeat from Step 1.
"""
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for permutation language modeling."
" Please add a mask token if you want to use this tokenizer."
)
if inputs.shape[1] % 2 != 0:
raise ValueError(
"This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
" relevant comments in source code for details."
)
labels = np.copy(inputs)
# Creating the mask and target_mapping tensors
masked_indices = np.full(labels.shape, 0, dtype=bool)
target_mapping = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)
for i in range(labels.shape[0]):
# Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
cur_len = 0
max_len = labels.shape[1]
while cur_len < max_len:
# Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
                span_length = randint(1, self.max_span_length)  # random.randint is inclusive on both ends
# Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
context_length = int(span_length / self.plm_probability)
# Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
                start_index = cur_len + randint(0, context_length - span_length)
masked_indices[i, start_index : start_index + span_length] = 1
# Set `cur_len = cur_len + context_length`
cur_len += context_length
# Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
            # the i-th prediction corresponds to the i-th token.
target_mapping[i] = np.eye(labels.shape[1])
special_tokens_mask = np.array(
[self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
dtype=bool,
)
masked_indices[special_tokens_mask] = 0
        if self.tokenizer._pad_token is not None:
            padding_mask = labels == self.tokenizer.pad_token_id
            masked_indices[padding_mask] = 0
        else:
            # No pad token: use an all-False mask so `non_func_mask` below stays well-defined
            padding_mask = np.zeros_like(labels, dtype=bool)
# Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
non_func_mask = ~(padding_mask | special_tokens_mask)
inputs[masked_indices] = self.tokenizer.mask_token_id
labels[~masked_indices] = -100 # We only compute loss on masked tokens
perm_mask = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)
for i in range(labels.shape[0]):
# Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
# determine which tokens a given token can attend to (encoded in `perm_mask`).
# Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
# (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
# we assume that reused length is half of sequence length and permutation length is equal to reused length.
# This requires that the sequence length be even.
# Create a linear factorisation order
perm_index = np.arange(labels.shape[1])
# Split this into two halves, assuming that half the sequence is reused each time
perm_index = perm_index.reshape((-1, labels.shape[1] // 2)).T
# Permute the two halves such that they do not cross over
np.random.shuffle(perm_index)
# Flatten this out into the desired permuted factorisation order
perm_index = perm_index.T.flatten()
# Set the permutation indices of non-masked (non-functional) tokens to the
# smallest index (-1) so that:
# (1) They can be seen by all other positions
# (2) They cannot see masked positions, so there won't be information leak
perm_index[~masked_indices[i] & non_func_mask[i]] = -1
# The logic for whether the i-th token can attend on the j-th token based on the factorisation order:
# 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
# 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
perm_mask[i] = (
perm_index.reshape((labels.shape[1], 1)) <= perm_index.reshape((1, labels.shape[1]))
) & masked_indices[i]
return inputs.astype(np.int64), perm_mask, target_mapping, labels.astype(np.int64)
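# Illustrative sketch (not part of the original file): one iteration of the
# span-sampling loop shared by the three `*_mask_tokens` variants above. With
# plm_probability = 1/6, a masked span of length L is embedded in a context of
# length 6 * L, so roughly one sixth of the tokens end up masked. The function
# name is hypothetical; `randint` is assumed to be Python's random.randint
# (inclusive bounds), as used elsewhere in this file.
def _sketch_plm_span_step(cur_len, plm_probability=1 / 6, max_span_length=5):
    # Sample the masked-span length from [1, max_span_length]
    span_length = randint(1, max_span_length)
    # Reserve a surrounding context of length span_length / plm_probability
    context_length = int(span_length / plm_probability)
    # Place the span uniformly at random inside the context
    start_index = cur_len + randint(0, context_length - span_length)
    masked_slice = slice(start_index, start_index + span_length)
    return masked_slice, cur_len + context_length  # new cur_len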
|
27182812/ChatGLM-LLaMA-chinese-insturct | 3,432 | src/transformers/data/test_generation_utils.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
import timeout_decorator
from ..testing_utils import require_torch
from ..utils import cached_property, is_torch_available
if is_torch_available():
import torch
from ..models.marian import MarianConfig, MarianMTModel
@require_torch
class GenerationUtilsTest(unittest.TestCase):
@cached_property
def config(self):
config = MarianConfig.from_pretrained("sshleifer/tiny-marian-en-de")
return config
@cached_property
def model(self):
return MarianMTModel(self.config)
def test_postprocess_next_token_scores(self):
config = self.config
model = self.model
# Initialize an input id tensor with batch size 8 and sequence length 12
input_ids = torch.arange(0, 96, 1).view((8, 12))
eos = config.eos_token_id
bad_words_ids_test_cases = [[[299]], [[23, 24], [54]], [[config.eos_token_id]], []]
masked_scores = [
[(0, 299), (1, 299), (2, 299), (3, 299), (4, 299), (5, 299), (6, 299), (7, 299)],
[(1, 24), (0, 54), (1, 54), (2, 54), (3, 54), (4, 54), (5, 54), (6, 54), (7, 54)],
[(0, eos), (1, eos), (2, eos), (3, eos), (4, eos), (5, eos), (6, eos), (7, eos)],
[],
]
for test_case_index, bad_words_ids in enumerate(bad_words_ids_test_cases):
# Initialize a scores tensor with batch size 8 and vocabulary size 300
scores = torch.rand((8, 300))
output = model.postprocess_next_token_scores(
scores,
input_ids,
0,
bad_words_ids,
13,
15,
config.max_length,
config.eos_token_id,
config.repetition_penalty,
32,
5,
)
for masked_score in masked_scores[test_case_index]:
self.assertTrue(output[masked_score[0], masked_score[1]] == -float("inf"))
@timeout_decorator.timeout(10)
def test_postprocess_next_token_scores_large_bad_words_list(self):
config = self.config
model = self.model
# Initialize an input id tensor with batch size 8 and sequence length 12
input_ids = torch.arange(0, 96, 1).view((8, 12))
bad_words_ids = []
for _ in range(100):
length_bad_word = random.randint(1, 4)
bad_words_ids.append(random.sample(range(1, 300), length_bad_word))
scores = torch.rand((8, 300))
_ = model.postprocess_next_token_scores(
scores,
input_ids,
0,
bad_words_ids,
13,
15,
config.max_length,
config.eos_token_id,
config.repetition_penalty,
32,
5,
)
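# Illustrative sketch (not part of the original tests): the property asserted
# above, reduced to plain tensor ops. Banning a token id amounts to setting its
# next-token score to -inf, so softmax assigns it zero probability and it can
# never be sampled. The function name is hypothetical; requires torch.
def _sketch_banned_token_has_zero_probability(banned_id: int = 299):
    scores = torch.rand((8, 300))
    scores[:, banned_id] = -float("inf")
    # softmax maps -inf logits to exactly zero probability
    return torch.softmax(scores, dim=-1)[:, banned_id].eq(0).all()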
|
233zzh/TitanDataOperationSystem | 4,946 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/jquery-sessiontimeout/idle/jquery.idletimeout.js | /*
* jQuery Idle Timeout 1.2
* Copyright (c) 2011 Eric Hynds
*
* http://www.erichynds.com/jquery/a-new-and-improved-jquery-idle-timeout-plugin/
*
* Depends:
* - jQuery 1.4.2+
* - jQuery Idle Timer (by Paul Irish, http://paulirish.com/2009/jquery-idletimer-plugin/)
*
* Dual licensed under the MIT and GPL licenses:
* http://www.opensource.org/licenses/mit-license.php
* http://www.gnu.org/licenses/gpl.html
*
*/
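/*
 * Example usage (illustrative sketch, not part of the original plugin; the
 * selectors, URL, and redirect below are hypothetical):
 *
 *   $.idleTimeout('#session-warning', '#session-warning a.resume', {
 *     idleAfter: 600,              // warn after 10 minutes of inactivity
 *     warningLength: 30,           // 30-second countdown in the warning
 *     keepAliveURL: '/keepalive',  // must respond with "OK" (serverResponseEquals)
 *     onTimeout: function(){ window.location = '/logout'; }
 *   });
 */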
(function($, win){
var idleTimeout = {
init: function( element, resume, options ){
var self = this, elem;
this.warning = elem = $(element);
this.resume = $(resume);
this.options = options;
this.countdownOpen = false;
this.failedRequests = options.failedRequests;
this._startTimer();
this.title = document.title;
// expose obj to data cache so peeps can call internal methods
$.data( elem[0], 'idletimeout', this );
// start the idle timer
$.idleTimer(options.idleAfter * 1000);
// once the user becomes idle
$(document).bind("idle.idleTimer", function(){
// if the user is idle and a countdown isn't already running
if( $.data(document, 'idleTimer') === 'idle' && !self.countdownOpen ){
self._stopTimer();
self.countdownOpen = true;
self._idle();
}
});
// bind continue link
this.resume.bind("click", function(e){
e.preventDefault();
win.clearInterval(self.countdown); // stop the countdown
self.countdownOpen = false; // stop countdown
self._startTimer(); // start up the timer again
self._keepAlive( false ); // ping server
options.onResume.call( self.warning ); // call the resume callback
});
},
_idle: function(){
var self = this,
options = this.options,
warning = this.warning[0],
counter = options.warningLength;
// fire the onIdle function
options.onIdle.call(warning);
		// set initial value in the countdown placeholder
options.onCountdown.call(warning, counter);
// create a timer that runs every second
this.countdown = win.setInterval(function(){
if(--counter === 0){
window.clearInterval(self.countdown);
options.onTimeout.call(warning);
} else {
options.onCountdown.call(warning, counter);
document.title = options.titleMessage.replace('%s', counter) + self.title;
}
}, 1000);
},
_startTimer: function(){
var self = this;
this.timer = win.setTimeout(function(){
self._keepAlive();
}, this.options.pollingInterval * 1000);
},
_stopTimer: function(){
// reset the failed requests counter
this.failedRequests = this.options.failedRequests;
win.clearTimeout(this.timer);
},
_keepAlive: function( recurse ){
var self = this,
options = this.options;
		// Reset the title to what it was.
document.title = self.title;
// assume a startTimer/keepAlive loop unless told otherwise
if( typeof recurse === "undefined" ){
recurse = true;
}
// if too many requests failed, abort
if( !this.failedRequests ){
this._stopTimer();
options.onAbort.call( this.warning[0] );
return;
}
$.ajax({
timeout: options.AJAXTimeout,
url: options.keepAliveURL,
error: function(){
self.failedRequests--;
},
success: function(response){
if($.trim(response) !== options.serverResponseEquals){
self.failedRequests--;
}
},
complete: function(){
if( recurse ){
self._startTimer();
}
}
});
}
};
// expose
$.idleTimeout = function(element, resume, options){
idleTimeout.init( element, resume, $.extend($.idleTimeout.options, options) );
return this;
};
// options
$.idleTimeout.options = {
// number of seconds after user is idle to show the warning
warningLength: 30,
// url to call to keep the session alive while the user is active
keepAliveURL: "",
// the response from keepAliveURL must equal this text:
serverResponseEquals: "OK",
// user is considered idle after this many seconds. 10 minutes default
idleAfter: 600,
// a polling request will be sent to the server every X seconds
pollingInterval: 60,
// number of failed polling requests until we abort this script
failedRequests: 5,
// the $.ajax timeout in MILLISECONDS!
AJAXTimeout: 250,
// %s will be replaced by the counter value
titleMessage: 'Warning: %s seconds until log out | ',
/*
Callbacks
"this" refers to the element found by the first selector passed to $.idleTimeout.
*/
// callback to fire when the session times out
onTimeout: $.noop,
// fires when the user becomes idle
onIdle: $.noop,
// fires during each second of warningLength
onCountdown: $.noop,
// fires when the user resumes the session
onResume: $.noop,
// callback to fire when the script is aborted due to too many failed requests
onAbort: $.noop
};
})(jQuery, window); |
233zzh/TitanDataOperationSystem | 19,415 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/flot.tooltip/js/excanvas.min.js | if(!document.createElement("canvas").getContext){(function(){var z=Math;var K=z.round;var J=z.sin;var U=z.cos;var b=z.abs;var k=z.sqrt;var D=10;var F=D/2;function T(){return this.context_||(this.context_=new W(this))}var O=Array.prototype.slice;function G(i,j,m){var Z=O.call(arguments,2);return function(){return i.apply(j,Z.concat(O.call(arguments)))}}function AD(Z){return String(Z).replace(/&/g,"&").replace(/"/g,""")}function r(i){if(!i.namespaces.g_vml_){i.namespaces.add("g_vml_","urn:schemas-microsoft-com:vml","#default#VML")}if(!i.namespaces.g_o_){i.namespaces.add("g_o_","urn:schemas-microsoft-com:office:office","#default#VML")}if(!i.styleSheets.ex_canvas_){var Z=i.createStyleSheet();Z.owningElement.id="ex_canvas_";Z.cssText="canvas{display:inline-block;overflow:hidden;text-align:left;width:300px;height:150px}"}}r(document);var E={init:function(Z){if(/MSIE/.test(navigator.userAgent)&&!window.opera){var i=Z||document;i.createElement("canvas");i.attachEvent("onreadystatechange",G(this.init_,this,i))}},init_:function(m){var j=m.getElementsByTagName("canvas");for(var Z=0;Z<j.length;Z++){this.initElement(j[Z])}},initElement:function(i){if(!i.getContext){i.getContext=T;r(i.ownerDocument);i.innerHTML="";i.attachEvent("onpropertychange",S);i.attachEvent("onresize",w);var Z=i.attributes;if(Z.width&&Z.width.specified){i.style.width=Z.width.nodeValue+"px"}else{i.width=i.clientWidth}if(Z.height&&Z.height.specified){i.style.height=Z.height.nodeValue+"px"}else{i.height=i.clientHeight}}return i}};function S(i){var Z=i.srcElement;switch(i.propertyName){case"width":Z.getContext().clearRect();Z.style.width=Z.attributes.width.nodeValue+"px";Z.firstChild.style.width=Z.clientWidth+"px";break;case"height":Z.getContext().clearRect();Z.style.height=Z.attributes.height.nodeValue+"px";Z.firstChild.style.height=Z.clientHeight+"px";break}}function w(i){var Z=i.srcElement;if(Z.firstChild){Z.firstChild.style.width=Z.clientWidth+"px";Z.firstChild.style.height=Z.clientHeight+"px"}}E.init();var I=[];for(var AC=0;AC<16;AC++){for(var AB=0;AB<16;AB++){I[AC*16+AB]=AC.toString(16)+AB.toString(16)}}function V(){return[[1,0,0],[0,1,0],[0,0,1]]}function d(m,j){var i=V();for(var Z=0;Z<3;Z++){for(var AF=0;AF<3;AF++){var p=0;for(var AE=0;AE<3;AE++){p+=m[Z][AE]*j[AE][AF]}i[Z][AF]=p}}return i}function Q(i,Z){Z.fillStyle=i.fillStyle;Z.lineCap=i.lineCap;Z.lineJoin=i.lineJoin;Z.lineWidth=i.lineWidth;Z.miterLimit=i.miterLimit;Z.shadowBlur=i.shadowBlur;Z.shadowColor=i.shadowColor;Z.shadowOffsetX=i.shadowOffsetX;Z.shadowOffsetY=i.shadowOffsetY;Z.strokeStyle=i.strokeStyle;Z.globalAlpha=i.globalAlpha;Z.font=i.font;Z.textAlign=i.textAlign;Z.textBaseline=i.textBaseline;Z.arcScaleX_=i.arcScaleX_;Z.arcScaleY_=i.arcScaleY_;Z.lineScale_=i.lineScale_}var 
B={aliceblue:"#F0F8FF",antiquewhite:"#FAEBD7",aquamarine:"#7FFFD4",azure:"#F0FFFF",beige:"#F5F5DC",bisque:"#FFE4C4",black:"#000000",blanchedalmond:"#FFEBCD",blueviolet:"#8A2BE2",brown:"#A52A2A",burlywood:"#DEB887",cadetblue:"#5F9EA0",chartreuse:"#7FFF00",chocolate:"#D2691E",coral:"#FF7F50",cornflowerblue:"#6495ED",cornsilk:"#FFF8DC",crimson:"#DC143C",cyan:"#00FFFF",darkblue:"#00008B",darkcyan:"#008B8B",darkgoldenrod:"#B8860B",darkgray:"#A9A9A9",darkgreen:"#006400",darkgrey:"#A9A9A9",darkkhaki:"#BDB76B",darkmagenta:"#8B008B",darkolivegreen:"#556B2F",darkorange:"#FF8C00",darkorchid:"#9932CC",darkred:"#8B0000",darksalmon:"#E9967A",darkseagreen:"#8FBC8F",darkslateblue:"#483D8B",darkslategray:"#2F4F4F",darkslategrey:"#2F4F4F",darkturquoise:"#00CED1",darkviolet:"#9400D3",deeppink:"#FF1493",deepskyblue:"#00BFFF",dimgray:"#696969",dimgrey:"#696969",dodgerblue:"#1E90FF",firebrick:"#B22222",floralwhite:"#FFFAF0",forestgreen:"#228B22",gainsboro:"#DCDCDC",ghostwhite:"#F8F8FF",gold:"#FFD700",goldenrod:"#DAA520",grey:"#808080",greenyellow:"#ADFF2F",honeydew:"#F0FFF0",hotpink:"#FF69B4",indianred:"#CD5C5C",indigo:"#4B0082",ivory:"#FFFFF0",khaki:"#F0E68C",lavender:"#E6E6FA",lavenderblush:"#FFF0F5",lawngreen:"#7CFC00",lemonchiffon:"#FFFACD",lightblue:"#ADD8E6",lightcoral:"#F08080",lightcyan:"#E0FFFF",lightgoldenrodyellow:"#FAFAD2",lightgreen:"#90EE90",lightgrey:"#D3D3D3",lightpink:"#FFB6C1",lightsalmon:"#FFA07A",lightseagreen:"#20B2AA",lightskyblue:"#87CEFA",lightslategray:"#778899",lightslategrey:"#778899",lightsteelblue:"#B0C4DE",lightyellow:"#FFFFE0",limegreen:"#32CD32",linen:"#FAF0E6",magenta:"#FF00FF",mediumaquamarine:"#66CDAA",mediumblue:"#0000CD",mediumorchid:"#BA55D3",mediumpurple:"#9370DB",mediumseagreen:"#3CB371",mediumslateblue:"#7B68EE",mediumspringgreen:"#00FA9A",mediumturquoise:"#48D1CC",mediumvioletred:"#C71585",midnightblue:"#191970",mintcream:"#F5FFFA",mistyrose:"#FFE4E1",moccasin:"#FFE4B5",navajowhite:"#FFDEAD",oldlace:"#FDF5E6",olivedrab:"#6B8E23",orange:"#FFA500",orangered:"#FF4500",orchid:"#DA70D6",palegoldenrod:"#EEE8AA",palegreen:"#98FB98",paleturquoise:"#AFEEEE",palevioletred:"#DB7093",papayawhip:"#FFEFD5",peachpuff:"#FFDAB9",peru:"#CD853F",pink:"#FFC0CB",plum:"#DDA0DD",powderblue:"#B0E0E6",rosybrown:"#BC8F8F",royalblue:"#4169E1",saddlebrown:"#8B4513",salmon:"#FA8072",sandybrown:"#F4A460",seagreen:"#2E8B57",seashell:"#FFF5EE",sienna:"#A0522D",skyblue:"#87CEEB",slateblue:"#6A5ACD",slategray:"#708090",slategrey:"#708090",snow:"#FFFAFA",springgreen:"#00FF7F",steelblue:"#4682B4",tan:"#D2B48C",thistle:"#D8BFD8",tomato:"#FF6347",turquoise:"#40E0D0",violet:"#EE82EE",wheat:"#F5DEB3",whitesmoke:"#F5F5F5",yellowgreen:"#9ACD32"};function g(i){var m=i.indexOf("(",3);var Z=i.indexOf(")",m+1);var j=i.substring(m+1,Z).split(",");if(j.length==4&&i.substr(3,1)=="a"){alpha=Number(j[3])}else{j[3]=1}return j}function C(Z){return parseFloat(Z)/100}function N(i,j,Z){return Math.min(Z,Math.max(j,i))}function c(AF){var j,i,Z;h=parseFloat(AF[0])/360%360;if(h<0){h++}s=N(C(AF[1]),0,1);l=N(C(AF[2]),0,1);if(s==0){j=i=Z=l}else{var m=l<0.5?l*(1+s):l+s-l*s;var AE=2*l-m;j=A(AE,m,h+1/3);i=A(AE,m,h);Z=A(AE,m,h-1/3)}return"#"+I[Math.floor(j*255)]+I[Math.floor(i*255)]+I[Math.floor(Z*255)]}function A(i,Z,j){if(j<0){j++}if(j>1){j--}if(6*j<1){return i+(Z-i)*6*j}else{if(2*j<1){return Z}else{if(3*j<2){return i+(Z-i)*(2/3-j)*6}else{return i}}}}function Y(Z){var AE,p=1;Z=String(Z);if(Z.charAt(0)=="#"){AE=Z}else{if(/^rgb/.test(Z)){var m=g(Z);var AE="#",AF;for(var 
j=0;j<3;j++){if(m[j].indexOf("%")!=-1){AF=Math.floor(C(m[j])*255)}else{AF=Number(m[j])}AE+=I[N(AF,0,255)]}p=m[3]}else{if(/^hsl/.test(Z)){var m=g(Z);AE=c(m);p=m[3]}else{AE=B[Z]||Z}}}return{color:AE,alpha:p}}var L={style:"normal",variant:"normal",weight:"normal",size:10,family:"sans-serif"};var f={};function X(Z){if(f[Z]){return f[Z]}var m=document.createElement("div");var j=m.style;try{j.font=Z}catch(i){}return f[Z]={style:j.fontStyle||L.style,variant:j.fontVariant||L.variant,weight:j.fontWeight||L.weight,size:j.fontSize||L.size,family:j.fontFamily||L.family}}function P(j,i){var Z={};for(var AF in j){Z[AF]=j[AF]}var AE=parseFloat(i.currentStyle.fontSize),m=parseFloat(j.size);if(typeof j.size=="number"){Z.size=j.size}else{if(j.size.indexOf("px")!=-1){Z.size=m}else{if(j.size.indexOf("em")!=-1){Z.size=AE*m}else{if(j.size.indexOf("%")!=-1){Z.size=(AE/100)*m}else{if(j.size.indexOf("pt")!=-1){Z.size=m/0.75}else{Z.size=AE}}}}}Z.size*=0.981;return Z}function AA(Z){return Z.style+" "+Z.variant+" "+Z.weight+" "+Z.size+"px "+Z.family}function t(Z){switch(Z){case"butt":return"flat";case"round":return"round";case"square":default:return"square"}}function W(i){this.m_=V();this.mStack_=[];this.aStack_=[];this.currentPath_=[];this.strokeStyle="#000";this.fillStyle="#000";this.lineWidth=1;this.lineJoin="miter";this.lineCap="butt";this.miterLimit=D*1;this.globalAlpha=1;this.font="10px sans-serif";this.textAlign="left";this.textBaseline="alphabetic";this.canvas=i;var Z=i.ownerDocument.createElement("div");Z.style.width=i.clientWidth+"px";Z.style.height=i.clientHeight+"px";Z.style.overflow="hidden";Z.style.position="absolute";i.appendChild(Z);this.element_=Z;this.arcScaleX_=1;this.arcScaleY_=1;this.lineScale_=1}var M=W.prototype;M.clearRect=function(){if(this.textMeasureEl_){this.textMeasureEl_.removeNode(true);this.textMeasureEl_=null}this.element_.innerHTML=""};M.beginPath=function(){this.currentPath_=[]};M.moveTo=function(i,Z){var j=this.getCoords_(i,Z);this.currentPath_.push({type:"moveTo",x:j.x,y:j.y});this.currentX_=j.x;this.currentY_=j.y};M.lineTo=function(i,Z){var j=this.getCoords_(i,Z);this.currentPath_.push({type:"lineTo",x:j.x,y:j.y});this.currentX_=j.x;this.currentY_=j.y};M.bezierCurveTo=function(j,i,AI,AH,AG,AE){var Z=this.getCoords_(AG,AE);var AF=this.getCoords_(j,i);var m=this.getCoords_(AI,AH);e(this,AF,m,Z)};function e(Z,m,j,i){Z.currentPath_.push({type:"bezierCurveTo",cp1x:m.x,cp1y:m.y,cp2x:j.x,cp2y:j.y,x:i.x,y:i.y});Z.currentX_=i.x;Z.currentY_=i.y}M.quadraticCurveTo=function(AG,j,i,Z){var AF=this.getCoords_(AG,j);var AE=this.getCoords_(i,Z);var AH={x:this.currentX_+2/3*(AF.x-this.currentX_),y:this.currentY_+2/3*(AF.y-this.currentY_)};var m={x:AH.x+(AE.x-this.currentX_)/3,y:AH.y+(AE.y-this.currentY_)/3};e(this,AH,m,AE)};M.arc=function(AJ,AH,AI,AE,i,j){AI*=D;var AN=j?"at":"wa";var AK=AJ+U(AE)*AI-F;var AM=AH+J(AE)*AI-F;var Z=AJ+U(i)*AI-F;var AL=AH+J(i)*AI-F;if(AK==Z&&!j){AK+=0.125}var m=this.getCoords_(AJ,AH);var AG=this.getCoords_(AK,AM);var AF=this.getCoords_(Z,AL);this.currentPath_.push({type:AN,x:m.x,y:m.y,radius:AI,xStart:AG.x,yStart:AG.y,xEnd:AF.x,yEnd:AF.y})};M.rect=function(j,i,Z,m){this.moveTo(j,i);this.lineTo(j+Z,i);this.lineTo(j+Z,i+m);this.lineTo(j,i+m);this.closePath()};M.strokeRect=function(j,i,Z,m){var p=this.currentPath_;this.beginPath();this.moveTo(j,i);this.lineTo(j+Z,i);this.lineTo(j+Z,i+m);this.lineTo(j,i+m);this.closePath();this.stroke();this.currentPath_=p};M.fillRect=function(j,i,Z,m){var 
p=this.currentPath_;this.beginPath();this.moveTo(j,i);this.lineTo(j+Z,i);this.lineTo(j+Z,i+m);this.lineTo(j,i+m);this.closePath();this.fill();this.currentPath_=p};M.createLinearGradient=function(i,m,Z,j){var p=new v("gradient");p.x0_=i;p.y0_=m;p.x1_=Z;p.y1_=j;return p};M.createRadialGradient=function(m,AE,j,i,p,Z){var AF=new v("gradientradial");AF.x0_=m;AF.y0_=AE;AF.r0_=j;AF.x1_=i;AF.y1_=p;AF.r1_=Z;return AF};M.drawImage=function(AO,j){var AH,AF,AJ,AV,AM,AK,AQ,AX;var AI=AO.runtimeStyle.width;var AN=AO.runtimeStyle.height;AO.runtimeStyle.width="auto";AO.runtimeStyle.height="auto";var AG=AO.width;var AT=AO.height;AO.runtimeStyle.width=AI;AO.runtimeStyle.height=AN;if(arguments.length==3){AH=arguments[1];AF=arguments[2];AM=AK=0;AQ=AJ=AG;AX=AV=AT}else{if(arguments.length==5){AH=arguments[1];AF=arguments[2];AJ=arguments[3];AV=arguments[4];AM=AK=0;AQ=AG;AX=AT}else{if(arguments.length==9){AM=arguments[1];AK=arguments[2];AQ=arguments[3];AX=arguments[4];AH=arguments[5];AF=arguments[6];AJ=arguments[7];AV=arguments[8]}else{throw Error("Invalid number of arguments")}}}var AW=this.getCoords_(AH,AF);var m=AQ/2;var i=AX/2;var AU=[];var Z=10;var AE=10;AU.push(" <g_vml_:group",' coordsize="',D*Z,",",D*AE,'"',' coordorigin="0,0"',' style="width:',Z,"px;height:",AE,"px;position:absolute;");if(this.m_[0][0]!=1||this.m_[0][1]||this.m_[1][1]!=1||this.m_[1][0]){var p=[];p.push("M11=",this.m_[0][0],",","M12=",this.m_[1][0],",","M21=",this.m_[0][1],",","M22=",this.m_[1][1],",","Dx=",K(AW.x/D),",","Dy=",K(AW.y/D),"");var AS=AW;var AR=this.getCoords_(AH+AJ,AF);var AP=this.getCoords_(AH,AF+AV);var AL=this.getCoords_(AH+AJ,AF+AV);AS.x=z.max(AS.x,AR.x,AP.x,AL.x);AS.y=z.max(AS.y,AR.y,AP.y,AL.y);AU.push("padding:0 ",K(AS.x/D),"px ",K(AS.y/D),"px 0;filter:progid:DXImageTransform.Microsoft.Matrix(",p.join(""),", sizingmethod='clip');")}else{AU.push("top:",K(AW.y/D),"px;left:",K(AW.x/D),"px;")}AU.push(' ">','<g_vml_:image src="',AO.src,'"',' style="width:',D*AJ,"px;"," height:",D*AV,'px"',' cropleft="',AM/AG,'"',' croptop="',AK/AT,'"',' cropright="',(AG-AM-AQ)/AG,'"',' cropbottom="',(AT-AK-AX)/AT,'"'," />","</g_vml_:group>");this.element_.insertAdjacentHTML("BeforeEnd",AU.join(""))};M.stroke=function(AM){var m=10;var AN=10;var AE=5000;var AG={x:null,y:null};var AL={x:null,y:null};for(var AH=0;AH<this.currentPath_.length;AH+=AE){var AK=[];var AF=false;AK.push("<g_vml_:shape",' filled="',!!AM,'"',' style="position:absolute;width:',m,"px;height:",AN,'px;"',' coordorigin="0,0"',' coordsize="',D*m,",",D*AN,'"',' stroked="',!AM,'"',' path="');var AO=false;for(var AI=AH;AI<Math.min(AH+AE,this.currentPath_.length);AI++){if(AI%AE==0&&AI>0){AK.push(" m ",K(this.currentPath_[AI-1].x),",",K(this.currentPath_[AI-1].y))}var Z=this.currentPath_[AI];var AJ;switch(Z.type){case"moveTo":AJ=Z;AK.push(" m ",K(Z.x),",",K(Z.y));break;case"lineTo":AK.push(" l ",K(Z.x),",",K(Z.y));break;case"close":AK.push(" x ");Z=null;break;case"bezierCurveTo":AK.push(" c ",K(Z.cp1x),",",K(Z.cp1y),",",K(Z.cp2x),",",K(Z.cp2y),",",K(Z.x),",",K(Z.y));break;case"at":case"wa":AK.push(" ",Z.type," ",K(Z.x-this.arcScaleX_*Z.radius),",",K(Z.y-this.arcScaleY_*Z.radius)," ",K(Z.x+this.arcScaleX_*Z.radius),",",K(Z.y+this.arcScaleY_*Z.radius)," ",K(Z.xStart),",",K(Z.yStart)," ",K(Z.xEnd),",",K(Z.yEnd));break}if(Z){if(AG.x==null||Z.x<AG.x){AG.x=Z.x}if(AL.x==null||Z.x>AL.x){AL.x=Z.x}if(AG.y==null||Z.y<AG.y){AG.y=Z.y}if(AL.y==null||Z.y>AL.y){AL.y=Z.y}}}AK.push(' 
">');if(!AM){R(this,AK)}else{a(this,AK,AG,AL)}AK.push("</g_vml_:shape>");this.element_.insertAdjacentHTML("beforeEnd",AK.join(""))}};function R(j,AE){var i=Y(j.strokeStyle);var m=i.color;var p=i.alpha*j.globalAlpha;var Z=j.lineScale_*j.lineWidth;if(Z<1){p*=Z}AE.push("<g_vml_:stroke",' opacity="',p,'"',' joinstyle="',j.lineJoin,'"',' miterlimit="',j.miterLimit,'"',' endcap="',t(j.lineCap),'"',' weight="',Z,'px"',' color="',m,'" />')}function a(AO,AG,Ah,AP){var AH=AO.fillStyle;var AY=AO.arcScaleX_;var AX=AO.arcScaleY_;var Z=AP.x-Ah.x;var m=AP.y-Ah.y;if(AH instanceof v){var AL=0;var Ac={x:0,y:0};var AU=0;var AK=1;if(AH.type_=="gradient"){var AJ=AH.x0_/AY;var j=AH.y0_/AX;var AI=AH.x1_/AY;var Aj=AH.y1_/AX;var Ag=AO.getCoords_(AJ,j);var Af=AO.getCoords_(AI,Aj);var AE=Af.x-Ag.x;var p=Af.y-Ag.y;AL=Math.atan2(AE,p)*180/Math.PI;if(AL<0){AL+=360}if(AL<0.000001){AL=0}}else{var Ag=AO.getCoords_(AH.x0_,AH.y0_);Ac={x:(Ag.x-Ah.x)/Z,y:(Ag.y-Ah.y)/m};Z/=AY*D;m/=AX*D;var Aa=z.max(Z,m);AU=2*AH.r0_/Aa;AK=2*AH.r1_/Aa-AU}var AS=AH.colors_;AS.sort(function(Ak,i){return Ak.offset-i.offset});var AN=AS.length;var AR=AS[0].color;var AQ=AS[AN-1].color;var AW=AS[0].alpha*AO.globalAlpha;var AV=AS[AN-1].alpha*AO.globalAlpha;var Ab=[];for(var Ae=0;Ae<AN;Ae++){var AM=AS[Ae];Ab.push(AM.offset*AK+AU+" "+AM.color)}AG.push('<g_vml_:fill type="',AH.type_,'"',' method="none" focus="100%"',' color="',AR,'"',' color2="',AQ,'"',' colors="',Ab.join(","),'"',' opacity="',AV,'"',' g_o_:opacity2="',AW,'"',' angle="',AL,'"',' focusposition="',Ac.x,",",Ac.y,'" />')}else{if(AH instanceof u){if(Z&&m){var AF=-Ah.x;var AZ=-Ah.y;AG.push("<g_vml_:fill",' position="',AF/Z*AY*AY,",",AZ/m*AX*AX,'"',' type="tile"',' src="',AH.src_,'" />')}}else{var Ai=Y(AO.fillStyle);var AT=Ai.color;var Ad=Ai.alpha*AO.globalAlpha;AG.push('<g_vml_:fill color="',AT,'" opacity="',Ad,'" />')}}}M.fill=function(){this.stroke(true)};M.closePath=function(){this.currentPath_.push({type:"close"})};M.getCoords_=function(j,i){var Z=this.m_;return{x:D*(j*Z[0][0]+i*Z[1][0]+Z[2][0])-F,y:D*(j*Z[0][1]+i*Z[1][1]+Z[2][1])-F}};M.save=function(){var Z={};Q(this,Z);this.aStack_.push(Z);this.mStack_.push(this.m_);this.m_=d(V(),this.m_)};M.restore=function(){if(this.aStack_.length){Q(this.aStack_.pop(),this);this.m_=this.mStack_.pop()}};function H(Z){return isFinite(Z[0][0])&&isFinite(Z[0][1])&&isFinite(Z[1][0])&&isFinite(Z[1][1])&&isFinite(Z[2][0])&&isFinite(Z[2][1])}function y(i,Z,j){if(!H(Z)){return }i.m_=Z;if(j){var p=Z[0][0]*Z[1][1]-Z[0][1]*Z[1][0];i.lineScale_=k(b(p))}}M.translate=function(j,i){var Z=[[1,0,0],[0,1,0],[j,i,1]];y(this,d(Z,this.m_),false)};M.rotate=function(i){var m=U(i);var j=J(i);var Z=[[m,j,0],[-j,m,0],[0,0,1]];y(this,d(Z,this.m_),false)};M.scale=function(j,i){this.arcScaleX_*=j;this.arcScaleY_*=i;var Z=[[j,0,0],[0,i,0],[0,0,1]];y(this,d(Z,this.m_),true)};M.transform=function(p,m,AF,AE,i,Z){var j=[[p,m,0],[AF,AE,0],[i,Z,1]];y(this,d(j,this.m_),true)};M.setTransform=function(AE,p,AG,AF,j,i){var Z=[[AE,p,0],[AG,AF,0],[j,i,1]];y(this,Z,true)};M.drawText_=function(AK,AI,AH,AN,AG){var AM=this.m_,AQ=1000,i=0,AP=AQ,AF={x:0,y:0},AE=[];var Z=P(X(this.font),this.element_);var j=AA(Z);var AR=this.element_.currentStyle;var p=this.textAlign.toLowerCase();switch(p){case"left":case"center":case"right":break;case"end":p=AR.direction=="ltr"?"right":"left";break;case"start":p=AR.direction=="rtl"?"right":"left";break;default:p="left"}switch(this.textBaseline){case"hanging":case"top":AF.y=Z.size/1.75;break;case"middle":break;default:case 
null:case"alphabetic":case"ideographic":case"bottom":AF.y=-Z.size/2.25;break}switch(p){case"right":i=AQ;AP=0.05;break;case"center":i=AP=AQ/2;break}var AO=this.getCoords_(AI+AF.x,AH+AF.y);AE.push('<g_vml_:line from="',-i,' 0" to="',AP,' 0.05" ',' coordsize="100 100" coordorigin="0 0"',' filled="',!AG,'" stroked="',!!AG,'" style="position:absolute;width:1px;height:1px;">');if(AG){R(this,AE)}else{a(this,AE,{x:-i,y:0},{x:AP,y:Z.size})}var AL=AM[0][0].toFixed(3)+","+AM[1][0].toFixed(3)+","+AM[0][1].toFixed(3)+","+AM[1][1].toFixed(3)+",0,0";var AJ=K(AO.x/D)+","+K(AO.y/D);AE.push('<g_vml_:skew on="t" matrix="',AL,'" ',' offset="',AJ,'" origin="',i,' 0" />','<g_vml_:path textpathok="true" />','<g_vml_:textpath on="true" string="',AD(AK),'" style="v-text-align:',p,";font:",AD(j),'" /></g_vml_:line>');this.element_.insertAdjacentHTML("beforeEnd",AE.join(""))};M.fillText=function(j,Z,m,i){this.drawText_(j,Z,m,i,false)};M.strokeText=function(j,Z,m,i){this.drawText_(j,Z,m,i,true)};M.measureText=function(j){if(!this.textMeasureEl_){var Z='<span style="position:absolute;top:-20000px;left:0;padding:0;margin:0;border:none;white-space:pre;"></span>';this.element_.insertAdjacentHTML("beforeEnd",Z);this.textMeasureEl_=this.element_.lastChild}var i=this.element_.ownerDocument;this.textMeasureEl_.innerHTML="";this.textMeasureEl_.style.font=this.font;this.textMeasureEl_.appendChild(i.createTextNode(j));return{width:this.textMeasureEl_.offsetWidth}};M.clip=function(){};M.arcTo=function(){};M.createPattern=function(i,Z){return new u(i,Z)};function v(Z){this.type_=Z;this.x0_=0;this.y0_=0;this.r0_=0;this.x1_=0;this.y1_=0;this.r1_=0;this.colors_=[]}v.prototype.addColorStop=function(i,Z){Z=Y(Z);this.colors_.push({offset:i,color:Z.color,alpha:Z.alpha})};function u(i,Z){q(i);switch(Z){case"repeat":case null:case"":this.repetition_="repeat";break;case"repeat-x":case"repeat-y":case"no-repeat":this.repetition_=Z;break;default:n("SYNTAX_ERR")}this.src_=i.src;this.width_=i.width;this.height_=i.height}function n(Z){throw new o(Z)}function q(Z){if(!Z||Z.nodeType!=1||Z.tagName!="IMG"){n("TYPE_MISMATCH_ERR")}if(Z.readyState!="complete"){n("INVALID_STATE_ERR")}}function o(Z){this.code=this[Z];this.message=Z+": DOM Exception "+this.code}var x=o.prototype=new Error;x.INDEX_SIZE_ERR=1;x.DOMSTRING_SIZE_ERR=2;x.HIERARCHY_REQUEST_ERR=3;x.WRONG_DOCUMENT_ERR=4;x.INVALID_CHARACTER_ERR=5;x.NO_DATA_ALLOWED_ERR=6;x.NO_MODIFICATION_ALLOWED_ERR=7;x.NOT_FOUND_ERR=8;x.NOT_SUPPORTED_ERR=9;x.INUSE_ATTRIBUTE_ERR=10;x.INVALID_STATE_ERR=11;x.SYNTAX_ERR=12;x.INVALID_MODIFICATION_ERR=13;x.NAMESPACE_ERR=14;x.INVALID_ACCESS_ERR=15;x.VALIDATION_ERR=16;x.TYPE_MISMATCH_ERR=17;G_vmlCanvasManager=E;CanvasRenderingContext2D=W;CanvasGradient=v;CanvasPattern=u;DOMException=o})()}; |
27182812/ChatGLM-LLaMA-chinese-insturct | 177,290 | src/transformers/generation/tf_utils.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import warnings
from dataclasses import dataclass
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice
from ..modeling_tf_outputs import TFCausalLMOutputWithPast, TFSeq2SeqLMOutput
from ..models.auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
TF_MODEL_FOR_VISION_2_SEQ_MAPPING,
)
from ..tf_utils import shape_list, stable_softmax
from ..utils import ModelOutput, logging
from .configuration_utils import GenerationConfig
from .tf_logits_process import (
TFForcedBOSTokenLogitsProcessor,
TFForcedEOSTokenLogitsProcessor,
TFForceTokensLogitsProcessor,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
TFNoBadWordsLogitsProcessor,
TFNoRepeatNGramLogitsProcessor,
TFRepetitionPenaltyLogitsProcessor,
TFSuppressTokensAtBeginLogitsProcessor,
TFSuppressTokensLogitsProcessor,
TFTemperatureLogitsWarper,
TFTopKLogitsWarper,
TFTopPLogitsWarper,
)
logger = logging.get_logger(__name__)
@dataclass
class TFGreedySearchDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using greedy search.
Args:
sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(tf.Tensor)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`.
"""
sequences: tf.Tensor = None
scores: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
@dataclass
class TFGreedySearchEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using greedy search. Hidden states and attention
weights of the decoder (respectively the encoder) can be accessed via the decoder_attentions and the
decoder_hidden_states attributes (respectively the encoder_attentions and the encoder_hidden_states attributes).
Args:
sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(tf.Tensor)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple of `tf.Tensor` (one for each layer of the encoder) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`.
decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`.
"""
sequences: tf.Tensor = None
scores: Optional[Tuple[tf.Tensor]] = None
encoder_attentions: Optional[Tuple[tf.Tensor]] = None
encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
@dataclass
class TFSampleDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using sampling.
Args:
sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(tf.Tensor)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`.
attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(num_return_sequences*batch_size, num_heads, generated_length, sequence_length)`.
hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(num_return_sequences*batch_size, generated_length, hidden_size)`.
"""
sequences: tf.Tensor = None
scores: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
@dataclass
class TFSampleEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using sampling. Hidden states and attention weights of
the decoder (respectively the encoder) can be accessed via the decoder_attentions and the decoder_hidden_states
attributes (respectively the encoder_attentions and the encoder_hidden_states attributes).
Args:
sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(tf.Tensor)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`.
encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple of `tf.Tensor` (one for each layer of the encoder) of shape `(batch_size*num_return_sequences,
num_heads, sequence_length, sequence_length)`.
encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size*num_return_sequences, sequence_length, hidden_size)`.
decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_return_sequences, num_heads, generated_length, sequence_length)`.
cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_return_sequences, generated_length, hidden_size)`.
"""
sequences: tf.Tensor = None
scores: Optional[Tuple[tf.Tensor]] = None
encoder_attentions: Optional[Tuple[tf.Tensor]] = None
encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
@dataclass
class TFBeamSearchDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using beam search.
Args:
sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Final beam scores of the generated `sequences`.
scores (`tuple(tf.Tensor)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this
beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token),
with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`.
beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Beam indices of generated token id at each generation step. `tf.Tensor` of shape
`(batch_size*num_return_sequences, sequence_length)`.
attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`.
hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`.
"""
sequences: tf.Tensor = None
sequences_scores: Optional[tf.Tensor] = None
scores: Optional[Tuple[tf.Tensor]] = None
beam_indices: Optional[tf.Tensor] = None
attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
@dataclass
class TFBeamSearchEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using beam search. Hidden states and attention weights
of the decoder (respectively the encoder) can be accessed via the decoder_attentions and the decoder_hidden_states
attributes (respectively the encoder_attentions and the encoder_hidden_states attributes).
Args:
sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Final beam scores of the generated `sequences`.
scores (`tuple(tf.Tensor)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this
beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token),
with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Beam indices of generated token id at each generation step. `tf.Tensor` of shape
`(batch_size*num_return_sequences, sequence_length)`.
encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple of `tf.Tensor` (one for each layer of the encoder) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`.
decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, num_heads, generated_length,
sequence_length)`.
cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`.
"""
sequences: tf.Tensor = None
sequences_scores: Optional[tf.Tensor] = None
scores: Optional[Tuple[tf.Tensor]] = None
beam_indices: Optional[tf.Tensor] = None
encoder_attentions: Optional[Tuple[tf.Tensor]] = None
encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
@dataclass
class TFBeamSampleDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using beam sample.
Args:
sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Final beam scores of the generated `sequences`.
scores (`tuple(tf.Tensor)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this
beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token),
with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`.
beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Beam indices of generated token id at each generation step. `tf.Tensor` of shape
`(batch_size*num_return_sequences, sequence_length)`.
attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`.
hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`.
"""
sequences: tf.Tensor = None
sequences_scores: Optional[tf.Tensor] = None
scores: Optional[Tuple[tf.Tensor]] = None
beam_indices: Optional[tf.Tensor] = None
attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
@dataclass
class TFBeamSampleEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using beam sampling. Hidden states and attention
weights of the decoder (respectively the encoder) can be accessed via the decoder_attentions and the
decoder_hidden_states attributes (respectively the encoder_attentions and the encoder_hidden_states attributes).
Args:
sequences (`tf.Tensor` of shape `(batch_size*num_beams, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Final beam scores of the generated `sequences`.
scores (`tuple(tf.Tensor)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this
beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token),
with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Beam indices of generated token id at each generation step. `tf.Tensor` of shape
`(batch_size*num_return_sequences, sequence_length)`.
encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple of `tf.Tensor` (one for each layer of the encoder) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size*num_beams, sequence_length, hidden_size)`.
decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`.
cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`.
"""
sequences: tf.Tensor = None
sequences_scores: Optional[tf.Tensor] = None
scores: Optional[Tuple[tf.Tensor]] = None
beam_indices: Optional[tf.Tensor] = None
encoder_attentions: Optional[Tuple[tf.Tensor]] = None
encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
@dataclass
class TFContrastiveSearchDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using contrastive search.
Args:
sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(tf.Tensor)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`.
"""
sequences: tf.Tensor = None
scores: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
@dataclass
class TFContrastiveSearchEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using contrastive search. Hidden states and attention
weights of the decoder (respectively the encoder) can be accessed via the decoder_attentions and the
decoder_hidden_states attributes (respectively the encoder_attentions and the encoder_hidden_states attributes).
Args:
sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(tf.Tensor)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple of `tf.Tensor` (one for each layer of the encoder) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`.
decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`.
"""
sequences: tf.Tensor = None
scores: Optional[Tuple[tf.Tensor]] = None
encoder_attentions: Optional[Tuple[tf.Tensor]] = None
encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None
decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None
TFGreedySearchOutput = Union[TFGreedySearchEncoderDecoderOutput, TFGreedySearchDecoderOnlyOutput]
TFSampleOutput = Union[TFSampleEncoderDecoderOutput, TFSampleDecoderOnlyOutput]
TFBeamSearchOutput = Union[TFBeamSearchEncoderDecoderOutput, TFBeamSearchDecoderOnlyOutput]
TFBeamSampleOutput = Union[TFBeamSampleEncoderDecoderOutput, TFBeamSampleDecoderOnlyOutput]
TFContrastiveSearchOutput = Union[TFContrastiveSearchEncoderDecoderOutput, TFContrastiveSearchDecoderOnlyOutput]
TFGenerateOutput = Union[
TFGreedySearchOutput, TFSampleOutput, TFBeamSearchOutput, TFBeamSampleOutput, TFContrastiveSearchOutput
]
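# Illustrative note (hedged; not part of the original module): these `Union` aliases
# describe the possible return types of `generate` when `return_dict_in_generate=True`,
# so callers can narrow them, e.g.:
#
#   out = model.generate(**inputs, num_beams=4, return_dict_in_generate=True, output_scores=True)
#   if isinstance(out, (TFBeamSearchEncoderDecoderOutput, TFBeamSearchDecoderOnlyOutput)):
#       print(out.sequences_scores)  # final beam scores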
class TFGenerationMixin:
"""
A class containing all of the functions supporting generation, to be used as a mixin in [`TFPreTrainedModel`].
The class exposes [`~generation.TFGenerationMixin.generate`], which can be used for:
- *greedy decoding* by calling [`~generation.TFGenerationMixin.greedy_search`] if `num_beams=1` and
`do_sample=False`
- *contrastive search* by calling [`~generation.TFGenerationMixin.contrastive_search`] if `penalty_alpha>0` and
`top_k>1`
- *multinomial sampling* by calling [`~generation.TFGenerationMixin.sample`] if `num_beams=1` and
`do_sample=True`
- *beam-search decoding* by calling [`~generation.TFGenerationMixin.beam_search`] if `num_beams>1`
You do not need to call any of the above methods directly. Pass custom parameter values to `generate` instead. To
learn more about decoding strategies, refer to the [text generation strategies guide](./generation_strategies).
"""
_seed_generator = None
@property
def seed_generator(self):
warnings.warn("`seed_generator` is deprecated and will be removed in a future version.", UserWarning)
if self._seed_generator is None:
self._seed_generator = tf.random.Generator.from_non_deterministic_state()
return self._seed_generator
supports_xla_generation = True
def prepare_inputs_for_generation(self, *args, **kwargs):
raise NotImplementedError(
"A model class needs to define a `prepare_inputs_for_generation` method in order to use `generate`."
)
def adjust_logits_during_generation(
self, logits, cur_len, max_length, forced_bos_token_id, forced_eos_token_id, **kwargs
):
"""
Implement in subclasses of [`PreTrainedModel`] for custom behavior to adjust the logits in the generate method.
"""
vocab_size = getattr(self.config, "vocab_size", None)
if vocab_size is None and self.config.is_encoder_decoder:
decoder_config = getattr(self.config, "decoder", None)
if decoder_config is not None:
vocab_size = getattr(self.config.decoder, "vocab_size", None)
if cur_len == 1 and forced_bos_token_id is not None:
vocab_range = tf.constant(range(vocab_size))
return tf.where(vocab_range != forced_bos_token_id, -1e8, logits)
elif cur_len == max_length - 1 and forced_eos_token_id is not None:
vocab_range = tf.constant(range(vocab_size))
return tf.where(vocab_range != forced_eos_token_id, -1e8, logits)
else:
return logits
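# Worked example of the masking above (illustrative numbers): with vocab_size=5 and
# forced_bos_token_id=2, `tf.where([0, 1, 2, 3, 4] != 2, -1e8, logits)` leaves only
# index 2 with a finite score, so the softmax places essentially all probability
# mass on the forced token.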
def compute_transition_scores(
self,
sequences: tf.Tensor,
scores: Tuple[tf.Tensor],
beam_indices: Optional[tf.Tensor] = None,
normalize_logits: bool = False,
) -> tf.Tensor:
"""
Computes the transition scores of sequences given the generation scores (and beam indices, if beam search was
used). This is a convenient method to quickly obtain the scores of the selected tokens at generation time.
Parameters:
sequences (`tf.Tensor`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or
shorter if all batches finished early due to the `eos_token_id`.
scores (`tuple(tf.Tensor)`):
Transition scores for each vocabulary token at each generation step. Beam transition scores consisting
of log probabilities of tokens conditioned on log softmax of previously generated tokens. Tuple of
`tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each
tensor of shape `(batch_size*num_beams, config.vocab_size)`.
beam_indices (`tf.Tensor`, *optional*):
Beam indices of generated token id at each generation step. `tf.Tensor` of shape
`(batch_size*num_return_sequences, sequence_length)`. Only required if `num_beams>1` at
generate time.
normalize_logits (`bool`, *optional*, defaults to `False`):
Whether to normalize the logits (which, for legacy reasons, may be unnormalized).
Return:
`tf.Tensor`: A `tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)` containing
the transition scores (logits).
Examples:
```python
>>> from transformers import GPT2Tokenizer, TFAutoModelForCausalLM
>>> import numpy as np
>>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
>>> model = TFAutoModelForCausalLM.from_pretrained("gpt2")
>>> tokenizer.pad_token_id = tokenizer.eos_token_id
>>> inputs = tokenizer(["Today is"], return_tensors="tf")
>>> # Example 1: Print the scores for each token generated with Greedy Search
>>> outputs = model.generate(**inputs, max_new_tokens=5, return_dict_in_generate=True, output_scores=True)
>>> transition_scores = model.compute_transition_scores(
... outputs.sequences, outputs.scores, normalize_logits=True
... )
>>> # input_length is the length of the input prompt for decoder-only models, like the GPT family, and 1 for
>>> # encoder-decoder models, like BART or T5.
>>> input_length = 1 if model.config.is_encoder_decoder else inputs.input_ids.shape[1]
>>> generated_tokens = outputs.sequences[:, input_length:]
>>> for tok, score in zip(generated_tokens[0], transition_scores[0]):
... # | token | token string | logits | probability
... print(f"| {tok:5d} | {tokenizer.decode(tok):8s} | {score.numpy():.3f} | {np.exp(score.numpy()):.2%}")
| 262 | the | -1.413 | 24.33%
| 1110 | day | -2.609 | 7.36%
| 618 | when | -2.009 | 13.41%
| 356 | we | -1.859 | 15.58%
| 460 | can | -2.508 | 8.14%
>>> # Example 2: Reconstruct the sequence scores from Beam Search
>>> outputs = model.generate(
... **inputs,
... max_new_tokens=5,
... num_beams=4,
... num_return_sequences=4,
... return_dict_in_generate=True,
... output_scores=True,
... )
>>> transition_scores = model.compute_transition_scores(
... outputs.sequences, outputs.scores, outputs.beam_indices, normalize_logits=False
... )
>>> # If you sum the generated tokens' scores and apply the length penalty, you'll get the sequence scores.
>>> # Tip: recomputing the scores is only guaranteed to match with `normalize_logits=False`. Depending on the
>>> # use case, you might want to recompute it with `normalize_logits=True`.
>>> output_length = input_length + np.sum(transition_scores.numpy() < 0, axis=1)
>>> length_penalty = model.generation_config.length_penalty
>>> reconstructed_scores = np.sum(transition_scores, axis=1) / (output_length**length_penalty)
>>> print(np.allclose(outputs.sequences_scores, reconstructed_scores))
True
```"""
# 1. In the absence of `beam_indices`, we can assume that we come from e.g. greedy search, which is equivalent
# to a beam search approach where the first (and only) beam is always selected
if beam_indices is None:
beam_indices = tf.tile(tf.expand_dims(tf.range(scores[0].shape[0]), axis=1), [1, len(scores)])
# 2. reshape scores as [batch_size, vocab_size, # generation steps] with # generation steps being
# seq_len - input_length
scores = tf.transpose(tf.reshape(tf.stack(scores), (len(scores), -1)), (1, 0))
scores = tf.reshape(scores, (-1, self.config.vocab_size, scores.shape[-1]))
# 3. Optionally normalize the logits (across the vocab dimension)
if normalize_logits:
scores = tf.nn.log_softmax(scores, axis=1)
# 4. cut beam_indices to longest beam length
beam_indices_mask = beam_indices < 0
max_beam_length = tf.math.reduce_max(
tf.math.reduce_sum((1 - tf.cast(beam_indices_mask, dtype=tf.int32)), axis=-1)
)
beam_indices = beam_indices[:, -max_beam_length:]
beam_indices_mask = beam_indices_mask[:, -max_beam_length:]
# 5. Set indices of beams that finished early to 0; such indices will be masked correctly afterwards
beam_indices = tf.where(beam_indices_mask, 0, beam_indices)
# 6. Define which indices contributed to scores
cut_idx = sequences.shape[-1] - max_beam_length
token_indices = sequences[:, cut_idx:]
gen_step_idx = tf.broadcast_to(tf.range(scores.shape[-1]), token_indices.shape)
indices = tf.stack([beam_indices, token_indices, gen_step_idx], axis=-1)
# 7. Compute scores
transition_scores = tf.gather_nd(scores, indices)
# 8. Mask out transition_scores of beams that stopped early
transition_scores = tf.where(beam_indices_mask, 0, transition_scores)
return transition_scores
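# Shape sketch for the gather above (illustrative): after step 2, `scores` has shape
# (batch_size*num_beams, vocab_size, generation_steps); each row of `indices` holds a
# [beam, token, step] triple, so `tf.gather_nd` retrieves the log-score of the token
# that the selected beam actually emitted at that step.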
def _validate_model_class(self):
"""
Confirms that the model class is compatible with generation. If not, raises an exception that points to the
right class to use.
"""
if not self.can_generate():
generate_compatible_mappings = [
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_VISION_2_SEQ_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
]
generate_compatible_classes = set()
for model_mapping in generate_compatible_mappings:
supported_models = model_mapping.get(type(self.config), default=None)
if supported_models is not None:
generate_compatible_classes.add(supported_models.__name__)
exception_message = (
f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as "
"it doesn't have a language model head."
)
if generate_compatible_classes:
exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}"
raise TypeError(exception_message)
def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):
"""Validates model kwargs for generation. Generate argument typos will also be caught here."""
# Excludes arguments that are handled before calling any model function
if self.config.is_encoder_decoder:
for key in ["decoder_input_ids"]:
model_kwargs.pop(key, None)
unused_model_args = []
model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)
# `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If
# `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)
if "kwargs" in model_args or "model_kwargs" in model_args:
model_args |= set(inspect.signature(self.call).parameters)
for key, value in model_kwargs.items():
if value is not None and key not in model_args:
unused_model_args.append(key)
if unused_model_args:
raise ValueError(
f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the"
" generate arguments will also show up in this list)"
)
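# Hedged example of the check above: a typo such as `model.generate(input_ids,
# max_new_tokenss=5)` is not absorbed by `generation_config.update(...)`, lands in
# `model_kwargs`, matches no parameter of `prepare_inputs_for_generation` (or `call`),
# and therefore surfaces in `unused_model_args`.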
def generate(
self,
inputs: Optional[tf.Tensor] = None,
generation_config: Optional[GenerationConfig] = None,
logits_processor: Optional[TFLogitsProcessorList] = None,
seed=None,
**kwargs,
) -> Union[TFGenerateOutput, tf.Tensor]:
r"""
Generates sequences of token ids for models with a language modeling head.
<Tip warning={true}>
Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
model's default generation configuration. You can override any `generation_config` by passing the corresponding
parameters to generate, e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
For an overview of generation strategies and code examples, check out the [following
guide](./generation_strategies).
</Tip>
Parameters:
inputs (`tf.Tensor` of varying shape depending on the modality, *optional*):
The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`
should be in the format of `input_ids`. For encoder-decoder models `inputs` can represent any of
`input_ids`, `input_values`, `input_features`, or `pixel_values`.
generation_config (`~generation.GenerationConfig`, *optional*):
The generation configuration to be used as base parametrization for the generation call. `**kwargs`
passed to generate matching the attributes of `generation_config` will override them. If
`generation_config` is not provided, the default will be used, which has the following loading
priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
default values, whose documentation should be checked to parameterize generation.
logits_processor (`LogitsProcessorList`, *optional*):
Custom logits processors that complement the default logits processors built from arguments and
generation config. If a logit processor is passed that is already created with the arguments or a
generation config, an error is thrown. This feature is intended for advanced users.
seed (`List[int]`, *optional*):
Random seed to control sampling, containing two integers, used when `do_sample` is `True`. See the
`seed` argument from stateless functions in `tf.random`.
kwargs:
Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
forwarded to the `call` function of the model. If the model is an encoder-decoder model, encoder
specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.
Return:
[`~utils.ModelOutput`] or `tf.Tensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when
`config.return_dict_in_generate=True`) or a `tf.Tensor`.
If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible
[`~utils.ModelOutput`] types are:
- [`~generation.TFGreedySearchDecoderOnlyOutput`],
- [`~generation.TFSampleDecoderOnlyOutput`],
- [`~generation.TFBeamSearchDecoderOnlyOutput`],
- [`~generation.TFBeamSampleDecoderOnlyOutput`]
If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
[`~utils.ModelOutput`] types are:
- [`~generation.TFGreedySearchEncoderDecoderOutput`],
- [`~generation.TFSampleEncoderDecoderOutput`],
- [`~generation.TFBeamSearchEncoderDecoderOutput`],
- [`~generation.TFBeamSampleEncoderDecoderOutput`]
"""
# 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
self._validate_model_class()
# priority: `generation_config` argument > `model.generation_config` (the default generation config)
if generation_config is None:
# legacy: users may modify the model configuration to control generation -- update the generation config
# model attribute accordingly, if it was created from the model config
if self.generation_config._from_model_config:
new_generation_config = GenerationConfig.from_model_config(self.config)
if new_generation_config != self.generation_config:
warnings.warn(
"You have modified the pretrained model configuration to control generation. This is a"
" deprecated strategy to control generation and will be removed soon, in a future version."
" Please use a generation configuration file (see"
" https://huggingface.co/docs/transformers/main_classes/text_generation)"
)
self.generation_config = new_generation_config
generation_config = self.generation_config
generation_config = copy.deepcopy(generation_config)
model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
generation_config.validate()
self._validate_model_kwargs(model_kwargs.copy())
# 2. Cast input dtypes to tf.int32 unless they're floats (which happens for some image models)
if inputs is not None:
if isinstance(inputs, tf.Tensor) and inputs.dtype.is_floating:
pass
elif isinstance(inputs, np.ndarray) and np.issubdtype(inputs.dtype, np.floating):
pass
else:
inputs = tf.cast(inputs, tf.int32)
if model_kwargs.get("attention_mask") is not None:
model_kwargs["attention_mask"] = tf.cast(model_kwargs["attention_mask"], tf.int32)
if "decoder_input_ids" in model_kwargs:
if (
isinstance(model_kwargs["decoder_input_ids"], tf.Tensor)
and model_kwargs["decoder_input_ids"].dtype.is_floating
):
pass
elif isinstance(model_kwargs["decoder_input_ids"], np.ndarray) and np.issubdtype(
model_kwargs["decoder_input_ids"].dtype, np.floating
):
pass
else:
model_kwargs["decoder_input_ids"] = tf.cast(model_kwargs["decoder_input_ids"], tf.int32)
# 3. Set generation parameters if not already defined
logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:
if model_kwargs.get("attention_mask") is None:
logger.warning(
"The attention mask and the pad token id were not set. As a consequence, you may observe "
"unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results."
)
eos_token_id = generation_config.eos_token_id
if isinstance(eos_token_id, list):
eos_token_id = eos_token_id[0]
logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.")
generation_config.pad_token_id = eos_token_id
use_xla = not tf.executing_eagerly()
if use_xla and not self.supports_xla_generation:
raise ValueError(
"The selected model does not support Graph mode nor XLA generation (e.g. from tf.function())"
)
# 4. Define model inputs
inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
inputs, generation_config.bos_token_id, model_kwargs
)
# `inputs_tensor` now has to be defined and cannot be None anymore
batch_size = shape_list(inputs_tensor)[0]
# 5. Prepare other model kwargs
model_kwargs["output_attentions"] = generation_config.output_attentions
model_kwargs["output_hidden_states"] = generation_config.output_hidden_states
model_kwargs["use_cache"] = generation_config.use_cache
accepts_attention_mask = "attention_mask" in set(inspect.signature(self.call).parameters.keys())
requires_attention_mask = "encoder_outputs" not in model_kwargs
if model_kwargs.get("attention_mask", None) is None and requires_attention_mask and accepts_attention_mask:
model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
inputs_tensor, generation_config.pad_token_id, generation_config.eos_token_id
)
# decoder-only models should use left-padding for generation
if not self.config.is_encoder_decoder:
if generation_config.pad_token_id is not None and tf.math.reduce_any(
inputs_tensor[:, -1] == generation_config.pad_token_id
):
logger.warning(
"A decoder-only architecture is being used, but right-padding was detected! For correct "
"generation results, please set `padding_side='left'` when initializing the tokenizer."
)
if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs:
# if model is encoder decoder encoder_outputs are created and added to `model_kwargs`
model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
inputs_tensor, model_kwargs, model_input_name
)
# 6. Prepare model inputs which will be used for auto-regressive generation
if self.config.is_encoder_decoder:
# if encoder-decoder then `input_ids` come from `decoder_start_token_id`
input_ids = self._prepare_decoder_input_ids_for_generation(
batch_size,
decoder_start_token_id=generation_config.decoder_start_token_id,
bos_token_id=generation_config.bos_token_id,
model_kwargs=model_kwargs,
)
else:
input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop("input_ids")
# 7. Prepare `max_length` depending on other stopping criteria.
input_ids_seq_length = shape_list(input_ids)[-1]
has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
if has_default_max_length and generation_config.max_new_tokens is None:
warnings.warn(
f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
"This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
" recommend using `max_new_tokens` to control the maximum length of the generation.",
UserWarning,
)
elif generation_config.max_new_tokens is not None:
generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
if not has_default_max_length:
logger.warning(
f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length` (="
f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
"Please refer to the documentation for more information. "
"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
)
# If the input length is a tensor (i.e. dynamic length), skip length checks
if not isinstance(input_ids_seq_length, tf.Tensor):
if (
generation_config.min_length is not None
and generation_config.min_length > generation_config.max_length
):
raise ValueError(
f"Unfeasable length constraints: the minimum length ({generation_config.min_length}) is larger"
f" than the maximum length ({generation_config.max_length})"
)
if input_ids_seq_length >= generation_config.max_length:
input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
logger.warning(
f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
" increasing`max_new_tokens`."
)
# 8. determine generation mode
is_contrastive_search_gen_mode = (
generation_config.top_k is not None
and generation_config.top_k > 1
and generation_config.do_sample is False
and generation_config.penalty_alpha is not None
and generation_config.penalty_alpha > 0
)
is_greedy_gen_mode = (
not is_contrastive_search_gen_mode
and (generation_config.num_beams == 1)
and generation_config.do_sample is False
)
is_beam_gen_mode = (
not is_contrastive_search_gen_mode
and (generation_config.num_beams > 1)
and generation_config.do_sample is False
)
is_sample_gen_mode = (generation_config.num_beams == 1) and generation_config.do_sample is True
is_beam_sample_gen_mode = (generation_config.num_beams > 1) and generation_config.do_sample is True
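# Summary of the flags above (restating the conditions, checked in this order):
#   contrastive search : do_sample=False, top_k>1, penalty_alpha>0
#   greedy search      : otherwise, num_beams=1 and do_sample=False
#   beam search        : otherwise, num_beams>1 and do_sample=False
#   multinomial sample : num_beams=1 and do_sample=True
#   beam sample        : num_beams>1 and do_sample=True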
# 9. prepare distribution pre-processing samplers
logits_processor = self._get_logits_processor(
generation_config=generation_config,
input_ids_seq_length=input_ids_seq_length,
logits_processor=logits_processor,
)
# 10. go into different generation modes
if is_greedy_gen_mode:
if generation_config.num_return_sequences > 1:
raise ValueError(
f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing"
" greedy search."
)
# 11. run greedy search
return self.greedy_search(
input_ids,
max_length=generation_config.max_length,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
logits_processor=logits_processor,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
**model_kwargs,
)
elif is_contrastive_search_gen_mode:
if generation_config.num_return_sequences > 1:
raise ValueError(
f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing"
" contrastive search."
)
# 11. run contrastive search
return self.contrastive_search(
input_ids,
top_k=generation_config.top_k,
penalty_alpha=generation_config.penalty_alpha,
logits_processor=logits_processor,
max_length=generation_config.max_length,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
**model_kwargs,
)
elif is_sample_gen_mode:
# 11. prepare logits warper
logits_warper = self._get_logits_warper(generation_config=generation_config)
# 12. expand input_ids with `num_return_sequences` additional sequences per batch
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_return_sequences,
is_encoder_decoder=self.config.is_encoder_decoder,
**model_kwargs,
)
# 13. run sample
return self.sample(
input_ids,
logits_processor=logits_processor,
logits_warper=logits_warper,
max_length=generation_config.max_length,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
seed=seed,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
**model_kwargs,
)
elif is_beam_gen_mode:
if generation_config.num_beams < generation_config.num_return_sequences:
raise ValueError(
"Beam search decoding cannot return more sequences than it has beams. Please set num_beams >="
f" num_return_sequences, got {generation_config.num_beams} and"
f" {generation_config.num_return_sequences} (respectivelly)"
)
# 11. broadcast inputs to the desired number of beams
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_beams,
is_encoder_decoder=self.config.is_encoder_decoder,
expand_in_new_axis=True,
**model_kwargs,
)
# 12. run beam search
return self.beam_search(
input_ids,
max_length=generation_config.max_length,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
length_penalty=generation_config.length_penalty,
early_stopping=generation_config.early_stopping,
logits_processor=logits_processor,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
num_return_sequences=generation_config.num_return_sequences,
**model_kwargs,
)
elif is_beam_sample_gen_mode:
if generation_config.num_beams < generation_config.num_return_sequences:
raise ValueError(
"Beam search decoding cannot return more sequences than it has beams. Please set num_beams >="
f" num_return_sequences, got {generation_config.num_beams} and"
f" {generation_config.num_return_sequences} (respectivelly)"
)
# 11. prepare logits warper
logits_warper = self._get_logits_warper(generation_config=generation_config)
# 12. broadcast inputs to the desired number of beams
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_beams,
is_encoder_decoder=self.config.is_encoder_decoder,
expand_in_new_axis=True,
**model_kwargs,
)
# 13. run beam sample (beam search with sampling)
return self.beam_search(
input_ids,
do_sample=True,
max_length=generation_config.max_length,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
length_penalty=generation_config.length_penalty,
early_stopping=generation_config.early_stopping,
logits_processor=logits_processor,
logits_warper=logits_warper,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
num_return_sequences=generation_config.num_return_sequences,
**model_kwargs,
)
def _prepare_attention_mask_for_generation(
self,
inputs: tf.Tensor,
pad_token_id: Optional[int],
eos_token_id: Optional[int],
) -> tf.Tensor:
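        # an attention mask can only be inferred when `inputs` are token ids (a 2D int tensor) that actually
        # contain the pad token, and pad is distinguishable from EOS (otherwise legitimate trailing EOS tokens
        # would be masked out as well)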
is_input_ids = len(inputs.shape) == 2 and inputs.dtype in (tf.int32, tf.int64)
is_pad_token_in_inputs = (pad_token_id is not None) and tf.math.reduce_any(inputs == pad_token_id)
is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id != eos_token_id)
# Check if input is input_ids and padded -> only then is attention_mask defined
if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id:
return tf.cast(tf.math.not_equal(inputs, pad_token_id), dtype=tf.int32)
else:
return tf.ones(inputs.shape[:2], dtype=tf.int32)
def _prepare_encoder_decoder_kwargs_for_generation(
self, inputs_tensor: tf.Tensor, model_kwargs, model_input_name: Optional[str] = None
) -> Dict[str, Any]:
# 1. get encoder and store encoder outputs
encoder = self.get_encoder()
# 2. prepare encoder args and encoder kwargs from model kwargs
irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"]
encoder_kwargs = {
argument: value
for argument, value in model_kwargs.items()
if not any(argument.startswith(p) for p in irrelevant_prefix)
}
encoder_signature = set(inspect.signature(encoder.call).parameters)
encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature
if not encoder_accepts_wildcard:
encoder_kwargs = {
argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature
}
        # 3. make sure the encoder returns `ModelOutput` (note: vision encoders don't take `attention_mask`;
        # the signature filtering above already drops it when unsupported)
encoder_kwargs["return_dict"] = True
encoder_kwargs[model_input_name] = inputs_tensor
if model_input_name != self.main_input_name: # in Keras, the first input must always be passed
encoder_kwargs[self.main_input_name] = None
encoder_outputs = encoder(**encoder_kwargs)
model_kwargs["encoder_outputs"] = encoder_outputs
return model_kwargs
def _prepare_decoder_input_ids_for_generation(
self,
batch_size: int,
decoder_start_token_id: int = None,
bos_token_id: int = None,
model_kwargs: Optional[Dict[str, tf.Tensor]] = None,
) -> tf.Tensor:
# prepare `input_ids` for decoder if model is encoder-decoder
if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
return model_kwargs.pop("decoder_input_ids")
else:
decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
return tf.ones((batch_size, 1), dtype=tf.int32) * decoder_start_token_id
def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int:
# retrieve decoder_start_token_id for encoder-decoder models
# fall back to bos_token_id if necessary
decoder_start_token_id = (
decoder_start_token_id
if decoder_start_token_id is not None
else self.generation_config.decoder_start_token_id
)
bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id
if decoder_start_token_id is not None:
return decoder_start_token_id
elif bos_token_id is not None:
return bos_token_id
raise ValueError(
"`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
)
@staticmethod
def _expand_inputs_for_generation(
expand_size: int = 1,
is_encoder_decoder: bool = False,
input_ids: Optional[tf.Tensor] = None,
expand_in_new_axis: bool = False,
**model_kwargs,
) -> Tuple[tf.Tensor, Dict[str, Any]]:
"""
Expands tensors from [batch_size, ...] to [batch_size * expand_size, ...] or [batch_size, expand_size, ...],
depending on `expand_in_new_axis`. Beam-based approaches expect this function to be used with
`expand_in_new_axis=True`
"""
def _expand_tensor(tensor: tf.Tensor):
if expand_in_new_axis:
shape = shape_list(tensor)
return tf.broadcast_to(tensor[:, None], (shape[0], expand_size) + tuple(shape[1:]))
else:
return tf.repeat(tensor, expand_size, axis=0)
def _expand_dict_for_generation(dict_to_expand):
for key in dict_to_expand:
if dict_to_expand[key] is not None and isinstance(dict_to_expand[key], tf.Tensor):
dict_to_expand[key] = _expand_tensor(dict_to_expand[key])
return dict_to_expand
if input_ids is not None:
input_ids = _expand_tensor(input_ids)
model_kwargs = _expand_dict_for_generation(model_kwargs)
if is_encoder_decoder:
if model_kwargs.get("encoder_outputs") is None:
raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.")
model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"])
return input_ids, model_kwargs
def _prepare_model_inputs(
self,
inputs: Optional[tf.Tensor] = None,
bos_token_id: Optional[int] = None,
model_kwargs: Optional[Dict[str, tf.Tensor]] = None,
) -> Tuple[tf.Tensor, Optional[str], Dict[str, tf.Tensor]]:
"""
This function extracts the model-specific `inputs` for generation.
"""
# 1. retrieve all kwargs that are non-None or non-model input related.
# some encoder-decoder models have different names for model and encoder
if (
self.config.is_encoder_decoder
and hasattr(self, "encoder")
and hasattr(self.encoder, "main_input_name")
and self.encoder.main_input_name != self.main_input_name
):
input_name = self.encoder.main_input_name
else:
input_name = self.main_input_name
model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None or k != input_name}
# 2. check whether model_input_name is passed as kwarg
# if yes and `inputs` is None use kwarg inputs
inputs_kwarg = model_kwargs.pop(input_name, None)
if inputs_kwarg is not None and inputs is not None:
raise ValueError(
f"`inputs`: {inputs}` were passed alongside {input_name} which is not allowed."
f"Make sure to either pass {inputs} or {input_name}=..."
)
elif inputs_kwarg is not None:
inputs = inputs_kwarg
# 3. In the presence of `inputs_embeds` for text models:
# - decoder-only models should complain if the user attempts to pass `inputs_embeds`, but the model
# doesn't have its forwarding implemented. `inputs_embeds` is kept in `model_kwargs` and can coexist with
# input_ids (`inputs_embeds` will be used in the 1st generation step, as opposed to `input_ids`)
# - encoder-decoder models should complain if the user attempts to pass `inputs_embeds` and `input_ids`, and
# pull the former to inputs. It will be used in place of `input_ids` to get the encoder hidden states.
if input_name == "input_ids" and "inputs_embeds" in model_kwargs:
if not self.config.is_encoder_decoder:
has_inputs_embeds_forwarding = "inputs_embeds" in set(
inspect.signature(self.prepare_inputs_for_generation).parameters.keys()
)
if not has_inputs_embeds_forwarding:
raise ValueError(
f"You passed `inputs_embeds` to `.generate()`, but the model class {self.__class__.__name__} "
"doesn't have its forwarding implemented. See the GPT2 implementation for an example "
"(https://github.com/huggingface/transformers/pull/21405), and feel free to open a PR with it!"
)
# In this case, `input_ids` is moved to the `model_kwargs`, so a few automations (like the creation of
# the attention mask) can rely on the actual model input.
model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation(
inputs, bos_token_id, model_kwargs=model_kwargs
)
else:
if inputs is not None:
raise ValueError("You passed `inputs_embeds` and `input_ids` to `.generate()`. Please pick one.")
inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds"
# 4. if `inputs` is still None, try to create `input_ids` from BOS token
inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs)
return inputs, input_name, model_kwargs
def _maybe_initialize_input_ids_for_generation(
self,
inputs: Optional[tf.Tensor] = None,
bos_token_id: Optional[int] = None,
model_kwargs: Optional[Dict[str, tf.Tensor]] = None,
) -> tf.Tensor:
"""Initializes input ids for generation, if necessary."""
if inputs is not None:
return inputs
encoder_outputs = model_kwargs.get("encoder_outputs")
if self.config.is_encoder_decoder and encoder_outputs is not None:
# make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding
shape = encoder_outputs.last_hidden_state.shape[:-1]
return tf.ones(shape, dtype=tf.int32) * -100
if bos_token_id is None:
raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.")
# If there is some tensor in `model_kwargs`, we can infer the batch size from it. This is helpful with
# soft-prompting or in multimodal implementations built on top of decoder-only language models.
batch_size = 1
for value in model_kwargs.values():
if isinstance(value, tf.Tensor):
batch_size = value.shape[0]
break
return tf.ones((batch_size, 1), dtype=tf.int32) * bos_token_id
@staticmethod
def _extract_past_from_model_output(outputs: ModelOutput):
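        # the cache goes by different names depending on the architecture: `past_key_values` for most models,
        # `mems` for XLNet/Transfo-XL-style models, and `past_buckets_states` for Reformer-style models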
past_key_values = None
if "past_key_values" in outputs:
past_key_values = outputs.past_key_values
elif "mems" in outputs:
past_key_values = outputs.mems
elif "past_buckets_states" in outputs:
past_key_values = outputs.past_buckets_states
return past_key_values
def _update_model_kwargs_for_generation(
self, outputs: ModelOutput, model_kwargs: Dict[str, Any], is_encoder_decoder: bool = False
) -> Dict[str, Any]:
# update past_key_values
model_kwargs["past_key_values"] = self._extract_past_from_model_output(outputs)
# update attention mask
if not is_encoder_decoder:
if "attention_mask" in model_kwargs:
attention_mask = model_kwargs["attention_mask"]
model_kwargs["attention_mask"] = tf.concat(
[attention_mask, tf.ones((shape_list(attention_mask)[0], 1), dtype=tf.int32)], axis=-1
)
return model_kwargs
def _update_model_kwargs_for_xla_generation(
self,
model_outputs: ModelOutput,
model_kwargs: Dict[str, Any],
cur_len: int,
max_length: int,
batch_size: int,
is_encoder_decoder: bool = False,
batch_axis: int = 0,
):
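        """
        Updates `model_kwargs` for XLA generation: XLA requires static tensor shapes, so on the first step the
        attention mask and `past_key_values` are zero-padded up front to their final length, and on every later
        step a single new position is written in place via `dynamic_update_slice`, instead of growing the
        tensors by concatenation as in eager mode.
        """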
def _initialize_attention(model_kwargs, num_padding_values, is_encoder_decoder):
"""initializes the appropriate attention mask -- encoder-decoder models use `decoder_attention_mask`"""
if is_encoder_decoder:
# One 1 for decoder_start_token_id, 0s for the currently-unfilled locations in the past_key_values tensor,
# 1s for the actual input_ids
decoder_attention_mask = tf.concat(
[
tf.ones((batch_size, 1), dtype=tf.int32),
tf.zeros((batch_size, num_padding_values), dtype=tf.int32),
tf.ones((batch_size, 1), dtype=tf.int32),
],
axis=1,
)
mask = {"decoder_attention_mask": decoder_attention_mask}
else:
attention_mask = model_kwargs.pop("attention_mask")
# 0s for the currently-unfilled locations in the past_key_values tensor, 1s for the actual input_ids
attention_mask = tf.concat(
[
attention_mask,
tf.zeros((batch_size, num_padding_values), dtype=attention_mask.dtype),
tf.ones((batch_size, 1), dtype=attention_mask.dtype),
],
axis=1,
)
mask = {"attention_mask": attention_mask}
return mask
def _update_attention(model_kwargs, new_past_index, is_encoder_decoder):
"""updates the appropriate attention mask -- encoder-decoder models use `decoder_attention_mask`"""
update_start = tf.constant([0, 1], dtype=tf.int32) * new_past_index
if is_encoder_decoder:
decoder_attention_mask = model_kwargs.pop("decoder_attention_mask")
decoder_attention_mask_update_slice = tf.ones((batch_size, 1), dtype=decoder_attention_mask.dtype)
decoder_attention_mask = dynamic_update_slice(
decoder_attention_mask, decoder_attention_mask_update_slice, update_start
)
mask = {"decoder_attention_mask": decoder_attention_mask}
else:
attention_mask = model_kwargs.pop("attention_mask")
attention_mask_update_slice = tf.ones((batch_size, 1), dtype=attention_mask.dtype)
attention_mask = dynamic_update_slice(attention_mask, attention_mask_update_slice, update_start)
mask = {"attention_mask": attention_mask}
return mask
def _initialize_past(past_key_values, num_padding_values, batch_axis):
"""initialize past_key_values with zeros -- the structure depends on `batch_axis`"""
if batch_axis == 0:
padding_values = tf.constant([[0, 0], [0, 0], [0, num_padding_values], [0, 0]], dtype=tf.int32)
new_past = ()
for past_layer in past_key_values:
new_past_layer = list(past_layer)
for i in range(len(new_past_layer[:2])):
new_past_layer[i] = tf.pad(past_layer[i], padding_values)
new_past += (tuple(new_past_layer),)
else:
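                # builds a (5, 2) paddings matrix that is all zeros except entry [3, 1]: pad `num_padding_values`
                # slots after the existing entries of axis 3 (the sequence axis for batch_axis == 1 caches)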
padding_values = tf.scatter_nd(indices=[[3, 1]], updates=[num_padding_values], shape=(5, 2))
new_past = list(past_key_values)
for i in range(len(past_key_values)):
new_past[i] = tf.pad(past_key_values[i], padding_values)
return new_past
def _update_past(past_key_values, new_past_index, batch_axis):
if batch_axis == 0:
slice_start_base = tf.constant([0, 0, 1, 0])
new_past = ()
for past_layer in past_key_values:
new_past_layer = list(past_layer)
for i in range(len(new_past_layer[:2])):
update_slice = past_layer[i][:, :, -1:]
# Write the last slice to the first open location in the padded past_key_values array
# and then truncate the last slice off the array
new_past_layer[i] = dynamic_update_slice(
past_layer[i][:, :, :-1], update_slice, slice_start_base * new_past_index
)
new_past += (tuple(new_past_layer),)
else:
slice_start_base = tf.constant([0, 0, 0, 1, 0])
new_past = [None for _ in range(len(past_key_values))]
for i in range(len(past_key_values)):
update_slice = past_key_values[i][:, :, :, -1:]
# Write the last slice to the first open location in the padded past_key_values array
# and then truncate the last slice off the array
new_past[i] = dynamic_update_slice(
past_key_values[i][:, :, :, :-1], update_slice, slice_start_base * new_past_index
)
return new_past
past_key_values = self._extract_past_from_model_output(model_outputs)
if past_key_values is None:
raise ValueError(
"No known `past_key_values variable` found in model outputs (model outputs keys:"
f" {list(model_outputs.keys())})"
)
is_past_initialized = model_kwargs.pop("past_key_values", None) is not None
if not is_past_initialized:
            # The padded version of `past_key_values` has length `max_length - 1`, as `past_key_values` holds
            # information about previous autoregressive generation steps (step 0 has no past, step 1 has one
            # past entry, ..., the last step has `max_length - 1` past entries).
num_padding_values = max_length - cur_len - 1
mask = _initialize_attention(model_kwargs, num_padding_values, is_encoder_decoder)
new_past = _initialize_past(past_key_values, num_padding_values, batch_axis)
else:
# The new index of past_key_values to be filled corresponds to the current length of the sequence, with two
# subtractions: -1 because past_key_values holds information regarding previous generation steps (read comment above)
# and -1 again because in an array the index is the length of the array minus 1.
new_past_index = cur_len - 2
mask = _update_attention(model_kwargs, new_past_index, is_encoder_decoder)
new_past = _update_past(past_key_values, new_past_index, batch_axis)
# sets the updated variables (mask and past_key_values)
model_kwargs.update(mask)
model_kwargs["past_key_values"] = tuple(new_past)
return model_kwargs
def _get_logits_warper(
self,
generation_config: GenerationConfig,
) -> TFLogitsProcessorList:
"""
        This method returns a [`TFLogitsProcessorList`] list object that contains all relevant [`TFLogitsWarper`]
instances used for multinomial sampling.
"""
# instantiate warpers list
warpers = TFLogitsProcessorList()
# the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files
        # all warpers can be found in `tf_logits_process.py`
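        # note: the order below matters -- temperature rescales the whole distribution before top-k and top-p
        # truncate it, matching the order in which the warpers are applied at sampling time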
if generation_config.temperature is not None and generation_config.temperature != 1.0:
warpers.append(TFTemperatureLogitsWarper(generation_config.temperature))
if generation_config.top_k is not None and generation_config.top_k != 0:
warpers.append(TFTopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=1))
if generation_config.top_p is not None and generation_config.top_p < 1.0:
warpers.append(TFTopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=1))
return warpers
def _get_logits_processor(
self,
generation_config: GenerationConfig,
input_ids_seq_length: int,
logits_processor: Optional[TFLogitsProcessorList],
) -> TFLogitsProcessorList:
"""
        This method returns a [`TFLogitsProcessorList`] list object that contains all relevant [`TFLogitsProcessor`]
instances used to modify the scores of the language model head.
"""
processors = TFLogitsProcessorList()
# instantiate processors list
if generation_config.repetition_penalty is not None and generation_config.repetition_penalty != 1.0:
processors.append(TFRepetitionPenaltyLogitsProcessor(penalty=generation_config.repetition_penalty))
if generation_config.no_repeat_ngram_size is not None and generation_config.no_repeat_ngram_size > 0:
processors.append(TFNoRepeatNGramLogitsProcessor(generation_config.no_repeat_ngram_size))
if generation_config.bad_words_ids is not None:
processors.append(
TFNoBadWordsLogitsProcessor(generation_config.bad_words_ids, generation_config.eos_token_id)
)
if (
generation_config.min_length is not None
and generation_config.eos_token_id is not None
and generation_config.min_length > 0
):
processors.append(TFMinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id))
if generation_config.forced_bos_token_id is not None:
processors.append(TFForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id))
if generation_config.forced_eos_token_id is not None:
processors.append(
TFForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id)
)
if generation_config.suppress_tokens is not None:
processors.append(TFSuppressTokensLogitsProcessor(generation_config.suppress_tokens))
if generation_config.begin_suppress_tokens is not None:
begin_index = input_ids_seq_length
begin_index = (
begin_index
if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None)
else begin_index + 1
)
if generation_config.forced_decoder_ids is not None:
begin_index += generation_config.forced_decoder_ids[-1][
0
] # generation starts after the last token that is forced
processors.append(
TFSuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index)
)
if generation_config.forced_decoder_ids is not None:
processors.append(TFForceTokensLogitsProcessor(generation_config.forced_decoder_ids))
processors = self._merge_criteria_processor_list(processors, logits_processor)
return processors
def _merge_criteria_processor_list(
self,
default_list: TFLogitsProcessorList,
custom_list: TFLogitsProcessorList,
) -> TFLogitsProcessorList:
if len(custom_list) == 0:
return default_list
for default in default_list:
for custom in custom_list:
if type(custom) is type(default):
object_type = "logits processor"
raise ValueError(
f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to"
f" `generate`, but it has already been created with the values {default}. {default} has been"
" created by passing the corresponding arguments to generate or by the model's config default"
f" values. If you just want to change the default values of {object_type} consider passing"
f" them as arguments to `generate` instead of using a custom {object_type}."
)
default_list.extend(custom_list)
return default_list
def greedy_search(
self,
input_ids: tf.Tensor,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
logits_processor: Optional[TFLogitsProcessorList] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
**model_kwargs,
) -> Union[TFGreedySearchOutput, tf.Tensor]:
r"""
Generates sequences for models with a language modeling head using greedy decoding.
Parameters:
input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
logits_processor (`TFLogitsProcessorList`, *optional*):
An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`]
used to modify the prediction scores of the language modeling head applied at each generation step.
max_length (`int`, *optional*, defaults to 20):
The maximum length of the sequence to be generated.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, List[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more details.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more details.
output_scores (`bool`, *optional*, defaults to `False`):
Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
model_kwargs:
Additional model specific keyword arguments will be forwarded to the `call` function of the model. If
model is an encoder-decoder model the kwargs should include `encoder_outputs`.
Return:
[`~generation.TFGreedySearchDecoderOnlyOutput`], [`~generation.TFGreedySearchEncoderDecoderOutput`] or
`tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a
[`~generation.TFGreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
`return_dict_in_generate=True` or a [`~generation.TFGreedySearchEncoderDecoderOutput`] if
`model.config.is_encoder_decoder=True`.
Examples:
```python
>>> from transformers import (
... AutoTokenizer,
... TFAutoModelForCausalLM,
... TFLogitsProcessorList,
... TFMinLengthLogitsProcessor,
... )
>>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
>>> model = TFAutoModelForCausalLM.from_pretrained("gpt2")
>>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
>>> model.generation_config.pad_token_id = model.generation_config.eos_token_id
>>> input_prompt = "Today is a beautiful day, and"
>>> input_ids = tokenizer(input_prompt, return_tensors="tf").input_ids
>>> # instantiate logits processors
>>> logits_processor = TFLogitsProcessorList(
... [
... TFMinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id),
... ]
... )
>>> outputs = model.greedy_search(input_ids, logits_processor=logits_processor)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
["Today is a beautiful day, and I'm so happy to be here. I'm so happy to"]
```"""
# 1. init greedy_search values
logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
max_length = max_length if max_length is not None else self.generation_config.max_length
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
output_attentions = (
output_attentions if output_attentions is not None else self.generation_config.output_attentions
)
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate
if return_dict_in_generate is not None
else self.generation_config.return_dict_in_generate
)
use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache)
use_xla = not tf.executing_eagerly()
        # TODO (Joao): fix cache format or find programmatic way to detect cache index
        # GPT2 and other models have a slightly different cache structure, with a different batch axis
        model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self)
        cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0
# some models, like XLNet, need more than the last token in the presence of past_key_values
needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys())
# 2. init `attentions`, `hidden_states`, and `scores` tuples
scores = [] if (return_dict_in_generate and output_scores) else None
decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None
cross_attentions = [] if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None
        # 3. init tensors to use for "xla-compilable" generate function
batch_size, cur_len = shape_list(input_ids)
# initialize `generated` (`input_ids` padded with `pad_token_id`), `finished_sequences`
input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0)
generated = tf.concat([input_ids, input_ids_padding], axis=-1)
finished_sequences = tf.zeros((batch_size,), dtype=tf.bool)
# 4. define "xla-compile-able" stop-condition and auto-regressive function
# define condition fn
def greedy_search_cond_fn(generated, finished_sequences, cur_len, model_kwargs):
"""state termination condition fn."""
return ~tf.reduce_all(finished_sequences)
        # define body fn
def greedy_search_body_fn(generated, finished_sequences, cur_len, model_kwargs):
"""state update fn."""
if model_kwargs.get("past_key_values") is None or needs_full_input:
input_ids = generated[:, :cur_len]
else:
input_ids = tf.expand_dims(generated[:, cur_len - 1], -1)
model_inputs = self.prepare_inputs_for_generation(input_ids, use_cache=use_cache, **model_kwargs)
# forward pass to get next token logits
model_outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
next_token_logits = model_outputs.logits[:, -1]
# pre-process distribution
next_tokens_scores = logits_processor(generated, next_token_logits, cur_len)
# Store scores, attentions and hidden_states when required
if not use_xla and return_dict_in_generate:
if output_scores:
scores.append(next_tokens_scores)
                if output_attentions and self.config.is_encoder_decoder:
                    decoder_attentions.append(model_outputs.decoder_attentions)
                    cross_attentions.append(model_outputs.cross_attentions)
                elif output_attentions and not self.config.is_encoder_decoder:
                    decoder_attentions.append(model_outputs.attentions)
                if output_hidden_states and self.config.is_encoder_decoder:
                    decoder_hidden_states.append(model_outputs.decoder_hidden_states)
                elif output_hidden_states and not self.config.is_encoder_decoder:
                    decoder_hidden_states.append(model_outputs.hidden_states)
# argmax
next_tokens = tf.argmax(next_tokens_scores, axis=-1, output_type=tf.int32)
if eos_token_id is not None:
if pad_token_id is None:
raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32)
next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq)
next_token_is_eos = tf.math.reduce_any(
tf.equal(
tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1)
),
axis=0,
)
finished_sequences = finished_sequences | next_token_is_eos
# update `generated` and `cur_len`
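            # write `next_tokens` at column `cur_len` of every row of `generated` -- an XLA-friendly scatter into
            # the pre-allocated `max_length` buffer, rather than a shape-changing concat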
update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1)
generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens)
cur_len += 1
# update model_kwargs
if use_xla:
model_kwargs = self._update_model_kwargs_for_xla_generation(
model_outputs=model_outputs,
model_kwargs=model_kwargs,
cur_len=cur_len,
max_length=max_length,
batch_size=batch_size,
is_encoder_decoder=self.config.is_encoder_decoder,
batch_axis=cache_batch_axis,
)
else:
model_kwargs = self._update_model_kwargs_for_generation(
model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
            # if we don't cache `past_key_values` we need the whole input
if model_kwargs.get("past_key_values", None) is None:
# let's throw out `past_key_values` since we don't want `None` tensors
model_kwargs.pop("past_key_values", None)
return generated, finished_sequences, cur_len, model_kwargs
# 5. run generation
# 1st generation step has to be run before to initialize `past_key_values`
generated, finished_sequences, cur_len, model_kwargs = greedy_search_body_fn(
generated, finished_sequences, cur_len, model_kwargs
)
# 2-to-n generation steps can then be run in autoregressive fashion
# only in case 1st generation step does NOT yield EOS token though
if greedy_search_cond_fn(generated, finished_sequences, cur_len, model_kwargs):
maximum_iterations = max_length - cur_len
generated, _, cur_len, _ = tf.while_loop(
greedy_search_cond_fn,
greedy_search_body_fn,
(generated, finished_sequences, cur_len, model_kwargs),
maximum_iterations=maximum_iterations,
)
# 6. prepare outputs
if not use_xla:
# cut for backward compatibility
generated = generated[:, :cur_len]
if return_dict_in_generate:
if self.config.is_encoder_decoder:
# if model is an encoder-decoder, retrieve encoder attention weights
# and hidden states
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
scores = tuple(scores) if scores is not None else None
decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None
cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None
decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None
return TFGreedySearchEncoderDecoderOutput(
sequences=generated,
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return TFGreedySearchDecoderOnlyOutput(
sequences=generated,
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return generated
def sample(
self,
input_ids: tf.Tensor,
logits_processor: Optional[TFLogitsProcessorList] = None,
logits_warper: Optional[TFLogitsProcessorList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
seed: Optional[Tuple[int, int]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
**model_kwargs,
) -> Union[TFSampleOutput, tf.Tensor]:
r"""
Generates sequences for models with a language modeling head using multinomial sampling.
Parameters:
input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
logits_processor (`TFLogitsProcessorList`, *optional*):
An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`]
used to modify the prediction scores of the language modeling head applied at each generation step.
logits_warper (`TFLogitsProcessorList`, *optional*):
An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`]
used to warp the prediction score distribution of the language modeling head applied before multinomial
sampling at each generation step.
max_length (`int`, *optional*, defaults to 20):
The maximum length of the sequence to be generated.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, List[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
            seed (`Tuple[int, int]`, *optional*):
Random seed to control sampling, containing two integers, used when `do_sample` is `True`. See the
`seed` argument from stateless functions in `tf.random`.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more details.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more details.
output_scores (`bool`, *optional*, defaults to `False`):
Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
model_kwargs:
Additional model specific kwargs will be forwarded to the `call` function of the model. If model is an
encoder-decoder model the kwargs should include `encoder_outputs`.
Return:
[`~generation.TFSampleDecoderOnlyOutput`], [`~generation.TFSampleEncoderDecoderOutput`] or `tf.Tensor`: A
`tf.Tensor` containing the generated tokens (default behaviour) or a
[`~generation.TFSampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
`return_dict_in_generate=True` or a [`~generation.TFSampleEncoderDecoderOutput`] if
`model.config.is_encoder_decoder=True`.
Examples:
```python
>>> import tensorflow as tf
>>> from transformers import (
... AutoTokenizer,
... TFAutoModelForCausalLM,
... TFLogitsProcessorList,
... TFMinLengthLogitsProcessor,
... TFTopKLogitsWarper,
... TFTemperatureLogitsWarper,
... )
>>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
>>> model = TFAutoModelForCausalLM.from_pretrained("gpt2")
        >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
>>> model.generation_config.pad_token_id = model.generation_config.eos_token_id
>>> input_prompt = "Today is a beautiful day, and"
>>> input_ids = tokenizer(input_prompt, return_tensors="tf").input_ids
>>> # instantiate logits processors
>>> logits_processor = TFLogitsProcessorList(
... [
... TFMinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id),
... ]
... )
        >>> # instantiate logits warpers
>>> logits_warper = TFLogitsProcessorList(
... [
... TFTopKLogitsWarper(50),
... TFTemperatureLogitsWarper(0.7),
... ]
... )
>>> tf.random.set_seed(0)
>>> outputs = model.sample(input_ids, logits_processor=logits_processor, logits_warper=logits_warper)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Today is a beautiful day, and I love my country. But when I look at Donald Trump,']
```"""
        # 1. init sample values
logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList()
max_length = max_length if max_length is not None else self.generation_config.max_length
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
output_attentions = (
output_attentions if output_attentions is not None else self.generation_config.output_attentions
)
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate
if return_dict_in_generate is not None
else self.generation_config.return_dict_in_generate
)
use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache)
use_xla = not tf.executing_eagerly()
        # TODO (Joao): fix cache format or find programmatic way to detect cache index
        # GPT2 and other models have a slightly different cache structure, with a different batch axis
        model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self)
        cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0
# some models, like XLNet, need more than the last token in the presence of past_key_values
needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys())
# 2. init `attentions`, `hidden_states`, and `scores` tuples
scores = [] if (return_dict_in_generate and output_scores) else None
decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None
cross_attentions = [] if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None
        # 3. init tensors to use for "xla-compilable" generate function
batch_size, cur_len = shape_list(input_ids)
# initialize `generated` (pre-populated with `pad_token_id`), `finished_sequences`
input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0)
generated = tf.concat([input_ids, input_ids_padding], axis=-1)
finished_sequences = tf.zeros((batch_size,), dtype=tf.bool)
# 4. define "xla-compile-able" stop-condition and auto-regressive function
def sample_cond_fn(generated, finished_sequences, cur_len, model_kwargs):
return ~tf.reduce_all(finished_sequences)
def sample_body_fn(generated, finished_sequences, cur_len, model_kwargs):
if model_kwargs.get("past_key_values") is None or needs_full_input:
input_ids = generated[:, :cur_len]
else:
input_ids = tf.expand_dims(generated[:, cur_len - 1], -1)
model_inputs = self.prepare_inputs_for_generation(input_ids, use_cache=use_cache, **model_kwargs)
# forward pass to get next token logits
model_outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
next_token_logits = model_outputs.logits[:, -1]
# pre-process distribution
next_tokens_scores = logits_processor(generated, next_token_logits, cur_len)
next_tokens_scores = logits_warper(generated, next_tokens_scores, cur_len)
# Store scores, attentions and hidden_states when required
if not use_xla and return_dict_in_generate:
if output_scores:
scores.append(next_tokens_scores)
                if output_attentions and self.config.is_encoder_decoder:
                    decoder_attentions.append(model_outputs.decoder_attentions)
                    cross_attentions.append(model_outputs.cross_attentions)
                elif output_attentions and not self.config.is_encoder_decoder:
                    decoder_attentions.append(model_outputs.attentions)
                if output_hidden_states and self.config.is_encoder_decoder:
                    decoder_hidden_states.append(model_outputs.decoder_hidden_states)
                elif output_hidden_states and not self.config.is_encoder_decoder:
                    decoder_hidden_states.append(model_outputs.hidden_states)
# sample
if seed is not None:
sample_seed = seed
else:
sample_seed = tf.experimental.numpy.random.randint(tf.int32.min, tf.int32.max, (2,), dtype=tf.int32)
next_tokens = tf.squeeze(
tf.random.stateless_categorical(
logits=next_tokens_scores, num_samples=1, seed=sample_seed, dtype=tf.int32
),
axis=1,
)
if eos_token_id is not None:
if pad_token_id is None:
raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32)
next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq)
next_token_is_eos = tf.math.reduce_any(
tf.equal(
tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1)
),
axis=0,
)
finished_sequences = finished_sequences | next_token_is_eos
# update `generated` and `cur_len`
update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1)
generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens)
cur_len += 1
# update model_kwargs
if use_xla:
model_kwargs = self._update_model_kwargs_for_xla_generation(
model_outputs=model_outputs,
model_kwargs=model_kwargs,
cur_len=cur_len,
max_length=max_length,
batch_size=batch_size,
is_encoder_decoder=self.config.is_encoder_decoder,
batch_axis=cache_batch_axis,
)
else:
model_kwargs = self._update_model_kwargs_for_generation(
model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
            # if we don't cache `past_key_values` we need the whole input
if model_kwargs.get("past_key_values", None) is None:
# let's throw out `past_key_values` since we don't want `None` tensors
model_kwargs.pop("past_key_values", None)
return generated, finished_sequences, cur_len, model_kwargs
# 5. run generation
# 1st generation step has to be run before to initialize `past_key_values`
generated, finished_sequences, cur_len, model_kwargs = sample_body_fn(
generated, finished_sequences, cur_len, model_kwargs
)
# 2-to-n generation steps can then be run in autoregressive fashion
# only in case 1st generation step does NOT yield EOS token though
if sample_cond_fn(generated, finished_sequences, cur_len, model_kwargs):
maximum_iterations = max_length - cur_len
generated, _, cur_len, _ = tf.while_loop(
sample_cond_fn,
sample_body_fn,
(generated, finished_sequences, cur_len, model_kwargs),
maximum_iterations=maximum_iterations,
)
# 6. prepare outputs
if not use_xla:
# cut for backward compatibility
generated = generated[:, :cur_len]
if return_dict_in_generate:
if self.config.is_encoder_decoder:
# if model is an encoder-decoder, retrieve encoder attention weights
# and hidden states
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
scores = tuple(scores) if scores is not None else None
decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None
cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None
decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None
return TFSampleEncoderDecoderOutput(
sequences=generated,
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return TFSampleDecoderOnlyOutput(
sequences=generated,
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return generated
@staticmethod
def _gather_beams(nested, beam_indices, batch_axis=0):
"""Gathers the beam slices indexed by beam_indices into new beam array."""
def gather_fn(tensor):
if batch_axis > 0:
                # pushes all dimensions before the batch to the end, so we get (batch, beam_id, ...)
perm = tf.concat((tf.range(tf.rank(tensor))[batch_axis:], tf.range(batch_axis)), axis=0)
tensor = tf.transpose(tensor, perm=perm)
gathered_tensor = tf.gather(params=tensor, indices=beam_indices, axis=1, batch_dims=1)
if batch_axis > 0:
# transposes back to the original dimensions
perm = tf.concat((tf.range(tf.rank(tensor))[batch_axis:], tf.range(batch_axis)), axis=0)
perm = tf.math.invert_permutation(perm)
gathered_tensor = tf.transpose(gathered_tensor, perm=perm)
return gathered_tensor
return tf.nest.map_structure(gather_fn, nested)
def beam_search(
self,
input_ids: tf.Tensor,
do_sample: bool = False,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
length_penalty: Optional[float] = None,
early_stopping: Optional[Union[bool, str]] = None,
logits_processor: Optional[TFLogitsProcessorList] = None,
logits_warper: Optional[TFLogitsProcessorList] = None,
num_return_sequences: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
**model_kwargs,
) -> Union[TFBeamSearchOutput, TFBeamSampleOutput, tf.Tensor]:
r"""
Generates sequences for models with a language modeling head using beam search. If `do_sample` is `False`, uses
a greedy approach, otherwise does multinomial sampling without replacement.
Parameters:
input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
do_sample (`bool`, *optional*, defaults to `False`):
                Whether or not to use sampling; use greedy decoding otherwise.
max_length (`int`, *optional*, defaults to 20):
The maximum length of the sequence to be generated.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, List[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
length_penalty (`float`, *optional*, defaults to 1.0):
Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent
to the sequence length, which in turn is used to divide the score of the sequence. Since the score is
the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences,
while `length_penalty` < 0.0 encourages shorter sequences.
early_stopping (`bool` or `str`, *optional*, defaults to `False`):
Controls the stopping condition for beam-based methods, like beam-search. It accepts the following
values: `True`, where the generation stops as soon as there are `num_beams` complete candidates;
                `False`, where a heuristic is applied and the generation stops when it is very unlikely to find better
candidates; `"never"`, where the beam search procedure only stops when there cannot be better
candidates (canonical beam search algorithm).
logits_processor (`[TFLogitsProcessorList]`, *optional*):
An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`]
used to modify the prediction scores of the language modeling head applied at each generation step.
logits_warper (`TFLogitsProcessorList`, *optional*):
An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`]
used to warp the prediction score distribution of the language modeling head applied before multinomial
sampling at each generation step.
            num_return_sequences (`int`, *optional*, defaults to 1):
The number of independently computed returned sequences for each element in the batch.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more details.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more details.
            output_scores (`bool`, *optional*, defaults to `False`):
                Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
            return_dict_in_generate (`bool`, *optional*, defaults to `False`):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
model_kwargs:
Additional model specific kwargs will be forwarded to the `call` function of the model. If model is an
encoder-decoder model the kwargs should include `encoder_outputs`.
Return:
[`~generation.TFBeamSearchDecoderOnlyOutput`], [`~generation.TFBeamSearchEncoderDecoderOutput`] or
`tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a
[`~generation.TFBeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
`return_dict_in_generate=True` or a [`~generation.TFBeamSearchEncoderDecoderOutput`] if
`model.config.is_encoder_decoder=True`.
Examples:
```python
>>> from transformers import (
... AutoTokenizer,
... TFAutoModelForSeq2SeqLM,
... TFLogitsProcessorList,
... TFMinLengthLogitsProcessor,
... )
>>> import tensorflow as tf
>>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
>>> model = TFAutoModelForSeq2SeqLM.from_pretrained("t5-base")
>>> encoder_input_str = "translate English to German: How old are you?"
>>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="tf").input_ids
>>> # lets run beam search using 3 beams
>>> num_beams = 3
>>> # define decoder start token ids
>>> input_ids = tf.ones((1, num_beams, 1), dtype=tf.int32)
>>> input_ids = input_ids * model.generation_config.decoder_start_token_id
>>> # add encoder_outputs to model keyword arguments
>>> encoder_outputs = model.get_encoder()(encoder_input_ids, return_dict=True)
>>> encoder_outputs.last_hidden_state = tf.repeat(
... tf.expand_dims(encoder_outputs.last_hidden_state, axis=0), num_beams, axis=1
... )
>>> model_kwargs = {"encoder_outputs": encoder_outputs}
>>> # instantiate logits processors
>>> logits_processor = TFLogitsProcessorList(
... [TFMinLengthLogitsProcessor(5, eos_token_id=model.generation_config.eos_token_id)]
... )
>>> outputs = model.beam_search(input_ids, logits_processor=logits_processor, **model_kwargs)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Wie alt bist du?']
```"""
def flatten_beam_dim(tensor, batch_axis=0):
"""Flattens the first two dimensions of a non-scalar array."""
shape = shape_list(tensor)
return tf.reshape(
tensor,
shape[:batch_axis] + [shape[batch_axis] * shape[batch_axis + 1]] + shape[batch_axis + 2 :],
)
def unflatten_beam_dim(tensor, num_beams, batch_axis=0):
"""Unflattens the first, flat batch*beam dimension of a non-scalar array."""
shape = shape_list(tensor)
return tf.reshape(tensor, shape[:batch_axis] + [-1, num_beams] + shape[batch_axis + 1 :])
# 1. init beam_search values
logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList()
max_length = max_length if max_length is not None else self.generation_config.max_length
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
num_return_sequences = (
num_return_sequences if num_return_sequences is not None else self.generation_config.num_return_sequences
)
output_attentions = (
output_attentions if output_attentions is not None else self.generation_config.output_attentions
)
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
)
output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
return_dict_in_generate = (
return_dict_in_generate
if return_dict_in_generate is not None
else self.generation_config.return_dict_in_generate
)
length_penalty = length_penalty if length_penalty is not None else self.generation_config.length_penalty
early_stopping = early_stopping if early_stopping is not None else self.generation_config.early_stopping
use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache)
use_xla = not tf.executing_eagerly()
        # TODO (Joao): fix cache format or find programmatic way to detect cache index
        # GPT2 and other models have a slightly different cache structure, with a different batch axis
        model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self)
        cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0
# some models, like XLNet, need more than the last token in the presence of past_key_values
needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys())
# 2. init `attentions`, `hidden_states`, and `scores` tuples
all_scores = [] if (return_dict_in_generate and output_scores) else None
decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None
cross_attentions = [] if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None
        # 3. init tensors to use for "xla-compilable" generate function
batch_size, num_beams, cur_len = shape_list(input_ids)
# per batch, beam-item holding current token in loop, pre-populated with `pad_token_id`
input_ids_padding = tf.ones((batch_size, num_beams, max_length - cur_len), dtype=tf.int32) * (
pad_token_id or 0
)
running_sequences = tf.concat([input_ids, input_ids_padding], axis=-1)
sequences = tf.ones((batch_size, num_beams, max_length), dtype=tf.int32) * (pad_token_id or 0)
        # per batch, beam-item state bit indicating if sentence has finished.
is_sent_finished = tf.zeros((batch_size, num_beams), dtype=tf.bool)
# per batch, beam-item score, logprobs
running_scores = tf.tile(
tf.expand_dims(tf.convert_to_tensor([0.0] + [-1.0e9] * (num_beams - 1)), axis=0), [batch_size, 1]
)
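        # only the first beam of each batch item starts "alive" (score 0.0); the others start at -1e9 so the
        # first top-k selection effectively draws from a single beam instead of `num_beams` identical copies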
scores = tf.ones((batch_size, num_beams)) * -1.0e9
# per batch beam indices
running_beam_indices = tf.ones((batch_size, num_beams, max_length), dtype=tf.int32) * -1
beam_indices = tf.ones((batch_size, num_beams, max_length), dtype=tf.int32) * -1
# flatten beam dim
if "encoder_outputs" in model_kwargs:
model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim(
model_kwargs["encoder_outputs"]["last_hidden_state"]
)
if "attention_mask" in model_kwargs:
model_kwargs["attention_mask"] = flatten_beam_dim(model_kwargs["attention_mask"])
# 4. define "xla-compile-able" stop-condition and auto-regressive function
# define stop-condition and auto-regressive function
def beam_search_cond_fn(
cur_len,
running_sequences,
running_scores,
running_beam_indices,
sequences,
scores,
beam_indices,
is_sent_finished,
model_kwargs,
):
"""
Beam Search termination condition function -- halts the generation loop if any of these conditions becomes
False
"""
# 1. is less than max length?
not_max_length_yet = cur_len < max_length
# 2. can the new beams still improve?
# early_stopping == False -> apply heuristic = always get the best score from `cur_len`. See the discussion
# below for more details.
# https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565
# early_stopping == "never" -> compute the best score from max_length or cur_len, depending on the sign of
# length_penalty. Positive length_penalty favors longer sequences, thus we use max_length there.
if early_stopping == "never" and length_penalty > 0.0:
best_running_score = running_scores[:, :1] / (max_length**length_penalty)
else:
best_running_score = running_scores[:, :1] / (tf.cast(cur_len, dtype=tf.float32) ** length_penalty)
worst_finished_score = tf.where(
is_sent_finished, tf.math.reduce_min(scores, axis=1, keepdims=True), -1.0e9
)
improvement_still_possible = tf.math.reduce_any(best_running_score > worst_finished_score)
# 3. is there still a beam that has not finished?
still_open_beam = ~(tf.math.reduce_all(is_sent_finished) & (early_stopping is True))
return not_max_length_yet & still_open_beam & improvement_still_possible
def beam_search_body_fn(
cur_len,
running_sequences,
running_scores,
running_beam_indices,
sequences,
scores,
beam_indices,
is_sent_finished,
model_kwargs,
):
"""
Beam Search iterative update function -- each iteration adds a new token and updates the best sequences
seen so far
"""
# 1. Forward current tokens
if model_kwargs.get("past_key_values") is None or needs_full_input:
input_ids = running_sequences[:, :, :cur_len]
else:
input_ids = tf.expand_dims(running_sequences[:, :, cur_len - 1], -1)
model_inputs = self.prepare_inputs_for_generation(
flatten_beam_dim(input_ids), use_cache=use_cache, **model_kwargs
)
model_outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
logits = unflatten_beam_dim(model_outputs.logits[:, -1], num_beams)
# 2. Compute log probs
# get log probabilities from logits, process logits with processors (*e.g.* min_length, ...), and
# add new logprobs to existing running logprobs scores.
log_probs = tf.nn.log_softmax(logits)
log_probs = logits_processor(flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), cur_len)
log_probs = unflatten_beam_dim(log_probs, num_beams)
log_probs_processed = log_probs
log_probs = log_probs + tf.expand_dims(running_scores, axis=2)
if do_sample:
                # Note: logits warpers are intentionally applied after adding running beam scores. On some logits
                # warpers (like top_p) this makes no difference, but on others (like temperature) it does. For
                # reference, see https://github.com/huggingface/transformers/pull/5420#discussion_r449779867
log_probs = logits_warper(flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), cur_len)
log_probs = unflatten_beam_dim(log_probs, num_beams)
vocab_size = log_probs.shape[2]
log_probs = tf.reshape(log_probs, (batch_size, num_beams * vocab_size))
# Store scores, attentions and hidden_states when required
if not use_xla and return_dict_in_generate:
if output_scores:
all_scores.append(
logits_warper(
flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs_processed), cur_len
)
)
if output_attentions and self.config.is_encoder_decoder:
decoder_attentions.append(model_outputs.decoder_attentions)
elif output_attentions and not self.config.is_encoder_decoder:
decoder_attentions.append(model_outputs.attentions)
if self.config.is_encoder_decoder:
cross_attentions.append(model_outputs.cross_attentions)
if output_hidden_states and self.config.is_encoder_decoder:
decoder_hidden_states.append(model_outputs.decoder_hidden_states)
elif output_hidden_states and not self.config.is_encoder_decoder:
decoder_hidden_states.append(model_outputs.hidden_states)
# 3. Retrieve top-K
# Each item in batch has num_beams * vocab_size candidate sequences. For each item, get the top 2*k
# candidates with the highest log-probabilities. We gather the top 2*K beams here so that even if the
# best K sequences reach EOS simultaneously, we have another K sequences remaining to continue the live
# beam search.
# Gather the top 2*K scores from _all_ beams.
# Gather 2*k top beams.
# Recover the beam index by floor division.
# Recover token id by modulo division and expand Id array for broadcasting.
# Update sequences for the 2*K top-k new sequences.
beams_to_keep = 2 * num_beams
if do_sample:
topk_indices = sample_without_replacement(log_probs, beams_to_keep)
topk_log_probs = tf.gather(log_probs, topk_indices, axis=1, batch_dims=1)
else:
topk_log_probs, topk_indices = tf.math.top_k(log_probs, k=beams_to_keep)
topk_current_beam_indices = topk_indices // vocab_size
topk_running_beam_indices = self._gather_beams(running_beam_indices, topk_current_beam_indices)
topk_running_sequences = self._gather_beams(running_sequences, topk_current_beam_indices)
topk_ids = topk_indices % vocab_size
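# Worked example (illustrative): with vocab_size=5 and num_beams=2, the flat candidate index 7
# decodes as beam 7 // 5 = 1 and token id 7 % 5 = 2.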
# writes the new token
indices_batch = tf.repeat(tf.range(batch_size), [beams_to_keep])
indices_beam = tf.tile(tf.range(beams_to_keep), [batch_size])
update_indices = tf.stack(
[indices_batch, indices_beam, tf.broadcast_to(cur_len, [batch_size * beams_to_keep])], axis=-1
)
topk_sequences = tf.tensor_scatter_nd_update(
tensor=topk_running_sequences,
indices=update_indices,
updates=tf.reshape(topk_ids, [batch_size * beams_to_keep]),
)
# we want to store the beam indices with batch information -> real beam index = beam index % num beams
batch_modified_indices = topk_current_beam_indices + tf.broadcast_to(
tf.expand_dims(tf.range(batch_size) * num_beams, axis=1), topk_current_beam_indices.shape
)
topk_beam_indices = tf.tensor_scatter_nd_update(
tensor=topk_running_beam_indices,
indices=update_indices,
updates=tf.reshape(batch_modified_indices, [batch_size * beams_to_keep]),
)
# 4. Check which sequences have ended
# Update current sequences: did the top `num_beams` sequences reach an end marker?
# To prevent just-finished sequences from being kept in the set of active beam search
# sequences, set their log probs to a very large negative value.
if eos_token_id is None:
eos_in_next_token = tf.zeros(topk_sequences[:, :, cur_len].shape, dtype=tf.bool)
else:
eos_in_next_token = tf.math.reduce_any(
tf.equal(
tf.broadcast_to(
topk_sequences[:, :, cur_len], [len(eos_token_id)] + topk_sequences[:, :, cur_len].shape
),
tf.expand_dims(tf.expand_dims(eos_token_id, -1), -1),
),
axis=0,
)
did_topk_just_finished = eos_in_next_token & tf.broadcast_to(
tf.concat((tf.ones((num_beams), dtype=tf.bool), tf.zeros((num_beams), dtype=tf.bool)), axis=0),
shape_list(eos_in_next_token),
)
# EOS tokens outside the top `num_beams` can't be used to finish a beam, and the EOS candidates can't be
# kept among the running sequences either
running_topk_log_probs = topk_log_probs + tf.cast(eos_in_next_token, tf.float32) * -1.0e9
# 5. Get running sequences scores for next
# Determine the top k beam indices (from top 2*k beams) from log probs and gather top k beams
# (from top 2*k beams).
next_topk_indices = tf.math.top_k(running_topk_log_probs, k=num_beams)[1]
next_running_sequences, next_running_scores, next_running_beam_indices = self._gather_beams(
[topk_sequences, running_topk_log_probs, topk_beam_indices], next_topk_indices
)
# 6. Process topk logits
# Further process log probs:
# - add length penalty
# - make sure no scores can be added anymore if beam is full
# - make sure still running sequences cannot be chosen as finalized beam
topk_log_probs = topk_log_probs / (tf.cast(cur_len, dtype=tf.float32) ** length_penalty)
beams_in_batch_are_full = tf.broadcast_to(
tf.math.reduce_all(is_sent_finished, axis=-1, keepdims=True), shape_list(did_topk_just_finished)
) & (early_stopping is True)
add_penalty = ~did_topk_just_finished | beams_in_batch_are_full
topk_log_probs += tf.cast(add_penalty, tf.float32) * -1.0e9
# 7. Get scores, sequences, is sentence finished for next.
# Combine sequences, scores, and flags along the beam dimension and compare new finished sequence scores
# to existing finished scores and select the best from the new set of beams
merged_sequences = tf.concat([sequences, topk_sequences], axis=1)
merged_scores = tf.concat([scores, topk_log_probs], axis=1)
merged_beams = tf.concat([beam_indices, topk_beam_indices], axis=1)
merged_is_sent_finished = tf.concat([is_sent_finished, did_topk_just_finished], axis=1)
topk_merged_indices = tf.math.top_k(merged_scores, k=num_beams)[1]
next_sequences, next_scores, next_beam_indices, next_is_sent_finished = self._gather_beams(
[merged_sequences, merged_scores, merged_beams, merged_is_sent_finished], topk_merged_indices
)
# 8. Prepare data for the next iteration
# Determine the top k beam indices from the original set of all beams. With these, gather the top k
# beam-associated caches.
cur_len = cur_len + 1
if "past_key_values" in model_outputs:
cache = tf.nest.map_structure(
lambda tensor: unflatten_beam_dim(tensor, num_beams, batch_axis=cache_batch_axis),
model_outputs.past_key_values,
)
next_running_indices = self._gather_beams(topk_current_beam_indices, next_topk_indices)
next_cache = self._gather_beams(cache, next_running_indices, batch_axis=cache_batch_axis)
model_outputs["past_key_values"] = tf.nest.map_structure(
lambda tensor: flatten_beam_dim(tensor, batch_axis=cache_batch_axis), next_cache
)
if use_xla:
next_model_kwargs = self._update_model_kwargs_for_xla_generation(
model_outputs=model_outputs,
model_kwargs=model_kwargs,
cur_len=cur_len,
max_length=max_length,
batch_size=(batch_size * num_beams),
is_encoder_decoder=self.config.is_encoder_decoder,
batch_axis=cache_batch_axis,
)
else:
next_model_kwargs = self._update_model_kwargs_for_generation(
model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
# if we don't cache `past_key_values`, we need the whole input
if model_kwargs.get("past_key_values", None) is None:
# let's throw out `past_key_values` since we don't want `None` tensors
model_kwargs.pop("past_key_values", None)
return (
cur_len,
next_running_sequences,
next_running_scores,
next_running_beam_indices,
next_sequences,
next_scores,
next_beam_indices,
next_is_sent_finished,
next_model_kwargs,
)
# 5. run generation
# the 1st generation step has to be run beforehand to initialize `past_key_values` (if active)
(
cur_len,
running_sequences,
running_scores,
running_beam_indices,
sequences,
scores,
beam_indices,
is_sent_finished,
model_kwargs,
) = beam_search_body_fn(
cur_len,
running_sequences,
running_scores,
running_beam_indices,
sequences,
scores,
beam_indices,
is_sent_finished,
model_kwargs,
)
# generation steps 2-to-n can then be run in an autoregressive fashion (but only if the 1st generation step
# did NOT yield an EOS token)
if beam_search_cond_fn(
cur_len,
running_sequences,
running_scores,
running_beam_indices,
sequences,
scores,
beam_indices,
is_sent_finished,
model_kwargs,
):
maximum_iterations = max_length - cur_len
(
cur_len,
running_sequences,
running_scores,
running_beam_indices,
sequences,
scores,
beam_indices,
is_sent_finished,
_,
) = tf.while_loop(
beam_search_cond_fn,
beam_search_body_fn,
(
cur_len,
running_sequences,
running_scores,
running_beam_indices,
sequences,
scores,
beam_indices,
is_sent_finished,
model_kwargs,
),
maximum_iterations=maximum_iterations,
)
# 6. prepare outputs
# Account for the edge-case where there are no finished sequences for a particular batch item. If so, return
# running sequences for that batch item.
none_finished = tf.math.reduce_any(is_sent_finished, axis=1)
sequences = tf.where(none_finished[:, None, None], sequences, running_sequences)
beam_indices = tf.where(none_finished[:, None, None], beam_indices, running_beam_indices)
# Apply the length penalty so that running scores match the finalized scores if they are used
running_scores = running_scores / (tf.cast(cur_len, dtype=tf.float32) ** length_penalty)
scores = tf.where(none_finished[:, None], scores, running_scores)
# Take best beams for each batch (the score is sorted in descending order)
sequences = flatten_beam_dim(sequences[:, :num_return_sequences, :])
scores = flatten_beam_dim(scores[:, :num_return_sequences])
beam_indices = flatten_beam_dim(beam_indices[:, :num_return_sequences, :])
if not use_xla:
# Cut for backward compatibility
sequences = sequences[:, :cur_len]
beam_indices = beam_indices[:, :cur_len]
if return_dict_in_generate:
if self.config.is_encoder_decoder:
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
output_cls = TFBeamSampleEncoderDecoderOutput if do_sample else TFBeamSearchEncoderDecoderOutput
return output_cls(
sequences=sequences,
sequences_scores=scores,
scores=all_scores,
beam_indices=beam_indices,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
output_cls = TFBeamSampleDecoderOnlyOutput if do_sample else TFBeamSearchDecoderOnlyOutput
return output_cls(
sequences=sequences,
sequences_scores=scores,
scores=all_scores,
beam_indices=beam_indices,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return sequences
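# Usage sketch (illustrative, not part of the original source): beam search is normally reached
# through `generate` rather than called directly, e.g.:
# from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
# tokenizer = AutoTokenizer.from_pretrained("t5-small")
# model = TFAutoModelForSeq2SeqLM.from_pretrained("t5-small")
# inputs = tokenizer("translate English to German: Hello", return_tensors="tf")
# outputs = model.generate(**inputs, num_beams=4, num_return_sequences=2)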
def contrastive_search(
self,
input_ids: tf.Tensor,
top_k: Optional[int] = 1,
penalty_alpha: Optional[float] = 0,
logits_processor: Optional[TFLogitsProcessorList] = None,
logits_warper: Optional[TFLogitsProcessorList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
**model_kwargs,
) -> Union[TFContrastiveSearchOutput, tf.Tensor]:
r"""
Generates sequences of token ids for models with a language modeling head using **contrastive search** and can
be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
Parameters:
input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
top_k (`int`, *optional*, defaults to 1):
The size of the candidate set that is used to re-rank for contrastive search.
penalty_alpha (`float`, *optional*, defaults to 0):
The degeneration penalty for contrastive search; activated when it is larger than 0.
logits_processor (`TFLogitsProcessorList`, *optional*):
An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`]
used to modify the prediction scores of the language modeling head applied at each generation step.
logits_warper (`TFLogitsProcessorList`, *optional*):
An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`]
used to warp the prediction score distribution of the language modeling head applied before multinomial
sampling at each generation step.
max_length (`int`, *optional*, defaults to 20):
The maximum length of the sequence to be generated.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, List[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more details.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more details.
output_scores (`bool`, *optional*, defaults to `False`):
Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
model_kwargs:
Additional model specific keyword arguments will be forwarded to the `call` function of the model. If
model is an encoder-decoder model the kwargs should include `encoder_outputs`.
Return:
[`~generation.TFContrastiveSearchDecoderOnlyOutput`],
[`~generation.TFContrastiveSearchEncoderDecoderOutput`] or `tf.Tensor`: A `tf.Tensor` containing the
generated tokens (default behaviour) or a [`~generation.TFContrastiveSearchDecoderOnlyOutput`] if
`model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a
[`~generation.TFContrastiveSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`.
Examples:
```python
>>> from transformers import AutoTokenizer, TFAutoModelForCausalLM
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
>>> model = TFAutoModelForCausalLM.from_pretrained("facebook/opt-125m")
>>> # set pad_token_id to eos_token_id because OPT does not have a PAD token
>>> model.config.pad_token_id = model.config.eos_token_id
>>> input_prompt = "DeepMind Company is"
>>> input_ids = tokenizer(input_prompt, return_tensors="tf")
>>> outputs = model.contrastive_search(**input_ids, penalty_alpha=0.6, top_k=4, max_length=64)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['DeepMind Company is a company that focuses on the development and commercialization of artificial intelligence (AI). DeepMind’s mission is to help people understand and solve problems that are difficult to solve in the world today.\n\nIn this post, we talk about the benefits of deep learning in business and how it']
```"""
def gather_best_candidate(nested, selected_idx_stacked, batch_axis=0):
"""Gathers the slices indexed by selected_idx_stacked from a potentially nested structure of tensors."""
def gather_fn(tensor):
gathered_tensor = tf.gather(params=tensor, indices=selected_idx_stacked, axis=batch_axis)
return gathered_tensor
return tf.nest.map_structure(gather_fn, nested)
# 1. init greedy_search values
logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList()
max_length = max_length if max_length is not None else self.generation_config.max_length
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
output_attentions = (
output_attentions if output_attentions is not None else self.generation_config.output_attentions
)
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate
if return_dict_in_generate is not None
else self.generation_config.return_dict_in_generate
)
use_cache = True # In contrastive search, we always use cache
model_kwargs.pop("use_cache", None)
use_xla = not tf.executing_eagerly()
# TODO (Joao): fix cache format or find programmatic way to detect cache index
# GPT2 and other models have a slightly different cache structure, with a different batch axis
model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self)
cache_batch_axis = 1 if any([model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")]) else 0
# 2. init `attentions`, `hidden_states`, and `scores` tuples
scores = [] if (return_dict_in_generate and output_scores) else None
decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None
cross_attentions = [] if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None
# 3. init tensors to use for "xla-compileable" generate function
batch_size, cur_len = shape_list(input_ids)
# initialize `generated` (`input_ids` padded with `pad_token_id`), `finished_sequences`
input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0)
generated = tf.concat([input_ids, input_ids_padding], axis=-1)
finished_sequences = tf.zeros((batch_size,), dtype=tf.bool)
# 4. define "xla-compile-able" stop-condition and auto-regressive function
# define condition fn
def contrastive_search_cond_fn(
generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables
):
"""state termination condition fn."""
return ~tf.reduce_all(finished_sequences)
# define body fn
def contrastive_search_body_fn(
generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables
):
"""state update fn."""
# if this is the first step in the loop, encode the whole prefix and obtain: (1) past_key_values;
# (2) last_hidden_states; (3) logit_for_next_step; (4) update model kwargs for the next step
if model_kwargs.get("past_key_values") is None:
# prepare inputs
model_inputs = self.prepare_inputs_for_generation(
generated[:, :cur_len], use_cache=use_cache, **model_kwargs
)
# encode the given prefix and prepare model inputs; encoder-decoder models process the prefix and save
# the `encoder_outputs`
outputs = self(
**model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions
)
# last decoder hidden states will be used to compute the degeneration penalty (cosine similarity with
# previous tokens)
if self.config.is_encoder_decoder:
last_hidden_states = outputs.decoder_hidden_states[-1]
else:
last_hidden_states = outputs.hidden_states[-1]
# XLA: last_hidden_states normally grows at each step, but in XLA it is padded so as to be used across
# iterations (with fixed shapes)
if use_xla:
last_hidden_states = tf.pad(last_hidden_states, [[0, 0], [0, max_length - cur_len], [0, 0]])
# next logit for contrastive search to select top-k candidate tokens
logit_for_next_step = outputs.logits[:, -1, :]
if use_xla:
model_kwargs = self._update_model_kwargs_for_xla_generation(
model_outputs=outputs,
model_kwargs=model_kwargs,
cur_len=cur_len,
max_length=max_length,
batch_size=batch_size,
is_encoder_decoder=self.config.is_encoder_decoder,
batch_axis=cache_batch_axis,
)
else:
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
# Expands model inputs top_k times, for batched forward passes (akin to beam search).
_, model_kwargs = self._expand_inputs_for_generation(
expand_size=top_k, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs
)
past_key_values = model_kwargs.get("past_key_values")
if past_key_values is None:
raise ValueError(
f"{self.__class__.__name__} does not support caching and therefore **can't** be used "
"for contrastive search."
)
elif (
not isinstance(past_key_values[0], (tuple, tf.Tensor))
or past_key_values[0][0].shape[0] != batch_size
):
raise ValueError(
f"{self.__class__.__name__} does not have a standard cache format and therefore **can't** be "
"used for contrastive search without further modifications."
)
else:
logit_for_next_step = next_step_cached_variables["logit_for_next_step"]
last_hidden_states = next_step_cached_variables["last_hidden_states"]
outputs = next_step_cached_variables["outputs"]
# contrastive_search main logic start:
# contrastive search decoding consists of two steps: (1) candidate tokens recall; (2) candidate re-rank by
# degeneration penalty
logit_for_next_step = logits_processor(generated, logit_for_next_step, cur_len)
logit_for_next_step = logits_warper(generated, logit_for_next_step, cur_len)
next_probs = stable_softmax(logit_for_next_step, axis=-1)
top_k_probs, top_k_ids = tf.math.top_k(next_probs, k=top_k)
# Store scores, attentions and hidden_states when required
if not use_xla and return_dict_in_generate:
if output_scores:
scores.append(logit_for_next_step)
if output_attentions and self.config.is_encoder_decoder:
decoder_attentions.append(outputs.decoder_attentions)
elif output_attentions and not self.config.is_encoder_decoder:
decoder_attentions.append(outputs.attentions)
if self.config.is_encoder_decoder:
cross_attentions.append(outputs.cross_attentions)
if output_hidden_states and self.config.is_encoder_decoder:
decoder_hidden_states.append(outputs.decoder_hidden_states)
elif output_hidden_states and not self.config.is_encoder_decoder:
decoder_hidden_states.append(outputs.hidden_states)
# Replicates the new past_key_values to match the `top_k` candidates
model_kwargs["past_key_values"] = tf.nest.map_structure(
lambda tensor: tf.repeat(tensor, top_k, axis=cache_batch_axis), model_kwargs["past_key_values"]
)
# compute the candidate tokens with the language model and collect their hidden_states
next_model_inputs = self.prepare_inputs_for_generation(
tf.reshape(top_k_ids, [-1, 1]), use_cache=use_cache, **model_kwargs
)
outputs = self(
**next_model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions
)
next_past_key_values = self._extract_past_from_model_output(outputs)
logits = outputs.logits[:, -1, :]
# name is different for encoder-decoder and decoder-only models
if self.config.is_encoder_decoder:
next_hidden = outputs.decoder_hidden_states[-1]
full_hidden_states = outputs.decoder_hidden_states
else:
next_hidden = outputs.hidden_states[-1]
full_hidden_states = outputs.hidden_states
context_hidden = tf.repeat(last_hidden_states[:, :cur_len, :], top_k, axis=0)
# compute the degeneration penalty and re-rank the candidates based on the degeneration penalty and the
# model confidence
selected_idx = _ranking_fast(context_hidden, next_hidden, top_k_probs, penalty_alpha, top_k)
# converts indices from the top_k dimension to the stacked top_k * batch_size dimension, for indexing
# without needing to reshape tensors that have these two dimensions stacked
selected_idx_stacked = selected_idx + tf.range(selected_idx.shape[0], dtype=tf.int64) * top_k
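# Worked example (illustrative): with batch_size=2 and top_k=3, per-row picks
# selected_idx = [1, 2] map to stacked indices [1 + 0*3, 2 + 1*3] = [1, 5].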
# prepare for the next step: (1) next token_id; (2) past_key_values; (3) last_hidden_states for computing
# the degeneration penalty; (4) logits for selecting next top-k candidates; (5) selected tokens scores
# (model confidence minus degeneration penalty); (6) decoder hidden_states
next_tokens = tf.gather(top_k_ids, selected_idx, axis=1, batch_dims=1)
next_hidden = gather_best_candidate(next_hidden, selected_idx_stacked)
# XLA: last_hidden_states normally grows at each step, but in XLA it is padded so as to be used across
# iterations (with fixed shapes)
if use_xla:
last_hidden_states = dynamic_update_slice(last_hidden_states, next_hidden, [0, cur_len, 0])
else:
last_hidden_states = tf.concat([last_hidden_states, next_hidden], axis=1)
next_decoder_hidden_states = gather_best_candidate(full_hidden_states, selected_idx_stacked)
next_past_key_values = gather_best_candidate(
next_past_key_values, selected_idx_stacked, batch_axis=cache_batch_axis
)
logit_for_next_step = gather_best_candidate(logits, selected_idx_stacked)
# Rebuilds the relevant parts of the model output for the selected token, for use in the next iteration
if self.config.is_encoder_decoder:
next_step_cross_attentions = ()
next_step_decoder_attentions = ()
if output_attentions:
next_step_cross_attentions = gather_best_candidate(outputs.cross_attentions, selected_idx_stacked)
next_step_decoder_attentions = gather_best_candidate(
outputs.decoder_attentions, selected_idx_stacked
)
outputs = TFSeq2SeqLMOutput(
past_key_values=next_past_key_values,
decoder_hidden_states=next_decoder_hidden_states,
decoder_attentions=next_step_decoder_attentions or None,
cross_attentions=next_step_cross_attentions or None,
)
else:
next_step_attentions = ()
if output_attentions:
next_step_attentions = gather_best_candidate(outputs.attentions, selected_idx_stacked)
outputs = TFCausalLMOutputWithPast(
past_key_values=next_past_key_values,
hidden_states=next_decoder_hidden_states,
attentions=next_step_attentions or None,
)
# contrastive_search main logic end
if eos_token_id is not None:
if pad_token_id is None:
raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32)
next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq)
next_token_is_eos = tf.math.reduce_any(
tf.equal(
tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1)
),
axis=0,
)
finished_sequences = finished_sequences | next_token_is_eos
# update `generated` and `cur_len`
update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1)
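# Illustrative shape note: for batch_size=2 and cur_len=5, update_indices is
# [[0, 5], [1, 5]], i.e. one (row, position) pair per batch item.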
generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens)
cur_len += 1
if use_xla:
# NOTE: 1) relative to other generation strategies, contrastive search is always running forward
# passes one step ahead -- hence the `cur_len=cur_len + 1`; 2) the attention mask here is expanded from
# [batch_size, ...] to [batch_size*top_k, ...] -- hence the `batch_size=batch_size * top_k`
model_kwargs = self._update_model_kwargs_for_xla_generation(
model_outputs=outputs,
model_kwargs=model_kwargs,
cur_len=cur_len + 1,
max_length=max_length,
batch_size=batch_size * top_k,
is_encoder_decoder=self.config.is_encoder_decoder,
batch_axis=cache_batch_axis,
)
else:
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
next_step_cached_variables = {
"logit_for_next_step": logit_for_next_step,
"last_hidden_states": last_hidden_states,
"outputs": outputs,
}
return generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables
# 5. run generation
# the 1st generation step has to be run beforehand to initialize `past_key_values`
generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables = contrastive_search_body_fn(
generated, finished_sequences, cur_len, model_kwargs, None
)
# generation steps 2-to-n can then be run in an autoregressive fashion
# (but only if the 1st generation step did NOT yield an EOS token)
if contrastive_search_cond_fn(
generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables
):
maximum_iterations = max_length - cur_len
(
generated,
_,
cur_len,
_,
_,
) = tf.while_loop(
contrastive_search_cond_fn,
contrastive_search_body_fn,
(generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables),
maximum_iterations=maximum_iterations,
)
# 6. prepare outputs
if not use_xla:
# cut for backward compatibility
generated = generated[:, :cur_len]
if return_dict_in_generate:
if self.config.is_encoder_decoder:
# if model is an encoder-decoder, retrieve encoder attention weights
# and hidden states
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
scores = tuple(scores) if scores is not None else None
decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None
cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None
decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None
return TFContrastiveSearchEncoderDecoderOutput(
sequences=generated,
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return TFContrastiveSearchDecoderOnlyOutput(
sequences=generated,
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return generated
def tf_top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1):
"""
Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (batch size, vocabulary size)
top_k (`int`, *optional*, defaults to 0):
If > 0, only keep the top k tokens with highest probability (top-k filtering)
top_p (`float`, *optional*, defaults to 1.0):
If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus
filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
min_tokens_to_keep (`int`, *optional*, defaults to 1):
Minimum number of tokens we keep per batch example in the output.
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
logits_shape = shape_list(logits)
if top_k > 0:
top_k = min(max(top_k, min_tokens_to_keep), logits_shape[-1]) # Safety check
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < tf.math.top_k(logits, k=top_k)[0][..., -1, None]
logits = tf.where(indices_to_remove, filter_value, logits)
if top_p < 1.0:
sorted_indices = tf.argsort(logits, direction="DESCENDING")
sorted_logits = tf.gather(
logits, sorted_indices, axis=-1, batch_dims=1
) # expects logits to be of dim (batch_size, vocab_size)
cumulative_probs = tf.math.cumsum(stable_softmax(sorted_logits, axis=-1), axis=-1)
# Remove tokens with cumulative probability above the threshold (token with 0 are kept)
sorted_indices_to_remove = cumulative_probs > top_p
if min_tokens_to_keep > 1:
# Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
sorted_indices_to_remove = tf.concat(
[
tf.zeros_like(sorted_indices_to_remove[:, :min_tokens_to_keep]),
sorted_indices_to_remove[:, min_tokens_to_keep:],
],
-1,
)
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove = tf.concat(
[tf.zeros_like(sorted_indices_to_remove[:, :1]), sorted_indices_to_remove[:, :-1]],
-1,
)
# scatter sorted tensors to original indexing
indices_to_remove = scatter_values_on_batch_indices(sorted_indices_to_remove, sorted_indices)
logits = tf.where(indices_to_remove, filter_value, logits)
return logits
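# A minimal sketch (not part of the original module; the helper name is hypothetical) showing how the
# filter narrows a toy distribution.
def _demo_tf_top_k_top_p_filtering():
    logits = tf.constant([[2.0, 1.0, 0.5, 0.1]])
    # keep only the 2 highest-probability tokens; the rest are set to -inf
    filtered = tf_top_k_top_p_filtering(logits, top_k=2)
    return filtered  # [[2.0, 1.0, -inf, -inf]]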
def scatter_values_on_batch_indices(values, batch_indices):
shape = shape_list(batch_indices)
# broadcast batch dim to shape
broad_casted_batch_dims = tf.reshape(tf.broadcast_to(tf.expand_dims(tf.range(shape[0]), axis=-1), shape), [1, -1])
# transform batch_indices to pair_indices
pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0))
# scatter values to pair indices
return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), shape)
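# Worked example (illustrative): values=[[a, b, c]] with batch_indices=[[2, 0, 1]] scatters back
# to [[b, c, a]], undoing the per-row sort applied by the caller.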
def sample_without_replacement(logits, num_samples):
"""
Categorical sampling without replacement is currently not implemented; the Gumbel-max trick will do for now. See
https://github.com/tensorflow/tensorflow/issues/9260 for more info.
"""
z = -tf.math.log(-tf.math.log(tf.random.uniform(shape_list(logits), 0, 1)))
_, indices = tf.nn.top_k(logits + z, num_samples)
return indices
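# A minimal sketch (illustrative; the helper name is hypothetical) of the Gumbel-max trick used above:
# perturbing the logits with Gumbel noise and taking top-k yields k distinct samples from the
# categorical distribution.
def _demo_sample_without_replacement():
    logits = tf.math.log(tf.constant([[0.5, 0.3, 0.2]]))
    return sample_without_replacement(logits, num_samples=2)  # 2 distinct indices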
def _ranking_fast(
context_hidden: tf.Tensor,
next_hidden: tf.Tensor,
next_top_k_probs: tf.Tensor,
alpha: float,
beam_width: int,
) -> tf.Tensor:
"""
Reranks the top_k candidates based on a degeneration penalty (cosine similarity with previous tokens), as described
in the paper "A Contrastive Framework for Neural Text Generation". Returns the index of the best candidate for each
row in the batch.
"""
norm_context_hidden = context_hidden / tf.norm(context_hidden, axis=2, keepdims=True)
norm_next_hidden = next_hidden / tf.norm(next_hidden, axis=2, keepdims=True)
cosine_matrix = tf.squeeze(tf.linalg.matmul(norm_context_hidden, norm_next_hidden, transpose_b=True), axis=-1)
degeneration_penalty = tf.reduce_max(cosine_matrix, axis=-1)
next_top_k_probs = tf.reshape(next_top_k_probs, shape=[-1])
contrastive_score = (1.0 - alpha) * next_top_k_probs - alpha * degeneration_penalty
contrastive_score = tf.reshape(contrastive_score, shape=[-1, beam_width])
selected_idx = tf.argmax(contrastive_score, axis=1)
return selected_idx
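# A minimal sketch (illustrative; the helper name is hypothetical) exercising _ranking_fast with
# batch_size=1, beam_width=2, seq_len=3, hidden_dim=4.
def _demo_ranking_fast():
    context_hidden = tf.random.normal([2, 3, 4])  # context repeated once per candidate
    next_hidden = tf.random.normal([2, 1, 4])  # one hidden state per candidate
    next_top_k_probs = tf.constant([[0.6, 0.4]])  # model confidence per candidate
    # alpha=0.5 weighs confidence against the degeneration penalty equally
    return _ranking_fast(context_hidden, next_hidden, next_top_k_probs, 0.5, 2)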
|
27182812/ChatGLM-LLaMA-chinese-insturct | 5,510 | src/transformers/generation/stopping_criteria.py | import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings
STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs:
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class StoppingCriteria(ABC):
"""Abstract base class for all stopping criteria that can be applied during generation."""
@add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
raise NotImplementedError("StoppingCriteria needs to be subclassed")
class MaxLengthCriteria(StoppingCriteria):
"""
This class can be used to stop generation whenever the full generated number of tokens exceeds `max_length`. Keep
in mind that for decoder-only transformers this will include the initial prompt tokens.
Args:
max_length (`int`):
The maximum length that the output sequence can have in number of tokens.
"""
def __init__(self, max_length: int):
self.max_length = max_length
@add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
return input_ids.shape[-1] >= self.max_length
class MaxNewTokensCriteria(StoppingCriteria):
"""
This class can be used to stop generation whenever the generated number of tokens exceeds `max_new_tokens`. Keep in
mind that for decoder-only transformers this will **not** include the initial prompt tokens. This is very
close to `MaxLengthCriteria` but ignores the number of initial tokens.
Args:
start_length (`int`):
The number of initial tokens.
max_new_tokens (`int`):
The maximum number of tokens to generate.
"""
def __init__(self, start_length: int, max_new_tokens: int):
warnings.warn(
"The class `MaxNewTokensCriteria` is deprecated. "
f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
"with `max_length = start_length + max_new_tokens` instead.",
FutureWarning,
)
self.start_length = start_length
self.max_new_tokens = max_new_tokens
self.max_length = start_length + max_new_tokens
@add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria(StoppingCriteria):
"""
This class can be used to stop generation whenever the full generation exceeds some amount of time. By default, the
time will start being counted when you initialize this class. You can override this by passing an
`initial_timestamp`.
Args:
max_time (`float`):
The maximum allowed time in seconds for the generation.
initial_timestamp (`float`, *optional*, defaults to `time.time()`):
The start of the allowed generation time.
"""
def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
self.max_time = max_time
self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList(list):
@add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
return any(criteria(input_ids, scores) for criteria in self)
@property
def max_length(self) -> Optional[int]:
for stopping_criterium in self:
if isinstance(stopping_criterium, MaxLengthCriteria):
return stopping_criterium.max_length
elif isinstance(stopping_criterium, MaxNewTokensCriteria):
return stopping_criterium.max_length
return None
def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
stopping_max_length = stopping_criteria.max_length
new_stopping_criteria = deepcopy(stopping_criteria)
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
return new_stopping_criteria
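# A minimal usage sketch (not part of the original module; the helper name is hypothetical):
# criteria in a StoppingCriteriaList halt generation as soon as any one of them returns True.
def _demo_stopping_criteria():
    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=8), MaxTimeCriteria(max_time=5.0)])
    input_ids = torch.zeros((1, 8), dtype=torch.long)
    scores = torch.zeros((1, 10))
    return criteria(input_ids, scores)  # True: the length budget is already met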
|
233zzh/TitanDataOperationSystem | 20,228 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/flot.tooltip/js/jquery.flot.tooltip.source.js | (function ($) {
// plugin options, default values
var defaultOptions = {
tooltip: {
show: false,
cssClass: "flotTip",
content: "%s | X: %x | Y: %y",
// allowed templates are:
// %s -> series label,
// %c -> series color,
// %lx -> x axis label (requires flot-axislabels plugin https://github.com/markrcote/flot-axislabels),
// %ly -> y axis label (requires flot-axislabels plugin https://github.com/markrcote/flot-axislabels),
// %x -> X value,
// %y -> Y value,
// %x.2 -> precision of X value,
// %p -> percent
xDateFormat: null,
yDateFormat: null,
monthNames: null,
dayNames: null,
shifts: {
x: 10,
y: 20
},
defaultTheme: true,
lines: false,
// callbacks
onHover: function (flotItem, $tooltipEl) {},
$compat: false
}
};
// dummy default options object for legacy code (<0.8.5) - is deleted later
defaultOptions.tooltipOpts = defaultOptions.tooltip;
// object
var FlotTooltip = function (plot) {
// variables
this.tipPosition = {x: 0, y: 0};
this.init(plot);
};
// main plugin function
FlotTooltip.prototype.init = function (plot) {
var that = this;
// detect other flot plugins
var plotPluginsLength = $.plot.plugins.length;
this.plotPlugins = [];
if (plotPluginsLength) {
for (var p = 0; p < plotPluginsLength; p++) {
this.plotPlugins.push($.plot.plugins[p].name);
}
}
plot.hooks.bindEvents.push(function (plot, eventHolder) {
// get plot options
that.plotOptions = plot.getOptions();
// for legacy (<0.8.5) implementations
if (typeof(that.plotOptions.tooltip) === 'boolean') {
that.plotOptions.tooltipOpts.show = that.plotOptions.tooltip;
that.plotOptions.tooltip = that.plotOptions.tooltipOpts;
delete that.plotOptions.tooltipOpts;
}
// if not enabled return
if (that.plotOptions.tooltip.show === false || typeof that.plotOptions.tooltip.show === 'undefined') return;
// shortcut to access tooltip options
that.tooltipOptions = that.plotOptions.tooltip;
if (that.tooltipOptions.$compat) {
that.wfunc = 'width';
that.hfunc = 'height';
} else {
that.wfunc = 'innerWidth';
that.hfunc = 'innerHeight';
}
// create tooltip DOM element
var $tip = that.getDomElement();
// bind event
$( plot.getPlaceholder() ).bind("plothover", plothover);
$(eventHolder).bind('mousemove', mouseMove);
});
plot.hooks.shutdown.push(function (plot, eventHolder){
$(plot.getPlaceholder()).unbind("plothover", plothover);
$(eventHolder).unbind("mousemove", mouseMove);
});
function mouseMove(e){
var pos = {};
pos.x = e.pageX;
pos.y = e.pageY;
plot.setTooltipPosition(pos);
}
function plothover(event, pos, item) {
// Simple distance formula.
var lineDistance = function (p1x, p1y, p2x, p2y) {
return Math.sqrt((p2x - p1x) * (p2x - p1x) + (p2y - p1y) * (p2y - p1y));
};
// Here is some voodoo magic for determining the distance to a line from a given point {x, y}.
var dotLineLength = function (x, y, x0, y0, x1, y1, o) {
if (o && !(o =
function (x, y, x0, y0, x1, y1) {
if (typeof x0 !== 'undefined') return { x: x0, y: y };
else if (typeof y0 !== 'undefined') return { x: x, y: y0 };
var left,
tg = -1 / ((y1 - y0) / (x1 - x0));
return {
x: left = (x1 * (x * tg - y + y0) + x0 * (x * -tg + y - y1)) / (tg * (x1 - x0) + y0 - y1),
y: tg * left - tg * x + y
};
} (x, y, x0, y0, x1, y1),
o.x >= Math.min(x0, x1) && o.x <= Math.max(x0, x1) && o.y >= Math.min(y0, y1) && o.y <= Math.max(y0, y1))
) {
var l1 = lineDistance(x, y, x0, y0), l2 = lineDistance(x, y, x1, y1);
return l1 > l2 ? l2 : l1;
} else {
var a = y0 - y1, b = x1 - x0, c = x0 * y1 - y0 * x1;
return Math.abs(a * x + b * y + c) / Math.sqrt(a * a + b * b);
}
};
if (item) {
plot.showTooltip(item, pos);
} else if (that.plotOptions.series.lines.show && that.tooltipOptions.lines === true) {
var maxDistance = that.plotOptions.grid.mouseActiveRadius;
var closestTrace = {
distance: maxDistance + 1
};
$.each(plot.getData(), function (i, series) {
var xBeforeIndex = 0,
xAfterIndex = -1;
// Our search here assumes our data is sorted via the x-axis.
// TODO: Improve efficiency somehow - search smaller sets of data.
for (var j = 1; j < series.data.length; j++) {
if (series.data[j - 1][0] <= pos.x && series.data[j][0] >= pos.x) {
xBeforeIndex = j - 1;
xAfterIndex = j;
}
}
if (xAfterIndex === -1) {
plot.hideTooltip();
return;
}
var pointPrev = { x: series.data[xBeforeIndex][0], y: series.data[xBeforeIndex][1] },
pointNext = { x: series.data[xAfterIndex][0], y: series.data[xAfterIndex][1] };
var distToLine = dotLineLength(series.xaxis.p2c(pos.x), series.yaxis.p2c(pos.y), series.xaxis.p2c(pointPrev.x),
series.yaxis.p2c(pointPrev.y), series.xaxis.p2c(pointNext.x), series.yaxis.p2c(pointNext.y), false);
if (distToLine < closestTrace.distance) {
var closestIndex = lineDistance(pointPrev.x, pointPrev.y, pos.x, pos.y) <
lineDistance(pos.x, pos.y, pointNext.x, pointNext.y) ? xBeforeIndex : xAfterIndex;
var pointSize = series.datapoints.pointsize;
// Calculate the point on the line vertically closest to our cursor.
var pointOnLine = [
pos.x,
pointPrev.y + ((pointNext.y - pointPrev.y) * ((pos.x - pointPrev.x) / (pointNext.x - pointPrev.x)))
];
var item = {
datapoint: pointOnLine,
dataIndex: closestIndex,
series: series,
seriesIndex: i
};
closestTrace = {
distance: distToLine,
item: item
};
}
});
if (closestTrace.distance < maxDistance + 1)
plot.showTooltip(closestTrace.item, pos);
else
plot.hideTooltip();
} else {
plot.hideTooltip();
}
}
// Quick little function for setting the tooltip position.
plot.setTooltipPosition = function (pos) {
var $tip = that.getDomElement();
var totalTipWidth = $tip.outerWidth() + that.tooltipOptions.shifts.x;
var totalTipHeight = $tip.outerHeight() + that.tooltipOptions.shifts.y;
if ((pos.x - $(window).scrollLeft()) > ($(window)[that.wfunc]() - totalTipWidth)) {
pos.x -= totalTipWidth;
}
if ((pos.y - $(window).scrollTop()) > ($(window)[that.hfunc]() - totalTipHeight)) {
pos.y -= totalTipHeight;
}
that.tipPosition.x = pos.x;
that.tipPosition.y = pos.y;
};
// Quick little function for showing the tooltip.
plot.showTooltip = function (target, position) {
var $tip = that.getDomElement();
// convert tooltip content template to real tipText
var tipText = that.stringFormat(that.tooltipOptions.content, target);
if (tipText === '')
return;
$tip.html(tipText);
plot.setTooltipPosition({ x: position.pageX, y: position.pageY });
$tip.css({
left: that.tipPosition.x + that.tooltipOptions.shifts.x,
top: that.tipPosition.y + that.tooltipOptions.shifts.y
}).show();
// run callback
if (typeof that.tooltipOptions.onHover === 'function') {
that.tooltipOptions.onHover(target, $tip);
}
};
// Quick little function for hiding the tooltip.
plot.hideTooltip = function () {
that.getDomElement().hide().html('');
};
};
/**
* get or create tooltip DOM element
* @return jQuery object
*/
FlotTooltip.prototype.getDomElement = function () {
var $tip = $('.' + this.tooltipOptions.cssClass);
if( $tip.length === 0 ){
$tip = $('<div />').addClass(this.tooltipOptions.cssClass);
$tip.appendTo('body').hide().css({position: 'absolute'});
if(this.tooltipOptions.defaultTheme) {
$tip.css({
'background': '#fff',
'z-index': '1040',
'padding': '0.4em 0.6em',
'border-radius': '0.5em',
'font-size': '0.8em',
'border': '1px solid #111',
'display': 'none',
'white-space': 'nowrap'
});
}
}
return $tip;
};
/**
* core function, create tooltip content
* @param {string} content - template with tooltip content
* @param {object} item - Flot item
* @return {string} real tooltip content for current item
*/
FlotTooltip.prototype.stringFormat = function (content, item) {
var percentPattern = /%p\.{0,1}(\d{0,})/;
var seriesPattern = /%s/;
var colorPattern = /%c/;
var xLabelPattern = /%lx/; // requires flot-axislabels plugin https://github.com/markrcote/flot-axislabels, will be ignored if plugin isn't loaded
var yLabelPattern = /%ly/; // requires flot-axislabels plugin https://github.com/markrcote/flot-axislabels, will be ignored if plugin isn't loaded
var xPattern = /%x\.{0,1}(\d{0,})/;
var yPattern = /%y\.{0,1}(\d{0,})/;
var xPatternWithoutPrecision = "%x";
var yPatternWithoutPrecision = "%y";
var customTextPattern = "%ct";
var x, y, customText, p;
// for threshold plugin we need to read data from different place
if (typeof item.series.threshold !== "undefined") {
x = item.datapoint[0];
y = item.datapoint[1];
customText = item.datapoint[2];
} else if (typeof item.series.lines !== "undefined" && item.series.lines.steps) {
x = item.series.datapoints.points[item.dataIndex * 2];
y = item.series.datapoints.points[item.dataIndex * 2 + 1];
// TODO: where to find custom text in this variant?
customText = "";
} else {
x = item.series.data[item.dataIndex][0];
y = item.series.data[item.dataIndex][1];
customText = item.series.data[item.dataIndex][2];
}
// I think this is only in case of threshold plugin
if (item.series.label === null && item.series.originSeries) {
item.series.label = item.series.originSeries.label;
}
// if it is a function callback get the content string
if (typeof(content) === 'function') {
content = content(item.series.label, x, y, item);
}
// the case where the passed content is equal to false
if (typeof(content) === 'boolean' && !content) {
return '';
}
// percent match for pie charts and stacked percent
if (typeof (item.series.percent) !== 'undefined') {
p = item.series.percent;
} else if (typeof (item.series.percents) !== 'undefined') {
p = item.series.percents[item.dataIndex];
}
if (typeof p === 'number') {
content = this.adjustValPrecision(percentPattern, content, p);
}
// series match
if (typeof(item.series.label) !== 'undefined') {
content = content.replace(seriesPattern, item.series.label);
} else {
//remove %s if label is undefined
content = content.replace(seriesPattern, "");
}
// color match
if (typeof(item.series.color) !== 'undefined') {
content = content.replace(colorPattern, item.series.color);
} else {
//remove %c if color is undefined
content = content.replace(colorPattern, "");
}
// x axis label match
if (this.hasAxisLabel('xaxis', item)) {
content = content.replace(xLabelPattern, item.series.xaxis.options.axisLabel);
} else {
//remove %lx if axis label is undefined or axislabels plugin not present
content = content.replace(xLabelPattern, "");
}
// y axis label match
if (this.hasAxisLabel('yaxis', item)) {
content = content.replace(yLabelPattern, item.series.yaxis.options.axisLabel);
} else {
//remove %ly if axis label is undefined or axislabels plugin not present
content = content.replace(yLabelPattern, "");
}
// time mode axes with custom dateFormat
if (this.isTimeMode('xaxis', item) && this.isXDateFormat(item)) {
content = content.replace(xPattern, this.timestampToDate(x, this.tooltipOptions.xDateFormat, item.series.xaxis.options));
}
if (this.isTimeMode('yaxis', item) && this.isYDateFormat(item)) {
content = content.replace(yPattern, this.timestampToDate(y, this.tooltipOptions.yDateFormat, item.series.yaxis.options));
}
// set precision if defined
if (typeof x === 'number') {
content = this.adjustValPrecision(xPattern, content, x);
}
if (typeof y === 'number') {
content = this.adjustValPrecision(yPattern, content, y);
}
// change x from number to given label, if given
if (typeof item.series.xaxis.ticks !== 'undefined') {
var ticks;
if (this.hasRotatedXAxisTicks(item)) {
// xaxis.ticks will be an empty array if tickRotor is being used, but the values are available in rotatedTicks
ticks = 'rotatedTicks';
} else {
ticks = 'ticks';
}
// see https://github.com/krzysu/flot.tooltip/issues/65
var tickIndex = item.dataIndex + item.seriesIndex;
for (var index in item.series.xaxis[ticks]) {
if (item.series.xaxis[ticks].hasOwnProperty(tickIndex) && !this.isTimeMode('xaxis', item)) {
var valueX = (this.isCategoriesMode('xaxis', item)) ? item.series.xaxis[ticks][tickIndex].label : item.series.xaxis[ticks][tickIndex].v;
if (valueX === x) {
content = content.replace(xPattern, item.series.xaxis[ticks][tickIndex].label);
}
}
}
}
// change y from number to given label, if given
if (typeof item.series.yaxis.ticks !== 'undefined') {
for (var index in item.series.yaxis.ticks) {
if (item.series.yaxis.ticks.hasOwnProperty(index)) {
var valueY = (this.isCategoriesMode('yaxis', item)) ? item.series.yaxis.ticks[index].label : item.series.yaxis.ticks[index].v;
if (valueY === y) {
content = content.replace(yPattern, item.series.yaxis.ticks[index].label);
}
}
}
}
// if no value customization, use tickFormatter by default
if (typeof item.series.xaxis.tickFormatter !== 'undefined') {
//escape dollar
content = content.replace(xPatternWithoutPrecision, item.series.xaxis.tickFormatter(x, item.series.xaxis).replace(/\$/g, '$$'));
}
if (typeof item.series.yaxis.tickFormatter !== 'undefined') {
//escape dollar
content = content.replace(yPatternWithoutPrecision, item.series.yaxis.tickFormatter(y, item.series.yaxis).replace(/\$/g, '$$'));
}
if (customText)
content = content.replace(customTextPattern, customText);
return content;
};
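// Usage sketch (illustrative, not part of the original plugin): with content "%s | X: %x.2 | Y: %y.2"
// and a hovered point (3.14159, 0.5) on a series labelled "CPU", stringFormat returns
// "CPU | X: 3.14 | Y: 0.50".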
// helpers just for readability
FlotTooltip.prototype.isTimeMode = function (axisName, item) {
return (typeof item.series[axisName].options.mode !== 'undefined' && item.series[axisName].options.mode === 'time');
};
FlotTooltip.prototype.isXDateFormat = function (item) {
return (typeof this.tooltipOptions.xDateFormat !== 'undefined' && this.tooltipOptions.xDateFormat !== null);
};
FlotTooltip.prototype.isYDateFormat = function (item) {
return (typeof this.tooltipOptions.yDateFormat !== 'undefined' && this.tooltipOptions.yDateFormat !== null);
};
FlotTooltip.prototype.isCategoriesMode = function (axisName, item) {
return (typeof item.series[axisName].options.mode !== 'undefined' && item.series[axisName].options.mode === 'categories');
};
//
FlotTooltip.prototype.timestampToDate = function (tmst, dateFormat, options) {
var theDate = $.plot.dateGenerator(tmst, options);
return $.plot.formatDate(theDate, dateFormat, this.tooltipOptions.monthNames, this.tooltipOptions.dayNames);
};
//
FlotTooltip.prototype.adjustValPrecision = function (pattern, content, value) {
var precision;
var matchResult = content.match(pattern);
if( matchResult !== null ) {
if(RegExp.$1 !== '') {
precision = RegExp.$1;
value = value.toFixed(precision);
// only replace content if a precision is given; otherwise the tickFormatter is used
content = content.replace(pattern, value);
}
}
return content;
};
// other plugins detection below
// check if flot-axislabels plugin (https://github.com/markrcote/flot-axislabels) is used and that an axis label is given
FlotTooltip.prototype.hasAxisLabel = function (axisName, item) {
return ($.inArray('axisLabels', this.plotPlugins) !== -1 && typeof item.series[axisName].options.axisLabel !== 'undefined' && item.series[axisName].options.axisLabel.length > 0);
};
// check whether flot-tickRotor, a plugin which allows rotation of X-axis ticks, is being used
FlotTooltip.prototype.hasRotatedXAxisTicks = function (item) {
return ($.inArray('tickRotor', this.plotPlugins) !== -1 && typeof item.series.xaxis.rotatedTicks !== 'undefined');
};
//
var init = function (plot) {
new FlotTooltip(plot);
};
// define Flot plugin
$.plot.plugins.push({
init: init,
options: defaultOptions,
name: 'tooltip',
version: '0.8.5'
});
})(jQuery);
|
27182812/ChatGLM-LLaMA-chinese-insturct | 9,308 | src/transformers/generation/__init__.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available
_import_structure = {"configuration_utils": ["GenerationConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["beam_constraints"] = [
"Constraint",
"ConstraintListState",
"DisjunctiveConstraint",
"PhrasalConstraint",
]
_import_structure["beam_search"] = [
"BeamHypotheses",
"BeamScorer",
"BeamSearchScorer",
"ConstrainedBeamSearchScorer",
]
_import_structure["logits_process"] = [
"EpsilonLogitsWarper",
"EtaLogitsWarper",
"ForcedBOSTokenLogitsProcessor",
"ForcedEOSTokenLogitsProcessor",
"HammingDiversityLogitsProcessor",
"InfNanRemoveLogitsProcessor",
"LogitsProcessor",
"LogitsProcessorList",
"LogitsWarper",
"MinLengthLogitsProcessor",
"MinNewTokensLengthLogitsProcessor",
"NoBadWordsLogitsProcessor",
"NoRepeatNGramLogitsProcessor",
"PrefixConstrainedLogitsProcessor",
"RepetitionPenaltyLogitsProcessor",
"EncoderRepetitionPenaltyLogitsProcessor",
"TemperatureLogitsWarper",
"TopKLogitsWarper",
"TopPLogitsWarper",
"TypicalLogitsWarper",
"EncoderNoRepeatNGramLogitsProcessor",
"ExponentialDecayLengthPenalty",
"LogitNormalization",
]
_import_structure["stopping_criteria"] = [
"MaxNewTokensCriteria",
"MaxLengthCriteria",
"MaxTimeCriteria",
"StoppingCriteria",
"StoppingCriteriaList",
"validate_stopping_criteria",
]
_import_structure["utils"] = [
"GenerationMixin",
"top_k_top_p_filtering",
"GreedySearchEncoderDecoderOutput",
"GreedySearchDecoderOnlyOutput",
"SampleEncoderDecoderOutput",
"SampleDecoderOnlyOutput",
"BeamSearchEncoderDecoderOutput",
"BeamSearchDecoderOnlyOutput",
"BeamSampleEncoderDecoderOutput",
"BeamSampleDecoderOnlyOutput",
"ContrastiveSearchEncoderDecoderOutput",
"ContrastiveSearchDecoderOnlyOutput",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tf_logits_process"] = [
"TFForcedBOSTokenLogitsProcessor",
"TFForcedEOSTokenLogitsProcessor",
"TFLogitsProcessor",
"TFLogitsProcessorList",
"TFLogitsWarper",
"TFMinLengthLogitsProcessor",
"TFNoBadWordsLogitsProcessor",
"TFNoRepeatNGramLogitsProcessor",
"TFRepetitionPenaltyLogitsProcessor",
"TFTemperatureLogitsWarper",
"TFTopKLogitsWarper",
"TFTopPLogitsWarper",
"TFForceTokensLogitsProcessor",
"TFSuppressTokensAtBeginLogitsProcessor",
"TFSuppressTokensLogitsProcessor",
]
_import_structure["tf_utils"] = [
"TFGenerationMixin",
"tf_top_k_top_p_filtering",
"TFGreedySearchDecoderOnlyOutput",
"TFGreedySearchEncoderDecoderOutput",
"TFSampleEncoderDecoderOutput",
"TFSampleDecoderOnlyOutput",
"TFBeamSearchEncoderDecoderOutput",
"TFBeamSearchDecoderOnlyOutput",
"TFBeamSampleEncoderDecoderOutput",
"TFBeamSampleDecoderOnlyOutput",
"TFContrastiveSearchEncoderDecoderOutput",
"TFContrastiveSearchDecoderOnlyOutput",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["flax_logits_process"] = [
"FlaxForcedBOSTokenLogitsProcessor",
"FlaxForcedEOSTokenLogitsProcessor",
"FlaxLogitsProcessor",
"FlaxLogitsProcessorList",
"FlaxLogitsWarper",
"FlaxMinLengthLogitsProcessor",
"FlaxTemperatureLogitsWarper",
"FlaxTopKLogitsWarper",
"FlaxTopPLogitsWarper",
]
_import_structure["flax_utils"] = [
"FlaxGenerationMixin",
"FlaxGreedySearchOutput",
"FlaxSampleOutput",
"FlaxBeamSearchOutput",
]
if TYPE_CHECKING:
from .configuration_utils import GenerationConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .beam_constraints import Constraint, ConstraintListState, DisjunctiveConstraint, PhrasalConstraint
from .beam_search import BeamHypotheses, BeamScorer, BeamSearchScorer, ConstrainedBeamSearchScorer
from .logits_process import (
EncoderNoRepeatNGramLogitsProcessor,
EncoderRepetitionPenaltyLogitsProcessor,
EpsilonLogitsWarper,
EtaLogitsWarper,
ExponentialDecayLengthPenalty,
ForcedBOSTokenLogitsProcessor,
ForcedEOSTokenLogitsProcessor,
HammingDiversityLogitsProcessor,
InfNanRemoveLogitsProcessor,
LogitNormalization,
LogitsProcessor,
LogitsProcessorList,
LogitsWarper,
MinLengthLogitsProcessor,
MinNewTokensLengthLogitsProcessor,
NoBadWordsLogitsProcessor,
NoRepeatNGramLogitsProcessor,
PrefixConstrainedLogitsProcessor,
RepetitionPenaltyLogitsProcessor,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
TypicalLogitsWarper,
)
from .stopping_criteria import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
from .utils import (
BeamSampleDecoderOnlyOutput,
BeamSampleEncoderDecoderOutput,
BeamSearchDecoderOnlyOutput,
BeamSearchEncoderDecoderOutput,
ContrastiveSearchDecoderOnlyOutput,
ContrastiveSearchEncoderDecoderOutput,
GenerationMixin,
GreedySearchDecoderOnlyOutput,
GreedySearchEncoderDecoderOutput,
SampleDecoderOnlyOutput,
SampleEncoderDecoderOutput,
top_k_top_p_filtering,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tf_logits_process import (
TFForcedBOSTokenLogitsProcessor,
TFForcedEOSTokenLogitsProcessor,
TFForceTokensLogitsProcessor,
TFLogitsProcessor,
TFLogitsProcessorList,
TFLogitsWarper,
TFMinLengthLogitsProcessor,
TFNoBadWordsLogitsProcessor,
TFNoRepeatNGramLogitsProcessor,
TFRepetitionPenaltyLogitsProcessor,
TFSuppressTokensAtBeginLogitsProcessor,
TFSuppressTokensLogitsProcessor,
TFTemperatureLogitsWarper,
TFTopKLogitsWarper,
TFTopPLogitsWarper,
)
from .tf_utils import (
TFBeamSampleDecoderOnlyOutput,
TFBeamSampleEncoderDecoderOutput,
TFBeamSearchDecoderOnlyOutput,
TFBeamSearchEncoderDecoderOutput,
TFContrastiveSearchDecoderOnlyOutput,
TFContrastiveSearchEncoderDecoderOutput,
TFGenerationMixin,
TFGreedySearchDecoderOnlyOutput,
TFGreedySearchEncoderDecoderOutput,
TFSampleDecoderOnlyOutput,
TFSampleEncoderDecoderOutput,
tf_top_k_top_p_filtering,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .flax_logits_process import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessor,
FlaxLogitsProcessorList,
FlaxLogitsWarper,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
from .flax_utils import FlaxBeamSearchOutput, FlaxGenerationMixin, FlaxGreedySearchOutput, FlaxSampleOutput
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 44,108 | src/transformers/generation/logits_process.py | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import math
from typing import Callable, Iterable, List, Optional, Tuple, Union
import numpy as np
import torch
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
            using beam search or log softmax for each vocabulary token when using beam search.
kwargs:
Additional logits processor specific kwargs.
Return:
`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class LogitsProcessor:
"""Abstract base class for all logit processors that can be applied during generation."""
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
"""Torch method for processing logits."""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
)
class LogitsWarper:
"""Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
"""Torch method for warping logits."""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
)
class LogitsProcessorList(list):
"""
This class can be used to create a list of [`LogitsProcessor`] or [`LogitsWarper`] to subsequently process a
`scores` input tensor. This class inherits from list and adds a specific *__call__* method to apply each
[`LogitsProcessor`] or [`LogitsWarper`] to the inputs.
"""
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.FloatTensor:
for processor in self:
function_args = inspect.signature(processor.__call__).parameters
if len(function_args) > 2:
if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
raise ValueError(
f"Make sure that all the required parameters: {list(function_args.keys())} for "
f"{processor.__class__} are passed to the logits processor."
)
scores = processor(input_ids, scores, **kwargs)
else:
scores = processor(input_ids, scores)
return scores
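# A minimal usage sketch of chaining processors through a LogitsProcessorList;
# the processors, token ids and tensor shapes below are illustrative only.
#
#   import torch
#   processors = LogitsProcessorList(
#       [
#           MinLengthLogitsProcessor(min_length=5, eos_token_id=0),
#           RepetitionPenaltyLogitsProcessor(penalty=1.2),
#       ]
#   )
#   input_ids = torch.tensor([[1, 2, 3]])   # (batch_size, sequence_length)
#   scores = torch.randn(1, 10)             # (batch_size, vocab_size)
#   scores = processors(input_ids, scores)  # each processor is applied in order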
class MinLengthLogitsProcessor(LogitsProcessor):
r"""
[`LogitsProcessor`] enforcing a min-length by setting EOS probability to 0.
Args:
min_length (`int`):
The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`.
eos_token_id (`Union[int, List[int]]`):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
"""
def __init__(self, min_length: int, eos_token_id: Union[int, List[int]]):
if not isinstance(min_length, int) or min_length < 0:
raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
if not all([isinstance(i, int) for i in eos_token_id]) or any([i < 0 for i in eos_token_id]):
raise ValueError(f"`eos_token_id` has to be a list of positive integers, but is {eos_token_id}")
self.min_length = min_length
self.eos_token_id = eos_token_id
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
cur_len = input_ids.shape[-1]
if cur_len < self.min_length:
for i in self.eos_token_id:
scores[:, i] = -float("inf")
return scores
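# Sketch: until the sequence reaches `min_length`, every EOS id is masked
# (token ids below are made up).
#
#   import torch
#   proc = MinLengthLogitsProcessor(min_length=5, eos_token_id=2)
#   scores = proc(torch.tensor([[7, 8]]), torch.zeros(1, 4))
#   # cur_len is 2 < 5, so scores[:, 2] is now -inf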
class MinNewTokensLengthLogitsProcessor(LogitsProcessor):
r"""
[`LogitsProcessor`] enforcing a min-length of new tokens by setting EOS (End-Of-Sequence) token probability to 0.
Args:
prompt_length_to_skip (`int`):
The input tokens length.
min_new_tokens (`int`):
The minimum *new* tokens length below which the score of `eos_token_id` is set to `-float("Inf")`.
eos_token_id (`int`):
The id of the *end-of-sequence* token.
"""
def __init__(self, prompt_length_to_skip: int, min_new_tokens: int, eos_token_id: int):
for arg_name, arg_value in [
("prompt_length_to_skip", prompt_length_to_skip),
("min_new_tokens", min_new_tokens),
("eos_token_id", eos_token_id),
]:
if not isinstance(arg_value, int) or arg_value < 0:
raise ValueError(f"`{arg_name}` has to be a positive integer, but is {arg_value}")
self.prompt_length_to_skip = prompt_length_to_skip
self.min_new_tokens = min_new_tokens
self.eos_token_id = eos_token_id
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
new_tokens_length = input_ids.shape[-1] - self.prompt_length_to_skip
if new_tokens_length < self.min_new_tokens:
scores[:, self.eos_token_id] = -float("inf")
return scores
class TemperatureLogitsWarper(LogitsWarper):
r"""
[`LogitsWarper`] for temperature (exponential scaling output probability distribution).
Args:
temperature (`float`):
The value used to module the logits distribution.
"""
def __init__(self, temperature: float):
if not isinstance(temperature, float) or not (temperature > 0):
raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
self.temperature = temperature
def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.FloatTensor:
scores = scores / self.temperature
return scores
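# Sketch of the effect: dividing logits by a temperature below 1 sharpens the
# softmax distribution, while a temperature above 1 flattens it (toy values).
#
#   import torch
#   logits = torch.tensor([[2.0, 1.0, 0.0]])
#   sharp = TemperatureLogitsWarper(0.5)(torch.tensor([[0]]), logits).softmax(-1)
#   # `sharp` concentrates more mass on the first token than logits.softmax(-1)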
class RepetitionPenaltyLogitsProcessor(LogitsProcessor):
r"""
[`LogitsProcessor`] enforcing an exponential penalty on repeated sequences.
Args:
        penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
"""
def __init__(self, penalty: float):
if not isinstance(penalty, float) or not (penalty > 0):
raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")
self.penalty = penalty
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
score = torch.gather(scores, 1, input_ids)
# if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability
score = torch.where(score < 0, score * self.penalty, score / self.penalty)
scores.scatter_(1, input_ids, score)
return scores
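# Worked sketch: with penalty=2.0, a previously generated token with a positive
# logit is halved and one with a negative logit is doubled (toy values).
#
#   import torch
#   proc = RepetitionPenaltyLogitsProcessor(penalty=2.0)
#   input_ids = torch.tensor([[0, 1]])         # tokens 0 and 1 were generated
#   scores = torch.tensor([[2.0, -2.0, 2.0]])
#   proc(input_ids, scores)                    # -> [[1.0, -4.0, 2.0]]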
class EncoderRepetitionPenaltyLogitsProcessor(LogitsProcessor):
r"""
[`LogitsProcessor`] enforcing an exponential penalty on tokens that are not in the original input.
Args:
        penalty (`float`):
The parameter for hallucination penalty. 1.0 means no penalty.
encoder_input_ids (`torch.LongTensor`):
The encoder_input_ids that should not be repeated within the decoder ids.
"""
def __init__(self, penalty: float, encoder_input_ids: torch.LongTensor):
if not isinstance(penalty, float) or not (penalty > 0):
raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")
self.penalty = 1 / penalty
self.encoder_input_ids = encoder_input_ids
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
score = torch.gather(scores, 1, self.encoder_input_ids)
# if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability
score = torch.where(score < 0, score * self.penalty, score / self.penalty)
scores.scatter_(1, self.encoder_input_ids, score)
return scores
class TopPLogitsWarper(LogitsWarper):
"""
    [`LogitsWarper`] that performs top-p, i.e. restricting generation to the smallest set of most probable tokens
    whose cumulative probability is at least `top_p`.
Args:
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
filter_value (`float`, *optional*, defaults to `-float("Inf")`):
All filtered values will be set to this float value.
min_tokens_to_keep (`int`, *optional*, defaults to 1):
Minimum number of tokens that cannot be filtered.
"""
def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
top_p = float(top_p)
if top_p < 0 or top_p > 1.0:
raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
self.top_p = top_p
self.filter_value = filter_value
self.min_tokens_to_keep = min_tokens_to_keep
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
sorted_logits, sorted_indices = torch.sort(scores, descending=False)
cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)
        # The sort is ascending, so remove tokens whose cumulative probability stays at or below 1 - top_p
sorted_indices_to_remove = cumulative_probs <= (1 - self.top_p)
if self.min_tokens_to_keep > 1:
# Keep at least min_tokens_to_keep
sorted_indices_to_remove[..., -self.min_tokens_to_keep :] = 0
# scatter sorted tensors to original indexing
indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
scores = scores.masked_fill(indices_to_remove, self.filter_value)
return scores
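# Sketch: with top_p=0.9, the smallest set of most probable tokens whose
# cumulative probability reaches 0.9 is kept and the rest is masked (toy values).
#
#   import torch
#   warper = TopPLogitsWarper(top_p=0.9)
#   warper(torch.tensor([[0]]), torch.tensor([[3.0, 2.0, 1.0, -5.0]]))
#   # the two highest logits survive; the 1.0 and -5.0 entries become -inf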
class TopKLogitsWarper(LogitsWarper):
r"""
[`LogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements.
Args:
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
filter_value (`float`, *optional*, defaults to `-float("Inf")`):
All filtered values will be set to this float value.
min_tokens_to_keep (`int`, *optional*, defaults to 1):
Minimum number of tokens that cannot be filtered.
"""
def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
if not isinstance(top_k, int) or top_k <= 0:
raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
self.top_k = max(top_k, min_tokens_to_keep)
self.filter_value = filter_value
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
top_k = min(self.top_k, scores.size(-1)) # Safety check
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = scores < torch.topk(scores, top_k)[0][..., -1, None]
scores = scores.masked_fill(indices_to_remove, self.filter_value)
return scores
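# Sketch: top_k=2 keeps the two highest logits per row and masks the rest
# (toy values).
#
#   import torch
#   warper = TopKLogitsWarper(top_k=2)
#   warper(torch.tensor([[0]]), torch.tensor([[1.0, 3.0, 2.0, 0.5]]))
#   # -> [[-inf, 3.0, 2.0, -inf]]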
class TypicalLogitsWarper(LogitsWarper):
r"""
[`LogitsWarper`] that performs typical decoding. See [Typical Decoding for Natural Language
Generation](https://arxiv.org/abs/2202.00666) for more information.
Args:
mass (`float`):
            Value of `typical_p`, strictly between 0 and 1, defaults to 0.9.
filter_value (`float`, *optional*, defaults to `-float("Inf")`):
All filtered values will be set to this float value.
min_tokens_to_keep (`int`, *optional*, defaults to 1):
Minimum number of tokens that cannot be filtered.
"""
def __init__(self, mass: float = 0.9, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
mass = float(mass)
if not (mass > 0 and mass < 1):
raise ValueError(f"`typical_p` has to be a float > 0 and < 1, but is {mass}")
self.filter_value = filter_value
self.mass = mass
self.min_tokens_to_keep = min_tokens_to_keep
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
# calculate entropy
normalized = torch.nn.functional.log_softmax(scores, dim=-1)
p = torch.exp(normalized)
ent = -(normalized * p).nansum(-1, keepdim=True)
# shift and sort
shifted_scores = torch.abs((-normalized) - ent)
sorted_scores, sorted_indices = torch.sort(shifted_scores, descending=False)
sorted_logits = scores.gather(-1, sorted_indices)
cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)
# Remove tokens with cumulative mass above the threshold
last_ind = (cumulative_probs < self.mass).sum(dim=1)
last_ind[last_ind < 0] = 0
sorted_indices_to_remove = sorted_scores > sorted_scores.gather(1, last_ind.view(-1, 1))
if self.min_tokens_to_keep > 1:
# Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0
indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
scores = scores.masked_fill(indices_to_remove, self.filter_value)
return scores
class EpsilonLogitsWarper(LogitsWarper):
r"""
[`LogitsWarper`] that performs epsilon-sampling, i.e. restricting to tokens with `prob >= epsilon`. Takes the
largest min_tokens_to_keep tokens if no tokens satisfy this constraint. See [Truncation Sampling as Language Model
Desmoothing](https://arxiv.org/abs/2210.15191) for more information.
Args:
epsilon (`float`):
            If set to > 0, only the tokens with probabilities `epsilon` or higher are kept for generation.
filter_value (`float`, *optional*, defaults to `-float("Inf")`):
All filtered values will be set to this float value.
min_tokens_to_keep (`int`, *optional*, defaults to 1):
Minimum number of tokens that cannot be filtered.
"""
def __init__(self, epsilon: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
epsilon = float(epsilon)
if epsilon <= 0 or epsilon >= 1:
raise ValueError(f"`epsilon_cutoff` has to be a float > 0 and < 1, but is {epsilon}")
min_tokens_to_keep = int(min_tokens_to_keep)
if min_tokens_to_keep < 1:
raise ValueError(
f"`min_tokens_to_keep` has to be a strictly positive integer, but is {min_tokens_to_keep}"
)
self.epsilon = epsilon
self.filter_value = filter_value
self.min_tokens_to_keep = min_tokens_to_keep
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
# Determine which indices to remove
probabilities = scores.softmax(dim=-1)
indices_to_remove = probabilities < self.epsilon
# Keep the words with the 'min_tokens_to_keep'-highest probabilities
top_k = min(self.min_tokens_to_keep, scores.size(-1)) # Safety check
indices_to_remove = indices_to_remove & (scores < torch.topk(scores, top_k)[0][..., -1, None])
scores = scores.masked_fill(indices_to_remove, self.filter_value)
return scores
class EtaLogitsWarper(LogitsWarper):
r"""
    [`LogitsWarper`] that performs eta-sampling, i.e. calculates a dynamic cutoff `eta := min(epsilon, sqrt(epsilon) *
    e^-entropy(probabilities))` and restricts to tokens with `prob >= eta`. Takes the largest min_tokens_to_keep
    tokens if no tokens satisfy this constraint. See [Truncation Sampling as Language Model
    Desmoothing](https://arxiv.org/abs/2210.15191) for more information.
    Args:
        epsilon (`float`):
            Cutoff parameter used to compute the dynamic threshold `eta`; has to be a float strictly between 0 and 1.
        filter_value (`float`, *optional*, defaults to `-float("Inf")`):
            All filtered values will be set to this float value.
        min_tokens_to_keep (`int`, *optional*, defaults to 1):
            Minimum number of tokens that cannot be filtered."""
def __init__(self, epsilon: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
epsilon = float(epsilon)
if epsilon <= 0 or epsilon >= 1:
raise ValueError(f"`eta_cutoff` has to be a float > 0 and < 1, but is {epsilon}")
min_tokens_to_keep = int(min_tokens_to_keep)
if min_tokens_to_keep < 1:
raise ValueError(
f"`min_tokens_to_keep` has to be a strictly positive integer, but is {min_tokens_to_keep}"
)
self.epsilon = torch.tensor(epsilon)
self.filter_value = filter_value
self.min_tokens_to_keep = min_tokens_to_keep
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
# Calculate the adaptive cutoff
probabilities = scores.softmax(dim=-1)
entropy = torch.distributions.Categorical(logits=scores).entropy()
eta = torch.min(self.epsilon, torch.sqrt(self.epsilon) * torch.exp(-entropy))[..., None]
indices_to_remove = probabilities < eta
# Keep the words with the 'min_tokens_to_keep'-highest probabilities
top_k = min(self.min_tokens_to_keep, scores.size(-1)) # Safety check
indices_to_remove = indices_to_remove & (scores < torch.topk(scores, top_k)[0][..., -1, None])
scores = scores.masked_fill(indices_to_remove, self.filter_value)
return scores
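# Sketch of the adaptive cutoff computed above: eta = min(epsilon,
# sqrt(epsilon) * exp(-H)), where H is the entropy of the next-token
# distribution. A peaked (low-entropy) distribution keeps a cutoff close to
# epsilon, while a flat one lowers it (toy values).
#
#   import torch
#   eps = torch.tensor(3e-4)
#   entropy = torch.tensor(2.0)
#   eta = torch.min(eps, torch.sqrt(eps) * torch.exp(-entropy))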
def _get_ngrams(ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int):
generated_ngrams = [{} for _ in range(num_hypos)]
for idx in range(num_hypos):
gen_tokens = prev_input_ids[idx].tolist()
generated_ngram = generated_ngrams[idx]
for ngram in zip(*[gen_tokens[i:] for i in range(ngram_size)]):
prev_ngram_tuple = tuple(ngram[:-1])
generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]]
return generated_ngrams
def _get_generated_ngrams(banned_ngrams, prev_input_ids, ngram_size, cur_len):
# Before decoding the next token, prevent decoding of ngrams that have already appeared
start_idx = cur_len + 1 - ngram_size
ngram_idx = tuple(prev_input_ids[start_idx:cur_len].tolist())
return banned_ngrams.get(ngram_idx, [])
def _calc_banned_ngram_tokens(
ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int, cur_len: int
) -> List[Iterable[int]]:
"""Copied from fairseq for no_repeat_ngram in beam_search"""
if cur_len + 1 < ngram_size:
# return no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
return [[] for _ in range(num_hypos)]
generated_ngrams = _get_ngrams(ngram_size, prev_input_ids, num_hypos)
banned_tokens = [
_get_generated_ngrams(generated_ngrams[hypo_idx], prev_input_ids[hypo_idx], ngram_size, cur_len)
for hypo_idx in range(num_hypos)
]
return banned_tokens
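# Toy walk-through of the helpers above: with ngram_size=2 and the sequence
# [1, 2, 1], _get_ngrams records {(1,): [2], (2,): [1]}; the context ends in
# token 1, so token 2 is banned for the next step.
#
#   import torch
#   banned = _calc_banned_ngram_tokens(2, torch.tensor([[1, 2, 1]]), num_hypos=1, cur_len=3)
#   # banned == [[2]]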
class NoRepeatNGramLogitsProcessor(LogitsProcessor):
r"""
[`LogitsProcessor`] that enforces no repetition of n-grams. See
[Fairseq](https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345).
Args:
ngram_size (`int`):
All ngrams of size `ngram_size` can only occur once.
"""
def __init__(self, ngram_size: int):
if not isinstance(ngram_size, int) or ngram_size <= 0:
raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}")
self.ngram_size = ngram_size
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
num_batch_hypotheses = scores.shape[0]
cur_len = input_ids.shape[-1]
banned_batch_tokens = _calc_banned_ngram_tokens(self.ngram_size, input_ids, num_batch_hypotheses, cur_len)
for i, banned_tokens in enumerate(banned_batch_tokens):
scores[i, banned_tokens] = -float("inf")
return scores
class EncoderNoRepeatNGramLogitsProcessor(LogitsProcessor):
r"""
[`LogitsProcessor`] that enforces no repetition of encoder input ids n-grams for the decoder ids. See
[ParlAI](https://github.com/facebookresearch/ParlAI/blob/master/parlai/core/torch_generator_agent.py#L1350).
Args:
        encoder_ngram_size (`int`):
            All ngrams of size `encoder_ngram_size` occurring in the encoder input ids cannot be repeated in the
            decoder ids.
        encoder_input_ids (`torch.LongTensor`):
            The encoder_input_ids whose ngrams should not be repeated within the decoder ids.
"""
def __init__(self, encoder_ngram_size: int, encoder_input_ids: torch.LongTensor):
if not isinstance(encoder_ngram_size, int) or encoder_ngram_size <= 0:
raise ValueError(
f"`encoder_ngram_size` has to be a strictly positive integer, but is {encoder_ngram_size}"
)
self.ngram_size = encoder_ngram_size
if len(encoder_input_ids.shape) == 1:
encoder_input_ids = encoder_input_ids.unsqueeze(0)
self.batch_size = encoder_input_ids.shape[0]
self.generated_ngrams = _get_ngrams(encoder_ngram_size, encoder_input_ids, self.batch_size)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
# B x num_beams
num_hypos = scores.shape[0]
num_beams = num_hypos // self.batch_size
cur_len = input_ids.shape[-1]
banned_batch_tokens = [
_get_generated_ngrams(
self.generated_ngrams[hypo_idx // num_beams], input_ids[hypo_idx], self.ngram_size, cur_len
)
for hypo_idx in range(num_hypos)
]
for i, banned_tokens in enumerate(banned_batch_tokens):
scores[i, banned_tokens] = -float("inf")
return scores
class NoBadWordsLogitsProcessor(LogitsProcessor):
"""
[`LogitsProcessor`] that enforces that specified sequences will never be sampled.
Args:
bad_words_ids (`List[List[int]]`):
List of list of token ids that are not allowed to be generated. In order to get the token ids of the words
that should not appear in the generated text, use `tokenizer(bad_words, add_prefix_space=True,
add_special_tokens=False).input_ids`.
eos_token_id (`Union[int, List[int]]`):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
"""
def __init__(self, bad_words_ids: List[List[int]], eos_token_id: Union[int, List[int]]):
if not isinstance(bad_words_ids, List) or len(bad_words_ids) == 0:
raise ValueError(f"`bad_words_ids` has to be a non-empty list, but is {bad_words_ids}.")
if any(not isinstance(bad_word_ids, list) for bad_word_ids in bad_words_ids):
raise ValueError(f"`bad_words_ids` has to be a list of lists, but is {bad_words_ids}.")
if any(
any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in bad_word_ids)
for bad_word_ids in bad_words_ids
):
raise ValueError(
f"Each list in `bad_words_ids` has to be a list of positive integers, but is {bad_words_ids}."
)
if eos_token_id is None:
eos_token_id = []
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
bad_words_ids = list(
filter(lambda bad_token_seq: all([bad_token_seq != [i] for i in eos_token_id]), bad_words_ids)
)
self.bad_words_id_length_1 = []
self.bad_words_id_length_greater_than_1 = []
for word in bad_words_ids:
if len(word) == 1:
self.bad_words_id_length_1.append(word[0])
else:
self.bad_words_id_length_greater_than_1.append(word)
self.static_bad_words_mask: Optional[torch.LongTensor] = None
for banned_token_seq in self.bad_words_id_length_greater_than_1:
if len(banned_token_seq) == 0:
raise ValueError(f"Banned words token sequences {bad_words_ids} cannot have an empty list")
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
if self.static_bad_words_mask is None and len(self.bad_words_id_length_1) > 0:
self.static_bad_words_mask = self._calc_static_bad_word_mask(scores)
dynamic_banned_tokens = self._calc_banned_bad_words_ids(input_ids.tolist())
scores = self._set_scores_to_inf_for_banned_tokens(scores, dynamic_banned_tokens)
return scores
def _calc_static_bad_word_mask(self, scores: torch.FloatTensor) -> torch.BoolTensor:
static_bad_words_mask = torch.zeros(scores.shape[1])
static_bad_words_mask[self.bad_words_id_length_1] = 1
return static_bad_words_mask.unsqueeze(0).to(scores.device).bool()
def _tokens_match(self, prev_tokens: List[int], tokens: List[int]) -> bool:
if len(tokens) == 0:
            # if the bad word is a single token, it is always banned
return True
elif len(tokens) > len(prev_tokens):
            # if the bad word sequence is longer than the previous input_ids, they can't match
return False
else:
return prev_tokens[-len(tokens) :] == tokens
def _calc_banned_bad_words_ids(self, prev_input_ids: List[List[int]]) -> Iterable[int]:
banned_tokens = []
for prev_input_ids_slice in prev_input_ids:
banned_tokens_slice = []
for banned_token_seq in self.bad_words_id_length_greater_than_1:
if self._tokens_match(prev_input_ids_slice, banned_token_seq[:-1]):
banned_tokens_slice.append(banned_token_seq[-1])
banned_tokens.append(banned_tokens_slice)
return banned_tokens
def _set_scores_to_inf_for_banned_tokens(
self, scores: torch.Tensor, banned_tokens: List[List[int]]
) -> torch.Tensor:
"""
        Modifies the scores in place by setting the banned token positions to `-inf`. `banned_tokens` is expected to
        be a list of lists of banned token ids, one inner list per batch item.
Args:
scores: logits distribution of shape (batch size, vocabulary size)
banned_tokens: list of list of tokens to ban of length (batch_size)
"""
banned_mask_list = []
for idx, batch_banned_tokens in enumerate(banned_tokens):
for token in batch_banned_tokens:
                # Eliminates bad word IDs that fall outside the vocabulary (a valid id must be < vocab size).
                if token < scores.shape[1]:
banned_mask_list.append([idx, token])
else:
logger.error(
f"An invalid bad word ID is defined: {token}. This ID is not contained in the "
"vocabulary, and is therefore ignored."
)
if not banned_mask_list and self.static_bad_words_mask is None:
return scores
else:
if banned_mask_list:
banned_mask = torch.LongTensor(banned_mask_list)
indices = torch.ones(len(banned_mask))
# A sparse tensor is generated from a list of coordinates: [[0, 1], [0, 2], [2, 0]]. A conversion to dense tensor generates:
# [ 0 1 1 ]
# [ 0 0 0 ]
# [ 1 0 0 ]
banned_mask = (
torch.sparse.LongTensor(banned_mask.t(), indices, scores.size())
.to(scores.device)
.to_dense()
.bool()
)
if self.static_bad_words_mask is not None:
banned_mask = torch.bitwise_or(banned_mask, self.static_bad_words_mask)
else:
banned_mask = self.static_bad_words_mask
scores = scores.masked_fill(banned_mask, -float("inf"))
return scores
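# Sketch with hand-written ids (in practice `bad_words_ids` comes from the
# tokenizer, as described in the class docstring).
#
#   proc = NoBadWordsLogitsProcessor(bad_words_ids=[[5], [3, 4]], eos_token_id=0)
#   # token 5 is always banned; token 4 is banned whenever the previous token is 3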
class PrefixConstrainedLogitsProcessor(LogitsProcessor):
r"""
[`LogitsProcessor`] that enforces constrained generation and is useful for prefix-conditioned constrained
generation. See [Autoregressive Entity Retrieval](https://arxiv.org/abs/2010.00904) for more information.
Args:
prefix_allowed_tokens_fn: (`Callable[[int, torch.Tensor], List[int]]`):
This function constraints the beam search to allowed tokens only at each step. This function takes 2
arguments `inputs_ids` and the batch ID `batch_id`. It has to return a list with the allowed tokens for the
next generation step conditioned on the previously generated tokens `inputs_ids` and the batch ID
`batch_id`.
"""
def __init__(self, prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]], num_beams: int):
self._prefix_allowed_tokens_fn = prefix_allowed_tokens_fn
self._num_beams = num_beams
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
mask = torch.full_like(scores, -math.inf)
for batch_id, beam_sent in enumerate(input_ids.view(-1, self._num_beams, input_ids.shape[-1])):
for beam_id, sent in enumerate(beam_sent):
mask[batch_id * self._num_beams + beam_id, self._prefix_allowed_tokens_fn(batch_id, sent)] = 0
return scores + mask
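# Sketch of a `prefix_allowed_tokens_fn`: it receives the batch id and the
# tokens decoded so far, and returns the token ids allowed at the next step
# (the ids below are made up).
#
#   def allow_even_ids(batch_id, sent):
#       return [0, 2, 4, 6]  # only even token ids may be generated
#   proc = PrefixConstrainedLogitsProcessor(allow_even_ids, num_beams=1)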
class HammingDiversityLogitsProcessor(LogitsProcessor):
r"""
[`LogitsProcessor`] that enforces diverse beam search. Note that this logits processor is only effective for
[`PreTrainedModel.group_beam_search`]. See [Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence
Models](https://arxiv.org/pdf/1610.02424.pdf) for more details.
Args:
diversity_penalty (`float`):
            This value is subtracted from a beam's score if it generates the same token as any beam from another
            group at a particular time step. Note that `diversity_penalty` is only effective if group beam search is
            enabled.
num_beams (`int`):
Number of beams used for group beam search. See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more
details.
num_beam_groups (`int`):
Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
"""
def __init__(self, diversity_penalty: float, num_beams: int, num_beam_groups: int):
if not isinstance(diversity_penalty, float) or (not diversity_penalty > 0.0):
raise ValueError("`diversity_penalty` should be a float strictly larger than 0.")
self._diversity_penalty = diversity_penalty
if not isinstance(num_beams, int) or num_beams < 2:
raise ValueError("`num_beams` should be an integer strictly larger than 1.")
self._num_beams = num_beams
if not isinstance(num_beam_groups, int) or num_beam_groups < 2:
raise ValueError("`num_beam_groups` should be an integer strictly larger than 1.")
if num_beam_groups > num_beams:
raise ValueError("`beam_groups` has to be smaller or equal to `num_beams`.")
self._num_sub_beams = num_beams // num_beam_groups
def __call__(
self,
input_ids: torch.LongTensor,
scores: torch.FloatTensor,
current_tokens: torch.LongTensor,
beam_group_idx: int,
) -> torch.FloatTensor:
# hamming diversity: penalise using same token in current group which was used in previous groups at
# the same time step
batch_size = current_tokens.shape[0] // self._num_beams
group_start_idx = beam_group_idx * self._num_sub_beams
group_end_idx = min(group_start_idx + self._num_sub_beams, self._num_beams)
group_size = group_end_idx - group_start_idx
vocab_size = scores.shape[-1]
if group_start_idx == 0:
return scores
for batch_idx in range(batch_size):
# predicted tokens of last time step of previous groups
previous_group_tokens = current_tokens[
batch_idx * self._num_beams : batch_idx * self._num_beams + group_start_idx
]
token_frequency = torch.bincount(previous_group_tokens, minlength=vocab_size).to(scores.device)
scores[batch_idx * group_size : (batch_idx + 1) * group_size] -= self._diversity_penalty * token_frequency
return scores
class ForcedBOSTokenLogitsProcessor(LogitsProcessor):
r"""
[`LogitsProcessor`] that enforces the specified token as the first generated token.
Args:
bos_token_id (`int`):
The id of the token to force as the first generated token.
"""
def __init__(self, bos_token_id: int):
self.bos_token_id = bos_token_id
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
cur_len = input_ids.shape[-1]
if cur_len == 1:
num_tokens = scores.shape[1]
scores[:, [i for i in range(num_tokens) if i != self.bos_token_id]] = -float("inf")
scores[:, self.bos_token_id] = 0
return scores
class ForcedEOSTokenLogitsProcessor(LogitsProcessor):
r"""
[`LogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached.
Args:
max_length (`int`):
The maximum length of the sequence to be generated.
eos_token_id (`Union[int, List[int]]`):
The id of the token to force as the last generated token when `max_length` is reached. Optionally, use a
list to set multiple *end-of-sequence* tokens.
"""
def __init__(self, max_length: int, eos_token_id: Union[int, List[int]]):
self.max_length = max_length
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
self.eos_token_id = eos_token_id
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
cur_len = input_ids.shape[-1]
if cur_len == self.max_length - 1:
num_tokens = scores.shape[1]
scores[:, [i for i in range(num_tokens) if i not in self.eos_token_id]] = -float("inf")
for i in self.eos_token_id:
scores[:, i] = 0
return scores
class InfNanRemoveLogitsProcessor(LogitsProcessor):
r"""
    [`LogitsProcessor`] that removes all `nan` and `inf` values to prevent the generation method from failing. Note
    that this logits processor should only be used if necessary, since it can slow down the generation method.
"""
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
# set all nan values to 0.0
scores[scores != scores] = 0.0
# set all inf values to max possible value
scores[scores == float("inf")] = torch.finfo(scores.dtype).max
return scores
class ExponentialDecayLengthPenalty(LogitsProcessor):
r"""
[`LogitsProcessor`] that exponentially increases the score of the eos_token_id after regulation_start has been
reached.
Args:
exponential_decay_length_penalty (`tuple(int, float)`):
This tuple shall consist of: `(start_index, decay_factor)` where `start_index` indicates where penalty
starts and `decay_factor` represents the factor of exponential decay
eos_token_id (`Union[int, List[int]]`):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
input_ids_seq_length (`int`):
The length of the input sequence.
"""
def __init__(
self,
exponential_decay_length_penalty: Tuple[int, float],
eos_token_id: Union[int, List[int]],
input_ids_seq_length: int,
):
self.regulation_start = exponential_decay_length_penalty[0] + input_ids_seq_length
self.regulation_factor = exponential_decay_length_penalty[1]
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
self.eos_token_id = eos_token_id
def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.FloatTensor:
cur_len = input_ids.shape[-1]
if cur_len > self.regulation_start:
for i in self.eos_token_id:
scores[:, i] = scores[:, i] * pow(self.regulation_factor, cur_len - self.regulation_start)
return scores
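# Numeric sketch: with exponential_decay_length_penalty=(5, 1.1) and a prompt
# of length 10, regulation starts at position 15; at cur_len=18 the EOS score
# is multiplied by 1.1 ** 3 (note this favours EOS only while its score is
# positive, since scaling a negative logit up makes it more negative).
#
#   penalty = ExponentialDecayLengthPenalty((5, 1.1), eos_token_id=2, input_ids_seq_length=10)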
class LogitNormalization(LogitsProcessor, LogitsWarper):
r"""
    [`LogitsWarper`] and [`LogitsProcessor`] for normalizing the scores using log-softmax. It's important to normalize
    the scores during beam search, after applying the logits processors or warpers: the search algorithm only
    normalizes the scores beforehand, so they may need re-normalization afterwards, yet it still assumes the scores
    are normalized when comparing the hypotheses.
"""
def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor:
scores = scores.log_softmax(dim=-1)
return scores
class SuppressTokensAtBeginLogitsProcessor(LogitsProcessor):
r"""
    [`SuppressTokensAtBeginLogitsProcessor`] suppresses a list of tokens as soon as the `generate` function starts
    generating using `begin_index` tokens. This ensures that the tokens defined by `begin_suppress_tokens` are not
    sampled at the beginning of the generation.
"""
def __init__(self, begin_suppress_tokens, begin_index):
self.begin_suppress_tokens = list(begin_suppress_tokens)
self.begin_index = begin_index
def __call__(self, input_ids, scores):
if input_ids.shape[1] == self.begin_index:
scores[:, self.begin_suppress_tokens] = -float("inf")
return scores
class SuppressTokensLogitsProcessor(LogitsProcessor):
r"""This processor can be used to suppress a list of tokens. The processor will set their log probs to `-inf` so that they
are not sampled."""
def __init__(self, suppress_tokens):
self.suppress_tokens = list(suppress_tokens)
def __call__(self, input_ids, scores):
scores[:, self.suppress_tokens] = -float("inf")
return scores
class ForceTokensLogitsProcessor(LogitsProcessor):
r"""This processor takes a list of pairs of integers which indicates a mapping from generation indices to token
indices that will be forced before sampling. The processor will set their log probs to `inf` so that they are
sampled at their corresponding index."""
def __init__(self, force_token_map: List[List[int]]):
self.force_token_map = dict(force_token_map)
def __call__(self, input_ids, scores):
generation_idx = input_ids.shape[-1]
current_token = self.force_token_map.get(generation_idx, None)
if current_token is not None:
scores[:, :] = -float("inf")
scores[:, current_token] = 0
return scores
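# Sketch: each pair maps a generation index to the token id forced there (the
# ids below are made up).
#
#   proc = ForceTokensLogitsProcessor([[1, 50362], [2, 50363]])
#   # at generation indices 1 and 2, every other token is masked with -inf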
class WhisperTimeStampLogitsProcessor(LogitsProcessor):
r"""
    Whisper specific Processor. This processor constrains how timestamp tokens are generated: it suppresses the
    `"<|notimestamps|>"` token, forces timestamp tokens to appear in pairs, and samples a timestamp whenever the
    total probability over all timestamp tokens exceeds that of any single text token.
Args:
generate_config (`GenerateConfig`):
The generate config used to generate the output. The following parameters are required:
eos_token_id (`int`, *optional*, defaults to 50257):
The id of the *end-of-sequence* token.
no_timestamps_token_id (`int`, *optional*, defaults to 50363):
The id of the `"<|notimestamps|>"` token.
max_initial_timestamp_index (`int`, *optional*, defaults to 1):
Used to set the maximum value of the initial timestamp. This is used to prevent the model from
predicting timestamps that are too far in the future.
"""
def __init__(self, generate_config): # support for the kwargs
self.eos_token_id = generate_config.eos_token_id
self.no_timestamps_token_id = generate_config.no_timestamps_token_id
self.timestamp_begin = generate_config.no_timestamps_token_id + 1
self.begin_index = len(generate_config.forced_decoder_ids) + 2
if generate_config.forced_decoder_ids[-1][1] == self.no_timestamps_token_id:
self.begin_index -= 1
self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
def __call__(self, input_ids, scores):
# suppress <|notimestamps|> which is handled by without_timestamps
scores[:, self.no_timestamps_token_id] = -float("inf")
if input_ids.shape[1] == self.begin_index - 1:
scores[:, :] = -float("inf")
scores[:, self.timestamp_begin] = 0
return scores
# timestamps have to appear in pairs, except directly before eos_token; mask logits accordingly
for k in range(input_ids.shape[0]):
seq = list(input_ids[k, self.begin_index :].tolist())
last_was_timestamp = len(seq) >= 1 and seq[-1] >= self.timestamp_begin
penultimate_was_timestamp = len(seq) < 2 or seq[-2] >= self.timestamp_begin
if last_was_timestamp:
if penultimate_was_timestamp: # has to be non-timestamp
scores[k, self.timestamp_begin :] = -float("inf")
else: # cannot be normal text tokens
scores[k, : self.eos_token_id] = -float("inf")
# apply the `max_initial_timestamp` option
if input_ids.shape[1] == self.begin_index and self.max_initial_timestamp_index is not None:
last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
scores[:, last_allowed + 1 :] = -float("inf")
# if sum of probability over timestamps is above any other token, sample timestamp
logprobs = torch.nn.functional.log_softmax(scores.float(), dim=-1)
for k in range(input_ids.shape[0]):
timestamp_logprob = logprobs[k, self.timestamp_begin :].logsumexp(dim=-1)
max_text_token_logprob = logprobs[k, : self.timestamp_begin].max()
if timestamp_logprob > max_text_token_logprob:
scores[k, : self.timestamp_begin] = -float("inf")
return scores
|
233zzh/TitanDataOperationSystem | 7,811 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/flot.tooltip/js/jquery.flot.tooltip.min.js | /*
* jquery.flot.tooltip
*
* description: easy-to-use tooltips for Flot charts
* version: 0.8.5
* authors: Krzysztof Urbas @krzysu [myviews.pl],Evan Steinkerchner @Roundaround
* website: https://github.com/krzysu/flot.tooltip
*
* build on 2015-05-11
* released under MIT License, 2012
*/
!function(a){var b={tooltip:{show:!1,cssClass:"flotTip",content:"%s | X: %x | Y: %y",xDateFormat:null,yDateFormat:null,monthNames:null,dayNames:null,shifts:{x:10,y:20},defaultTheme:!0,lines:!1,onHover:function(a,b){},$compat:!1}};b.tooltipOpts=b.tooltip;var c=function(a){this.tipPosition={x:0,y:0},this.init(a)};c.prototype.init=function(b){function c(a){var c={};c.x=a.pageX,c.y=a.pageY,b.setTooltipPosition(c)}function d(c,d,f){var g=function(a,b,c,d){return Math.sqrt((c-a)*(c-a)+(d-b)*(d-b))},h=function(a,b,c,d,e,f,h){if(!h||(h=function(a,b,c,d,e,f){if("undefined"!=typeof c)return{x:c,y:b};if("undefined"!=typeof d)return{x:a,y:d};var g,h=-1/((f-d)/(e-c));return{x:g=(e*(a*h-b+d)+c*(a*-h+b-f))/(h*(e-c)+d-f),y:h*g-h*a+b}}(a,b,c,d,e,f),h.x>=Math.min(c,e)&&h.x<=Math.max(c,e)&&h.y>=Math.min(d,f)&&h.y<=Math.max(d,f))){var i=d-f,j=e-c,k=c*f-d*e;return Math.abs(i*a+j*b+k)/Math.sqrt(i*i+j*j)}var l=g(a,b,c,d),m=g(a,b,e,f);return l>m?m:l};if(f)b.showTooltip(f,d);else if(e.plotOptions.series.lines.show&&e.tooltipOptions.lines===!0){var i=e.plotOptions.grid.mouseActiveRadius,j={distance:i+1};a.each(b.getData(),function(a,c){for(var e=0,f=-1,i=1;i<c.data.length;i++)c.data[i-1][0]<=d.x&&c.data[i][0]>=d.x&&(e=i-1,f=i);if(-1===f)return void b.hideTooltip();var k={x:c.data[e][0],y:c.data[e][1]},l={x:c.data[f][0],y:c.data[f][1]},m=h(c.xaxis.p2c(d.x),c.yaxis.p2c(d.y),c.xaxis.p2c(k.x),c.yaxis.p2c(k.y),c.xaxis.p2c(l.x),c.yaxis.p2c(l.y),!1);if(m<j.distance){var n=g(k.x,k.y,d.x,d.y)<g(d.x,d.y,l.x,l.y)?e:f,o=(c.datapoints.pointsize,[d.x,k.y+(l.y-k.y)*((d.x-k.x)/(l.x-k.x))]),p={datapoint:o,dataIndex:n,series:c,seriesIndex:a};j={distance:m,item:p}}}),j.distance<i+1?b.showTooltip(j.item,d):b.hideTooltip()}else b.hideTooltip()}var e=this,f=a.plot.plugins.length;if(this.plotPlugins=[],f)for(var g=0;f>g;g++)this.plotPlugins.push(a.plot.plugins[g].name);b.hooks.bindEvents.push(function(b,f){if(e.plotOptions=b.getOptions(),"boolean"==typeof e.plotOptions.tooltip&&(e.plotOptions.tooltipOpts.show=e.plotOptions.tooltip,e.plotOptions.tooltip=e.plotOptions.tooltipOpts,delete e.plotOptions.tooltipOpts),e.plotOptions.tooltip.show!==!1&&"undefined"!=typeof e.plotOptions.tooltip.show){e.tooltipOptions=e.plotOptions.tooltip,e.tooltipOptions.$compat?(e.wfunc="width",e.hfunc="height"):(e.wfunc="innerWidth",e.hfunc="innerHeight");e.getDomElement();a(b.getPlaceholder()).bind("plothover",d),a(f).bind("mousemove",c)}}),b.hooks.shutdown.push(function(b,e){a(b.getPlaceholder()).unbind("plothover",d),a(e).unbind("mousemove",c)}),b.setTooltipPosition=function(b){var c=e.getDomElement(),d=c.outerWidth()+e.tooltipOptions.shifts.x,f=c.outerHeight()+e.tooltipOptions.shifts.y;b.x-a(window).scrollLeft()>a(window)[e.wfunc]()-d&&(b.x-=d),b.y-a(window).scrollTop()>a(window)[e.hfunc]()-f&&(b.y-=f),e.tipPosition.x=b.x,e.tipPosition.y=b.y},b.showTooltip=function(a,c){var d=e.getDomElement(),f=e.stringFormat(e.tooltipOptions.content,a);""!==f&&(d.html(f),b.setTooltipPosition({x:c.pageX,y:c.pageY}),d.css({left:e.tipPosition.x+e.tooltipOptions.shifts.x,top:e.tipPosition.y+e.tooltipOptions.shifts.y}).show(),"function"==typeof e.tooltipOptions.onHover&&e.tooltipOptions.onHover(a,d))},b.hideTooltip=function(){e.getDomElement().hide().html("")}},c.prototype.getDomElement=function(){var b=a("."+this.tooltipOptions.cssClass);return 0===b.length&&(b=a("<div />").addClass(this.tooltipOptions.cssClass),b.appendTo("body").hide().css({position:"absolute"}),this.tooltipOptions.defaultTheme&&b.css({background:"#fff","z-index":"1040",padding:"0.4em 
0.6em","border-radius":"0.5em","font-size":"0.8em",border:"1px solid #111",display:"none","white-space":"nowrap"})),b},c.prototype.stringFormat=function(a,b){var c,d,e,f,g=/%p\.{0,1}(\d{0,})/,h=/%s/,i=/%c/,j=/%lx/,k=/%ly/,l=/%x\.{0,1}(\d{0,})/,m=/%y\.{0,1}(\d{0,})/,n="%x",o="%y",p="%ct";if("undefined"!=typeof b.series.threshold?(c=b.datapoint[0],d=b.datapoint[1],e=b.datapoint[2]):"undefined"!=typeof b.series.lines&&b.series.lines.steps?(c=b.series.datapoints.points[2*b.dataIndex],d=b.series.datapoints.points[2*b.dataIndex+1],e=""):(c=b.series.data[b.dataIndex][0],d=b.series.data[b.dataIndex][1],e=b.series.data[b.dataIndex][2]),null===b.series.label&&b.series.originSeries&&(b.series.label=b.series.originSeries.label),"function"==typeof a&&(a=a(b.series.label,c,d,b)),"boolean"==typeof a&&!a)return"";if("undefined"!=typeof b.series.percent?f=b.series.percent:"undefined"!=typeof b.series.percents&&(f=b.series.percents[b.dataIndex]),"number"==typeof f&&(a=this.adjustValPrecision(g,a,f)),a="undefined"!=typeof b.series.label?a.replace(h,b.series.label):a.replace(h,""),a="undefined"!=typeof b.series.color?a.replace(i,b.series.color):a.replace(i,""),a=this.hasAxisLabel("xaxis",b)?a.replace(j,b.series.xaxis.options.axisLabel):a.replace(j,""),a=this.hasAxisLabel("yaxis",b)?a.replace(k,b.series.yaxis.options.axisLabel):a.replace(k,""),this.isTimeMode("xaxis",b)&&this.isXDateFormat(b)&&(a=a.replace(l,this.timestampToDate(c,this.tooltipOptions.xDateFormat,b.series.xaxis.options))),this.isTimeMode("yaxis",b)&&this.isYDateFormat(b)&&(a=a.replace(m,this.timestampToDate(d,this.tooltipOptions.yDateFormat,b.series.yaxis.options))),"number"==typeof c&&(a=this.adjustValPrecision(l,a,c)),"number"==typeof d&&(a=this.adjustValPrecision(m,a,d)),"undefined"!=typeof b.series.xaxis.ticks){var q;q=this.hasRotatedXAxisTicks(b)?"rotatedTicks":"ticks";var r=b.dataIndex+b.seriesIndex;for(var s in b.series.xaxis[q])if(b.series.xaxis[q].hasOwnProperty(r)&&!this.isTimeMode("xaxis",b)){var t=this.isCategoriesMode("xaxis",b)?b.series.xaxis[q][r].label:b.series.xaxis[q][r].v;t===c&&(a=a.replace(l,b.series.xaxis[q][r].label))}}if("undefined"!=typeof b.series.yaxis.ticks)for(var s in b.series.yaxis.ticks)if(b.series.yaxis.ticks.hasOwnProperty(s)){var u=this.isCategoriesMode("yaxis",b)?b.series.yaxis.ticks[s].label:b.series.yaxis.ticks[s].v;u===d&&(a=a.replace(m,b.series.yaxis.ticks[s].label))}return"undefined"!=typeof b.series.xaxis.tickFormatter&&(a=a.replace(n,b.series.xaxis.tickFormatter(c,b.series.xaxis).replace(/\$/g,"$$"))),"undefined"!=typeof b.series.yaxis.tickFormatter&&(a=a.replace(o,b.series.yaxis.tickFormatter(d,b.series.yaxis).replace(/\$/g,"$$"))),e&&(a=a.replace(p,e)),a},c.prototype.isTimeMode=function(a,b){return"undefined"!=typeof b.series[a].options.mode&&"time"===b.series[a].options.mode},c.prototype.isXDateFormat=function(a){return"undefined"!=typeof this.tooltipOptions.xDateFormat&&null!==this.tooltipOptions.xDateFormat},c.prototype.isYDateFormat=function(a){return"undefined"!=typeof this.tooltipOptions.yDateFormat&&null!==this.tooltipOptions.yDateFormat},c.prototype.isCategoriesMode=function(a,b){return"undefined"!=typeof b.series[a].options.mode&&"categories"===b.series[a].options.mode},c.prototype.timestampToDate=function(b,c,d){var e=a.plot.dateGenerator(b,d);return a.plot.formatDate(e,c,this.tooltipOptions.monthNames,this.tooltipOptions.dayNames)},c.prototype.adjustValPrecision=function(a,b,c){var d,e=b.match(a);return 
null!==e&&""!==RegExp.$1&&(d=RegExp.$1,c=c.toFixed(d),b=b.replace(a,c)),b},c.prototype.hasAxisLabel=function(b,c){return-1!==a.inArray(this.plotPlugins,"axisLabels")&&"undefined"!=typeof c.series[b].options.axisLabel&&c.series[b].options.axisLabel.length>0},c.prototype.hasRotatedXAxisTicks=function(b){return-1!==a.inArray(this.plotPlugins,"tickRotor")&&"undefined"!=typeof b.series.xaxis.rotatedTicks};var d=function(a){new c(a)};a.plot.plugins.push({init:d,options:b,name:"tooltip",version:"0.8.5"})}(jQuery); |
27182812/ChatGLM-LLaMA-chinese-insturct | 27,575 | src/transformers/generation/tf_logits_process.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import List, Tuple
import numpy as np
import tensorflow as tf
from ..tf_utils import stable_softmax
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)
TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
Args:
input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`tf.Tensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
            using beam search or log softmax for each vocabulary token when using beam search.
cur_len (`int`):
The current length of valid input sequence tokens. In the TF implementation, the input_ids' sequence length
is the maximum length generate can produce, and we need to know which of its tokens are valid.
kwargs:
Additional logits processor specific kwargs.
Return:
`tf.Tensor` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class TFLogitsProcessor:
"""Abstract base class for all logit processors that can be applied during generation."""
@add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
"""TF method for processing logits."""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
)
class TFLogitsWarper:
"""Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""
@add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
"""TF method for warping logits."""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
)
class TFLogitsProcessorList(list):
"""
This class can be used to create a list of [`TFLogitsProcessor`] to subsequently process a `scores` input tensor.
This class inherits from list and adds a specific *__call__* method to apply each [`TFLogitsProcessor`] to the
inputs.
"""
@add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int, **kwargs) -> tf.Tensor:
for processor in self:
function_args = inspect.signature(processor.__call__).parameters
if len(function_args) > 3:
if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
raise ValueError(
f"Make sure that all the required parameters: {list(function_args.keys())} for "
f"{processor.__class__} are passed to the logits processor."
)
scores = processor(input_ids, scores, cur_len, **kwargs)
else:
scores = processor(input_ids, scores, cur_len)
return scores
class TFTemperatureLogitsWarper(TFLogitsWarper):
r"""
[`TFLogitsWarper`] for temperature (exponential scaling output probability distribution).
Args:
temperature (`float`):
The value used to module the logits distribution.
"""
def __init__(self, temperature: float):
if not isinstance(temperature, float) or not (temperature > 0):
raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
self.temperature = temperature
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
scores = scores / self.temperature
return scores
class TFTopKLogitsWarper(TFLogitsWarper):
r"""
[`TFLogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements.
Args:
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
filter_value (`float`, *optional*, defaults to `-float("Inf")`):
All filtered values will be set to this float value.
min_tokens_to_keep (`int`, *optional*, defaults to 1):
Minimum number of tokens that cannot be filtered.
"""
def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
if not isinstance(top_k, int) or top_k <= 0:
raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
self.top_k = max(top_k, min_tokens_to_keep)
self.filter_value = filter_value
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
top_k = min(self.top_k, scores.shape[-1]) # Safety check
# Boolean mask containing all tokens with a probability less than the last token of the top-k
indices_to_remove = scores < tf.math.top_k(scores, k=top_k)[0][..., -1:]
next_scores = tf.where(indices_to_remove, self.filter_value, scores)
return next_scores
class TFTopPLogitsWarper(TFLogitsWarper):
"""
    [`TFLogitsWarper`] that performs top-p, i.e. restricting generation to the smallest set of most probable tokens
    whose cumulative probability is at least `top_p`.
Args:
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
filter_value (`float`, *optional*, defaults to `-float("Inf")`):
All filtered values will be set to this float value.
min_tokens_to_keep (`int`, *optional*, defaults to 1):
Minimum number of tokens that cannot be filtered.
"""
def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
self.top_p = top_p
self.filter_value = filter_value
self.min_tokens_to_keep = min_tokens_to_keep
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
topk_scores, topk_indices = tf.math.top_k(scores, scores.shape[-1])
mask_scores = tf.fill(scores.shape, self.filter_value)
cumulative_probs = tf.math.cumsum(stable_softmax(topk_scores, axis=-1), axis=-1)
score_mask = cumulative_probs < self.top_p
# Also include the token that is higher than top_p (the first false = shift and insert a True on the left)
score_mask = tf.concat((tf.ones([score_mask.shape[0], 1], dtype=tf.bool), score_mask[:, :-1]), axis=-1)
# Ensure min tokens to keep
score_mask = tf.concat(
(
tf.ones([score_mask.shape[0], self.min_tokens_to_keep], dtype=tf.bool),
score_mask[:, self.min_tokens_to_keep :],
),
axis=-1,
)
# Mask the values that do not fit the criteria
topk_next_scores = tf.where(score_mask, topk_scores, mask_scores)
# Undo the topk sorting: converts the 2D matrix of per-row original indices of shape (batch_size, vocab_size)
# to a 3D tensor of shape (batch_size, vocab_size, 2) containing the original score coordinate, from which we
# can scatter (i.e. `scatter_indices[row, col, :]` is a tensor containing `[row, topk_indices[row, col]]`)
scatter_rows = tf.tile(tf.expand_dims(tf.range(topk_indices.shape[0]), axis=-1), [1, topk_indices.shape[-1]])
scatter_indices = tf.stack((scatter_rows, topk_indices), axis=-1)
next_scores = tf.scatter_nd(scatter_indices, topk_next_scores, shape=topk_next_scores.shape)
return next_scores
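# Illustrative usage sketch (assumes eager TensorFlow, toy 4-token vocabulary): with `top_p=0.9` the
# smallest set of tokens whose probabilities reach 0.9 is kept (tokens 0 and 1, with probabilities 0.6
# and 0.3); the rest is set to `filter_value`.
#   warper = TFTopPLogitsWarper(top_p=0.9)
#   scores = tf.math.log(tf.constant([[0.6, 0.3, 0.05, 0.05]]))
#   warper(tf.zeros((1, 1), dtype=tf.int32), scores, cur_len=1)  # -> [[log(0.6), log(0.3), -inf, -inf]]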
class TFMinLengthLogitsProcessor(TFLogitsProcessor):
r"""
[`TFLogitsProcessor`] enforcing a min-length by setting EOS probability to 0.
Args:
min_length (`int`):
The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`.
eos_token_id (`int`):
The id of the *end-of-sequence* token.
"""
def __init__(self, min_length: int, eos_token_id: int):
if not isinstance(min_length, int) or min_length < 0:
raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
if not isinstance(eos_token_id, int) or eos_token_id < 0:
raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
self.min_length = min_length
self.eos_token_id = eos_token_id
def _apply_eos_token_mask(self, scores: tf.Tensor) -> tf.Tensor:
eos_token_id_mask = tf.range(scores.shape[-1]) == self.eos_token_id
scores = tf.where(eos_token_id_mask, float("-inf"), scores)
return scores
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
# applies eos token masking if the first argument is true
scores = tf.cond(
tf.less(cur_len, self.min_length),
lambda: self._apply_eos_token_mask(scores),
lambda: tf.identity(scores),
)
return scores
class TFRepetitionPenaltyLogitsProcessor(TFLogitsProcessor):
r"""
[`TFLogitsProcessor`] enforcing an exponential penalty on repeated sequences.
Args:
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
"""
def __init__(self, penalty: float):
if not isinstance(penalty, float) or not (penalty > 0):
raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")
self.penalty = penalty
def _create_score_penalties(self, input_ids: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:
# We want to populate the penalties in the positions of `input_ids`. Since XLA can't handle shapes unknown
# before runtime, `tf.unique` can't be used. Therefore, we may have redundant updates, when a given row has
# the same token multiple times.
# Gathers the penalties to apply
logit_penalties = tf.gather(logits, input_ids, axis=1, batch_dims=1)
logit_penalties = tf.where(logit_penalties > 0, 1 / self.penalty, logit_penalties)
logit_penalties = tf.where(logit_penalties < 0, self.penalty, logit_penalties)
# Scatters the penalties
token_penalties = tf.ones(logits.shape)
batch_size = input_ids.shape[0]
seq_len = tf.shape(input_ids)[1] # the sequence length has dynamic size, hence the dynamic shape
indexable_prev_input_ids = tf.concat(
(
tf.expand_dims(tf.repeat(tf.range(batch_size), seq_len), axis=-1),
tf.expand_dims(tf.reshape(input_ids, [-1]), axis=-1),
),
axis=1,
)
token_penalties = tf.tensor_scatter_nd_update(
token_penalties, indices=indexable_prev_input_ids, updates=tf.reshape(logit_penalties, [-1])
)
return token_penalties
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
score_penalties = self._create_score_penalties(input_ids[:, :cur_len], scores)
scores = tf.math.multiply(scores, score_penalties)
return scores
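# Illustrative usage sketch (assumes eager TensorFlow): with `penalty=2.0`, already-generated tokens with
# positive logits are halved and those with negative logits are doubled, pushing both away from being
# re-sampled; unseen tokens are untouched.
#   processor = TFRepetitionPenaltyLogitsProcessor(penalty=2.0)
#   input_ids = tf.constant([[0, 1]])  # tokens 0 and 1 were generated previously
#   scores = tf.constant([[4.0, -4.0, 2.0]])
#   processor(input_ids, scores, cur_len=2)  # -> [[2., -8., 2.]]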
class TFNoBadWordsLogitsProcessor(TFLogitsProcessor):
"""
[`TFLogitsProcessor`] that enforces that specified sequences will never be sampled.
Args:
bad_words_ids (`List[List[int]]`):
List of list of token ids that are not allowed to be generated. In order to get the tokens of the words
that should not appear in the generated text, use `tokenizer(bad_word, add_prefix_space=True).input_ids`.
eos_token_id (`int`):
The id of the *end-of-sequence* token.
"""
def __init__(self, bad_words_ids: List[List[int]], eos_token_id: int):
if not isinstance(bad_words_ids, List) or len(bad_words_ids) == 0:
raise ValueError(f"`bad_words_ids` has to be a non-empty list, but is {bad_words_ids}.")
if any(not isinstance(bad_word_ids, list) for bad_word_ids in bad_words_ids):
raise ValueError(f"`bad_words_ids` has to be a list of lists, but is {bad_words_ids}.")
if any(
any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in bad_word_ids)
for bad_word_ids in bad_words_ids
):
raise ValueError(
f"Each list in `bad_words_ids` has to be a list of positive integers, but is {bad_words_ids}."
)
# stores the information about bad words in three tensors:
# 1. a rectangular tensor with the forbidden sequences (padded with `-1`), for full data comparisons
self.bad_word_seqs_ids = tf.ragged.constant(bad_words_ids).to_tensor(default_value=-1)
# 2. a tensor with the unpadded length of each forbidden sequence, for quick length comparisons
bad_word_seqs_len = [len(bad_words) for bad_words in bad_words_ids]
if any([word_len == 0 for word_len in bad_word_seqs_len]):
raise ValueError(f"Banned words token sequences {bad_words_ids} cannot have an empty list")
self.bad_word_seqs_len = tf.convert_to_tensor(bad_word_seqs_len, dtype=tf.int32)
# 3. a tensor containing the last token for each sequence, for easy access to the tokens that may be banned
self.seq_forbidden_tokens = tf.convert_to_tensor([bad_words[-1] for bad_words in bad_words_ids])
def _calc_row_banned_bad_tokens(self, row_input_ids: tf.Tensor) -> tf.Tensor:
def _tokens_match(bad_word_seq_number):
def _len_one():
# If the bad sequence only has one token, always mask it
return tf.cond(
tf.math.equal(self.bad_word_seqs_len[bad_word_seq_number], 1),
lambda: tf.ones((), dtype=tf.bool),
_len_greater_than_cur_len,
)
def _len_greater_than_cur_len():
# Otherwise, if the bad sequence is longer than the current length they can't ever match
return tf.cond(
tf.math.greater(self.bad_word_seqs_len[bad_word_seq_number], tf.shape(row_input_ids)[0]),
lambda: tf.zeros((), dtype=tf.bool),
_match_found,
)
def _match_found():
# Finally, runs the actual comparison. Can only be called if the previous comparisons do not yield
# an answer (otherwise we get indexing exceptions)
compare_len = self.bad_word_seqs_len[bad_word_seq_number] - 1
return tf.cond(
tf.math.reduce_all(
tf.math.equal(
row_input_ids[-compare_len:], self.bad_word_seqs_ids[bad_word_seq_number, :compare_len]
)
),
lambda: tf.ones((), dtype=tf.bool),
lambda: tf.zeros((), dtype=tf.bool),
)
match = _len_one()
return match
# Compares the current row against all bad word sequences, obtaining a mask with the matches.
match_mask = tf.map_fn(_tokens_match, tf.range(self.bad_word_seqs_ids.shape[0]), fn_output_signature=tf.bool)
row_banned_tokens = self.seq_forbidden_tokens[match_mask]
return row_banned_tokens
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
# We want to mask some banned tokens, at a score level. Since the banned tokens depend on the previous
# `input_ids`, they may have a different length for each row, and they may even be empty for some rows.
# To remain simple and XLA-compatible, we work on a per-row fashion.
# TODO (Joao): this function might trigger XLA retracing as `cur_len` increases. Fix it if it becomes
# a frequent choke point. (make `cur_len` a tensor?)
def _get_row_updated_score(row_inputs: Tuple[tf.Tensor]) -> tf.Tensor:
row_input_ids, row_score = row_inputs
banned_tokens = self._calc_row_banned_bad_tokens(row_input_ids[:cur_len])
banned_tokens_mask = tf.scatter_nd(
indices=tf.expand_dims(banned_tokens, axis=-1),
updates=tf.ones_like(banned_tokens, dtype=tf.bool),
shape=row_score.shape,
)
row_score = tf.where(banned_tokens_mask, -float("inf"), row_score)
return row_score
scores = tf.map_fn(_get_row_updated_score, (input_ids, scores), fn_output_signature=tf.float32)
return scores
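# Illustrative usage sketch (assumes eager TensorFlow, toy 4-token vocabulary): the single-token sequence
# `[3]` is always banned, and because the last generated token is `1`, the final token of `[1, 2]` is
# banned as well.
#   processor = TFNoBadWordsLogitsProcessor(bad_words_ids=[[3], [1, 2]], eos_token_id=0)
#   input_ids = tf.constant([[0, 1]])
#   scores = tf.zeros((1, 4))
#   processor(input_ids, scores, cur_len=2)  # -> [[0., 0., -inf, -inf]]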
class TFNoRepeatNGramLogitsProcessor(TFLogitsProcessor):
r"""
[`TFLogitsProcessor`] that enforces no repetition of n-grams. See
[Fairseq](https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345).
Args:
ngram_size (`int`):
All ngrams of size `ngram_size` can only occur once.
"""
def __init__(self, ngram_size: int):
if not isinstance(ngram_size, int) or ngram_size <= 0:
raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}")
self.ngram_size = ngram_size
def calc_banned_ngram_tokens(self, input_ids, num_hypos, cur_len):
# Copied from fairseq for no_repeat_ngram in beam_search
if cur_len + 1 < self.ngram_size:
# return no banned tokens if we haven't generated ngram_size tokens yet
return [[] for _ in range(num_hypos)]
generated_ngrams = [{} for _ in range(num_hypos)]
prev_input_ids = input_ids[:, :cur_len]
for idx in range(num_hypos):
gen_tokens = prev_input_ids[idx].numpy().tolist()
generated_ngram = generated_ngrams[idx]
for ngram in zip(*[gen_tokens[i:] for i in range(self.ngram_size)]):
prev_ngram_tuple = tuple(ngram[:-1])
generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]]
def _get_generated_ngrams(hypo_idx):
# Before decoding the next token, prevent decoding of ngrams that have already appeared
start_idx = cur_len + 1 - self.ngram_size
ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].numpy().tolist())
return generated_ngrams[hypo_idx].get(ngram_idx, [])
banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)]
return banned_tokens
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
# TODO (joao): enable XLA on this logits processor. See discussion and attempts in
# https://github.com/huggingface/transformers/pull/16974
if not tf.executing_eagerly():
raise NotImplementedError("TFNoRepeatNGramLogitsProcessor is only implemented for eager execution.")
batch_size, vocab_size = scores.shape
banned_tokens = self.calc_banned_ngram_tokens(input_ids, batch_size, cur_len)
# create banned_tokens boolean mask
banned_tokens_indices_mask = []
for banned_tokens_slice in banned_tokens:
banned_tokens_indices_mask.append(
[True if token in banned_tokens_slice else False for token in range(vocab_size)]
)
scores = tf.where(tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf"), scores)
return scores
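# Illustrative usage sketch (eager-only by design, toy 4-token vocabulary): the bigrams generated so far
# are (1, 2) and (2, 1), so after the current suffix `1` the token `2` would repeat the bigram (1, 2)
# and is banned.
#   processor = TFNoRepeatNGramLogitsProcessor(ngram_size=2)
#   input_ids = tf.constant([[1, 2, 1]])
#   scores = tf.zeros((1, 4))
#   processor(input_ids, scores, cur_len=3)  # -> [[0., 0., -inf, 0.]]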
class TFForcedBOSTokenLogitsProcessor(TFLogitsProcessor):
r"""
[`TFLogitsProcessor`] that enforces the specified token as the first generated token.
Args:
bos_token_id (`int`):
The id of the token to force as the first generated token.
"""
def __init__(self, bos_token_id: int):
if bos_token_id < 0:
raise ValueError(f"The forced bos token id must be a non-negative integer, got {bos_token_id}")
self.bos_token_id = bos_token_id
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
if cur_len == 1:
batch_size, num_tokens = scores.shape
# sets the score to 0 in the bos_token_id column
scores = tf.zeros((batch_size, 1))
# sets the score to -inf everywhere else
if self.bos_token_id > 0:
scores = tf.concat((tf.broadcast_to(-float("inf"), (batch_size, self.bos_token_id)), scores), axis=-1)
if self.bos_token_id < (num_tokens - 1):
scores = tf.concat(
(scores, tf.broadcast_to(-float("inf"), (batch_size, (num_tokens - 1) - self.bos_token_id))),
axis=-1,
)
return scores
class TFForcedEOSTokenLogitsProcessor(TFLogitsProcessor):
r"""
[`TFLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached.
Args:
max_length (`int`):
The maximum length of the sequence to be generated.
eos_token_id (`int`):
The id of the token to force as the last generated token when `max_length` is reached.
"""
def __init__(self, max_length: int, eos_token_id: int):
self.max_length = max_length
if eos_token_id < 0:
raise ValueError(f"The forced eos token id must be a non-negative integer, got {eos_token_id}")
self.eos_token_id = eos_token_id
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
if cur_len == self.max_length - 1:
batch_size, num_tokens = scores.shape
# sets the score to 0 in the eos_token_id column
scores = tf.zeros((batch_size, 1))
# sets the score to -inf everywhere else
if self.eos_token_id > 0:
scores = tf.concat((tf.broadcast_to(-float("inf"), (batch_size, self.eos_token_id)), scores), axis=-1)
if self.eos_token_id < (num_tokens - 1):
scores = tf.concat(
(scores, tf.broadcast_to(-float("inf"), (batch_size, (num_tokens - 1) - self.eos_token_id))),
axis=-1,
)
return scores
class TFSuppressTokensAtBeginLogitsProcessor(TFLogitsProcessor):
r"""
[`TFSuppressTokensAtBeginLogitsProcessor`] suppresses a list of tokens as soon as the `generate` function starts
generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are
not sampled at the beginning of the generation.
"""
def __init__(self, begin_suppress_tokens, begin_index):
self.begin_suppress_tokens = list(begin_suppress_tokens)
self.begin_index = begin_index
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
scores = tf.cond(
tf.equal(cur_len, self.begin_index),
lambda: tf.tensor_scatter_nd_update(
scores,
indices=[[i, token] for i in range(scores.shape[0]) for token in self.begin_suppress_tokens],
updates=[-float("inf") for _ in range(scores.shape[0] * len(self.begin_suppress_tokens))],
),
lambda: scores,
)
return scores
class TFSuppressTokensLogitsProcessor(TFLogitsProcessor):
r"""This processor can be used to suppress a list of tokens. The processor will set their log probs to `-inf` so that they
are not sampled."""
def __init__(self, suppress_tokens):
self.suppress_tokens = list(suppress_tokens)
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
scores = tf.tensor_scatter_nd_update(
scores,
indices=[[i, token] for i in range(scores.shape[0]) for token in self.suppress_tokens],
updates=[-float("inf") for _ in range(scores.shape[0] * len(self.suppress_tokens))],
)
return scores
class TFForceTokensLogitsProcessor(TFLogitsProcessor):
r"""This processor takes a list of pairs of integers which indicates a mapping from generation indices to token
indices that will be forced before sampling. The processor will set their log probs to `0` and all other tokens to
`-inf` so that they are sampled at their corresponding index."""
def __init__(self, force_token_map: List[List[int]]):
force_token_map = dict(force_token_map)
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
force_token_array = np.ones((max(force_token_map.keys()) + 1), dtype=np.int32) * -1
for index, token in force_token_map.items():
if token is not None:
force_token_array[index] = token
self.force_token_array = tf.convert_to_tensor(force_token_array, dtype=tf.int32)
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
def _force_token(generation_idx):
batch_size = scores.shape[0]
current_token = self.force_token_array[generation_idx]
new_scores = tf.ones_like(scores, dtype=scores.dtype) * -float("inf")
indices = tf.stack((tf.range(batch_size), tf.tile([current_token], [batch_size])), axis=1)
updates = tf.zeros((batch_size,), dtype=scores.dtype)
new_scores = tf.tensor_scatter_nd_update(new_scores, indices, updates)
return new_scores
scores = tf.cond(
tf.greater_equal(cur_len, tf.shape(self.force_token_array)[0]),
# If the current length is geq than the length of force_token_array, the processor does nothing.
lambda: tf.identity(scores),
# Otherwise, it may force a certain token.
lambda: tf.cond(
tf.greater_equal(self.force_token_array[cur_len], 0),
# Only valid (positive) tokens are forced
lambda: _force_token(cur_len),
# Otherwise, the processor does nothing.
lambda: scores,
),
)
return scores
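# Illustrative usage sketch (assumes eager TensorFlow, toy 5-token vocabulary): at generation index 1 the
# processor forces token 3 by zeroing its logit and setting every other logit to `-inf`; at indices with
# no forced token the scores pass through unchanged.
#   processor = TFForceTokensLogitsProcessor(force_token_map=[[1, 3]])
#   scores = tf.zeros((1, 5))
#   processor(tf.zeros((1, 1), dtype=tf.int32), scores, cur_len=1)  # -> [[-inf, -inf, -inf, 0., -inf]]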
|
27182812/ChatGLM-LLaMA-chinese-insturct | 18,995 | src/transformers/generation/flax_logits_process.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs:
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class FlaxLogitsProcessor:
"""Abstract base class for all logit processors that can be applied during generation."""
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
"""Flax method for processing logits."""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
)
class FlaxLogitsWarper:
"""Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
"""Flax method for warping logits."""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
)
class FlaxLogitsProcessorList(list):
"""
This class can be used to create a list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to subsequently process
a `scores` input tensor. This class inherits from list and adds a specific *__call__* method to apply each
[`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to the inputs.
"""
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
for processor in self:
function_args = inspect.signature(processor.__call__).parameters
if len(function_args) > 3:
if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
raise ValueError(
f"Make sure that all the required parameters: {list(function_args.keys())} for "
f"{processor.__class__} are passed to the logits processor."
)
scores = processor(input_ids, scores, cur_len, **kwargs)
else:
scores = processor(input_ids, scores, cur_len)
return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
r"""
[`FlaxLogitsWarper`] for temperature (exponential scaling output probability distribution).
Args:
temperature (`float`):
The value used to module the logits distribution.
"""
def __init__(self, temperature: float):
if not isinstance(temperature, float) or not (temperature > 0):
raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
self.temperature = temperature
def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
scores = scores / self.temperature
return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
"""
[`FlaxLogitsWarper`] that performs top-p, i.e. restricting to top tokens summing to <= prob_cut_off.
Args:
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
filter_value (`float`, *optional*, defaults to `-float("Inf")`):
All filtered values will be set to this float value.
min_tokens_to_keep (`int`, *optional*, defaults to 1):
Minimum number of tokens that cannot be filtered.
"""
def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
self.top_p = top_p
self.filter_value = filter_value
self.min_tokens_to_keep = min_tokens_to_keep
def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
mask_scores = jnp.full_like(scores, self.filter_value)
cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
score_mask = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
score_mask = jnp.roll(score_mask, 1)
score_mask |= score_mask.at[:, 0].set(True)
# min tokens to keep
score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
r"""
[`FlaxLogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements.
Args:
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
filter_value (`float`, *optional*, defaults to `-float("Inf")`):
All filtered values will be set to this float value.
min_tokens_to_keep (`int`, *optional*, defaults to 1):
Minimum number of tokens that cannot be filtered.
"""
def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
if not isinstance(top_k, int) or top_k <= 0:
raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
self.top_k = max(top_k, min_tokens_to_keep)
self.filter_value = filter_value
def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
batch_size, vocab_size = scores.shape
next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)
topk = min(self.top_k, scores.shape[-1]) # Safety check
topk_scores, topk_indices = lax.top_k(scores, topk)
shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
topk_scores_flat = topk_scores.flatten()
topk_indices_flat = topk_indices.flatten() + shift
next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
next_scores = next_scores_flat.reshape(batch_size, vocab_size)
return next_scores
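# Illustrative usage sketch (toy 4-token vocabulary): same semantics as the TensorFlow warper of the same
# name, implemented with a flat scatter for XLA friendliness.
#   import jax.numpy as jnp
#   warper = FlaxTopKLogitsWarper(top_k=2)
#   scores = jnp.array([[1.0, 3.0, 2.0, 0.5]])
#   warper(jnp.zeros((1, 1), dtype=jnp.int32), scores, cur_len=1)  # -> [[-inf, 3., 2., -inf]]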
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
r"""
[`FlaxLogitsProcessor`] that enforces the specified token as the first generated token.
Args:
bos_token_id (`int`):
The id of the token to force as the first generated token.
"""
def __init__(self, bos_token_id: int):
self.bos_token_id = bos_token_id
def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
new_scores = jnp.full(scores.shape, -float("inf"))
apply_penalty = 1 - jnp.bool_(cur_len - 1)
scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
r"""
[`FlaxLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached.
Args:
max_length (`int`):
The maximum length of the sequence to be generated.
eos_token_id (`int`):
The id of the token to force as the last generated token when `max_length` is reached.
"""
def __init__(self, max_length: int, eos_token_id: int):
self.max_length = max_length
self.eos_token_id = eos_token_id
def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
new_scores = jnp.full(scores.shape, -float("inf"))
apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
r"""
[`FlaxLogitsProcessor`] enforcing a min-length by setting EOS probability to 0.
Args:
min_length (`int`):
The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`.
eos_token_id (`int`):
The id of the *end-of-sequence* token.
"""
def __init__(self, min_length: int, eos_token_id: int):
if not isinstance(min_length, int) or min_length < 0:
raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
if not isinstance(eos_token_id, int) or eos_token_id < 0:
raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
self.min_length = min_length
self.eos_token_id = eos_token_id
def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
# create boolean flag to decide if min length penalty should be applied
apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
return scores
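# Illustrative usage sketch (toy 4-token vocabulary): while `cur_len` is below `min_length` the EOS
# column is masked; once the minimum length is reached the scores pass through unchanged. `input_ids`
# is unused here.
#   processor = FlaxMinLengthLogitsProcessor(min_length=5, eos_token_id=0)
#   scores = jnp.zeros((1, 4))
#   processor(None, scores, cur_len=3)  # -> [[-inf, 0., 0., 0.]]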
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
r"""
[`FlaxLogitsProcessor`] suppressing a list of tokens as soon as the `generate` function starts generating using
`begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are not sampled at the
beginning of the generation.
Args:
begin_suppress_tokens (`List[int]`):
Tokens to not sample.
begin_index (`int`):
Index where the tokens are suppressed.
"""
def __init__(self, begin_suppress_tokens, begin_index):
self.begin_suppress_tokens = list(begin_suppress_tokens)
self.begin_index = begin_index
def __call__(self, input_ids, scores, cur_len: int):
apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
r"""
[`FlaxLogitsProcessor`] suppressing a list of tokens at each decoding step. The processor will set their log probs
to be `-inf` so they are not sampled.
Args:
suppress_tokens (`list`):
Tokens to not sample.
"""
def __init__(self, suppress_tokens: list):
self.suppress_tokens = list(suppress_tokens)
def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
r"""
[`FlaxLogitsProcessor`] that takes a list of pairs of integers which indicates a mapping from generation indices to
token indices that will be forced before sampling. The processor will set their log probs to 0 and all other tokens
to `-inf` so that they are sampled at their corresponding index.
Args:
force_token_map (`list`):
Map giving token ids and indices where they will be forced to be sampled.
"""
def __init__(self, force_token_map):
force_token_map = dict(force_token_map)
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
for index, token in force_token_map.items():
if token is not None:
force_token_array = force_token_array.at[index].set(token)
self.force_token_array = jnp.int32(force_token_array)
def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
def _force_token(generation_idx):
batch_size = scores.shape[0]
current_token = self.force_token_array[generation_idx]
new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
return new_scores
scores = lax.cond(
cur_len >= self.force_token_array.shape[0],
# If the current length is geq than the length of force_token_array, the processor does nothing.
lambda: scores,
# Otherwise, it may force a certain token.
lambda: lax.cond(
self.force_token_array[cur_len] >= 0,
# Only valid (positive) tokens are forced
lambda: _force_token(cur_len),
# Otherwise, the processor does nothing.
lambda: scores,
),
)
return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
r"""
Whisper specific Processor. This processor can be used to force a list of tokens. The processor will set their log
probs to `inf` so that they are sampled at their corresponding index.
Args:
generate_config (`GenerateConfig`):
The generate config used to generate the output. The following parameters are required:
eos_token_id (`int`, *optional*, defaults to 50257):
The id of the *end-of-sequence* token.
no_timestamps_token_id (`int`, *optional*, defaults to 50363):
The id of the `"<|notimestamps|>"` token.
max_initial_timestamp_index (`int`, *optional*, defaults to 1):
Used to set the maximum value of the initial timestamp. This is used to prevent the model from
predicting timestamps that are too far in the future.
"""
def __init__(self, generate_config, model_config, decoder_input_length):
self.eos_token_id = generate_config.eos_token_id
self.no_timestamps_token_id = generate_config.no_timestamps_token_id
self.timestamp_begin = generate_config.no_timestamps_token_id + 1
self.begin_index = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(generate_config, "max_initial_timestamp_index"):
self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
else:
self.max_initial_timestamp_index = model_config.vocab_size
if self.max_initial_timestamp_index is None:
self.max_initial_timestamp_index = model_config.vocab_size
def __call__(self, input_ids, scores, cur_len):
# suppress <|notimestamps|> which is handled by without_timestamps
scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))
def handle_pairs(input_ids_k, scores_k):
last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
last_was_timestamp = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin,
True and last_was_timestamp,
False,
)
penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
penultimate_was_timestamp = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin,
True,
penultimate_was_timestamp,
)
return jnp.where(
last_was_timestamp,
jnp.where(
penultimate_was_timestamp > 0,
scores_k.at[self.timestamp_begin :].set(-float("inf")),
scores_k.at[: self.eos_token_id].set(-float("inf")),
),
scores_k,
)
scores = jax.vmap(handle_pairs)(input_ids, scores)
apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
apply_max_initial_timestamp = jnp.where(
self.max_initial_timestamp_index is not None,
True and apply_max_initial_timestamp,
False,
)
last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
scores = jnp.where(
apply_max_initial_timestamp,
scores.at[:, last_allowed + 1 :].set(-float("inf")),
scores,
)
# if sum of probability over timestamps is above any other token, sample timestamp
logprobs = jax.nn.log_softmax(scores, axis=-1)
def handle_cumulative_probs(logprobs_k, scores_k):
timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
return jnp.where(
timestamp_logprob > max_text_token_logprob,
scores_k.at[: self.timestamp_begin].set(-float("inf")),
scores_k,
)
scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
return scores
|
27182812/ChatGLM-LLaMA-chinese-insturct | 19,089 | src/transformers/generation/beam_constraints.py | from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
r"""Abstract base class for all constraints that can be applied during generation.
It must define how the constraint can be satisfied.
All classes that inherit Constraint must follow the requirement that
```py
completed = False
while not completed:
_, completed = constraint.update(constraint.advance())
```
will always terminate (halt).
"""
def __init__(self):
# test for the above condition
self.test()
def test(self):
"""
Tests whether this constraint has been properly defined.
"""
counter = 0
completed = False
while not completed:
if counter == 1:
self.reset()
advance = self.advance()
if not self.does_advance(advance):
raise Exception(
"Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
)
stepped, completed, reset = self.update(advance)
counter += 1
if counter > 10000:
raise Exception("update() does not fulfill the constraint.")
if self.remaining() != 0:
raise Exception("Custom Constraint is not defined correctly.")
@abstractmethod
def advance(self):
"""
When called, returns the token that would take this constraint one step closer to being fulfilled.
Return:
token_ids (`torch.tensor`): The id(s) of the token(s) that would advance the constraint; this can be a single id or a list of candidate ids rather than one fixed integer.
"""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
)
@abstractmethod
def does_advance(self, token_id: int):
"""
Reads in a token and returns whether it creates progress.
"""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
)
@abstractmethod
def update(self, token_id: int):
"""
Reads in a token and returns booleans that indicate the progress made by it. Unlike
`does_advance(self, token_id: int)`, this function updates the state of this object.
This isn't to test whether a certain token will advance the progress; it's to update its state as if it had
been generated. This becomes important if token_id != desired token (refer to the else statement in
PhrasalConstraint).
Args:
token_id(`int`):
The id of a newly generated token in the beam search.
Return:
stepped(`bool`):
Whether this constraint has become one step closer to being fulfilled.
completed(`bool`):
Whether this constraint has been completely fulfilled by this token being generated.
reset (`bool`):
Whether this constraint has reset its progress by this token being generated.
"""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
)
@abstractmethod
def reset(self):
"""
Resets the state of this constraint to its initialization. We would call this in cases where the fulfillment of
a constraint is interrupted by an unwanted token.
"""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
)
@abstractmethod
def remaining(self):
"""
Returns the number of remaining steps of `advance()` in order to complete this constraint.
"""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
)
@abstractmethod
def copy(self, stateful=False):
"""
Creates a new instance of this constraint.
Args:
stateful(`bool`): Whether to copy not only the constraint for the new instance, but also its state.
Return:
constraint(`Constraint`): The same constraint as the one being called from.
"""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
)
class PhrasalConstraint(Constraint):
r"""
[`Constraint`] enforcing that an ordered sequence of tokens is included in the output.
Args:
token_ids (`List[int]`):
The ids of the tokens that must be generated, in order, by the output.
"""
def __init__(self, token_ids: List[int]):
super(Constraint, self).__init__()
if not isinstance(token_ids, list) or len(token_ids) == 0:
raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
self.token_ids = token_ids
self.seqlen = len(self.token_ids)
self.fulfilled_idx = -1 # the index of the currently fulfilled step
self.completed = False
def advance(self):
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def does_advance(self, token_id: int):
if not isinstance(token_id, int):
raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def update(self, token_id: int):
if not isinstance(token_id, int):
raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
stepped = False
completed = False
reset = False
if self.does_advance(token_id):
self.fulfilled_idx += 1
stepped = True
if self.fulfilled_idx == (self.seqlen - 1):
completed = True
self.completed = completed
else:
# failed to make progress.
reset = True
self.reset()
return stepped, completed, reset
def reset(self):
self.completed = False
self.fulfilled_idx = -1  # back to the initial state, i.e. no step fulfilled yet
def remaining(self):
return self.seqlen - (self.fulfilled_idx + 1)
def copy(self, stateful=False):
new_constraint = PhrasalConstraint(self.token_ids)
if stateful:
new_constraint.seqlen = self.seqlen
new_constraint.fulfilled_idx = self.fulfilled_idx
new_constraint.completed = self.completed
return new_constraint
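# Illustrative usage sketch: driving a PhrasalConstraint by hand through the `advance()`/`update()`
# protocol described in the `Constraint` base class.
#   constraint = PhrasalConstraint(token_ids=[5, 6, 7])
#   constraint.advance()      # -> 5, the next token that makes progress
#   constraint.update(5)      # -> (True, False, False), i.e. (stepped, completed, reset)
#   constraint.update(6)      # -> (True, False, False)
#   constraint.update(7)      # -> (True, True, False), the phrase is complete
#   constraint.remaining()    # -> 0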
class DisjunctiveTrie:
def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
r"""
A helper class that builds a trie with the words represented in `nested_token_ids`.
"""
self.max_height = max([len(one) for one in nested_token_ids])
root = {}
for token_ids in nested_token_ids:
level = root
for tidx, token_id in enumerate(token_ids):
if token_id not in level:
level[token_id] = {}
level = level[token_id]
if no_subsets and self.has_subsets(root, nested_token_ids):
raise ValueError(
"Each list in `nested_token_ids` can't be a complete subset of another list, but is"
f" {nested_token_ids}."
)
self.trie = root
def next_tokens(self, current_seq):
"""
The next possible tokens that will progress the trie, given the current sequence of tokens in `current_seq`.
"""
start = self.trie
for current_token in current_seq:
start = start[current_token]
next_tokens = list(start.keys())
return next_tokens
def reached_leaf(self, current_seq):
next_tokens = self.next_tokens(current_seq)
return len(next_tokens) == 0
def count_leaves(self, root):
next_nodes = list(root.values())
if len(next_nodes) == 0:
return 1
else:
return sum([self.count_leaves(nn) for nn in next_nodes])
def has_subsets(self, trie, nested_token_ids):
"""
Returns True when the number of trie leaves differs from the number of words, which means some word is a complete subset of another.
"""
leaf_count = self.count_leaves(trie)
return len(nested_token_ids) != leaf_count
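# Illustrative usage sketch: for any generated prefix, the trie exposes the tokens that keep at least one
# candidate word alive.
#   trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4], [5]])
#   trie.next_tokens([])        # -> [1, 5]
#   trie.next_tokens([1, 2])    # -> [3, 4]
#   trie.reached_leaf([5])      # -> True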
class DisjunctiveConstraint(Constraint):
r"""
A special [`Constraint`] that is fulfilled by fulfilling just one of several constraints.
Args:
nested_token_ids (`List[List[int]]`): a list of words, where each word is a list of ids. This constraint
is fulfilled by generating just one from the list of words.
"""
def __init__(self, nested_token_ids: List[List[int]]):
super(Constraint, self).__init__()
if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
if any(
any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
for token_ids in nested_token_ids
):
raise ValueError(
f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
)
self.trie = DisjunctiveTrie(nested_token_ids)
self.token_ids = nested_token_ids
self.seqlen = self.trie.max_height
self.current_seq = []
self.completed = False
def advance(self):
token_list = self.trie.next_tokens(self.current_seq)
if len(token_list) == 0:
return None
else:
return token_list
def does_advance(self, token_id: int):
if not isinstance(token_id, int):
raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
next_tokens = self.trie.next_tokens(self.current_seq)
return token_id in next_tokens
def update(self, token_id: int):
if not isinstance(token_id, int):
raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
stepped = False
completed = False
reset = False
if self.does_advance(token_id):
self.current_seq.append(token_id)
stepped = True
else:
reset = True
self.reset()
completed = self.trie.reached_leaf(self.current_seq)
self.completed = completed
return stepped, completed, reset
def reset(self):
self.completed = False
self.current_seq = []
def remaining(self):
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq)
def copy(self, stateful=False):
new_constraint = DisjunctiveConstraint(self.token_ids)
if stateful:
new_constraint.seqlen = self.seqlen
new_constraint.current_seq = self.current_seq
new_constraint.completed = self.completed
return new_constraint
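# Illustrative usage sketch: the constraint is satisfied by completing any one of the candidate words;
# after generating `1`, either `2` or `3` finishes it.
#   constraint = DisjunctiveConstraint([[1, 2], [1, 3]])
#   constraint.advance()      # -> [1]
#   constraint.update(1)      # -> (True, False, False)
#   constraint.advance()      # -> [2, 3]
#   constraint.update(3)      # -> (True, True, False), completes the [1, 3] branch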
class ConstraintListState:
r"""
A class for beam scorers to track its progress through a list of constraints.
Args:
constraints (`List[Constraint]`):
A list of [`Constraint`] objects that must be fulfilled by the beam scorer.
"""
def __init__(self, constraints: List[Constraint]):
self.constraints = constraints
# max # of steps required to fulfill a given constraint
self.max_seqlen = max([c.seqlen for c in constraints])
self.n_constraints = len(constraints)
self.completed = False
self.init_state()
def init_state(self):
self.complete_constraints = []
self.inprogress_constraint = None
self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]
def get_bank(self):
add = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints) * self.max_seqlen) + add
def advance(self):
"""The list of tokens to generate such that we can make progress.
By "list" we don't mean the list of token that will fully fulfill a constraint.
Given constraints `c_i = {t_ij | j == # of tokens}`, If we're not in the middle of progressing through a
specific constraint `c_i`, we return:
`[t_k1 for k in indices of unfulfilled constraints]`
If we are in the middle of a constraint, then we return:
`[t_ij]`, where `i` is the index of the inprogress constraint, `j` is the next step for the constraint.
Though we don't care which constraint is fulfilled first, if we are in the process of fulfilling a constraint,
that's the only one we'll return.
"""
token_list = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
advance = constraint.advance()
if isinstance(advance, int):
token_list.append(advance)
elif isinstance(advance, list):
token_list.extend(advance)
else:
advance = self.inprogress_constraint.advance()
if isinstance(advance, int):
token_list.append(advance)
elif isinstance(advance, list):
token_list.extend(advance)
if len(token_list) == 0:
return None
else:
return token_list
def reset(self, token_ids: Optional[List[int]]):
"""
token_ids: the tokens generated thus far to reset the state of the progress through constraints.
"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
complete, stepped = self.add(token)
# the entire list of constraints are fulfilled
if self.completed:
break
def add(self, token_id: int):
if not isinstance(token_id, int):
raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
complete, stepped = False, False
if self.completed:
complete = True
stepped = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the current
# job, simply update the state
stepped, complete, reset = self.inprogress_constraint.update(token_id)
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we call self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
self.inprogress_constraint = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint)
self.inprogress_constraint = None
if len(self.pending_constraints) == 0:
# we're done!
self.completed = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any constraint
# in our list?
for cidx, pending_constraint in enumerate(self.pending_constraints):
if pending_constraint.does_advance(token_id):
stepped, complete, reset = pending_constraint.update(token_id)
if not stepped:
raise Exception(
"`constraint.update(token_id)` is not yielding incremental progress, "
"even though `constraint.does_advance(token_id)` is true."
)
if complete:
self.complete_constraints.append(pending_constraint)
self.inprogress_constraint = None
if not complete and stepped:
self.inprogress_constraint = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
self.pending_constraints = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
self.completed = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def copy(self, stateful=True):
new_state = ConstraintListState(self.constraints) # we never actually modify the self.constraints objects
# throughout this process, so the new state starts from the initialization state.
if stateful:
new_state.complete_constraints = [
constraint.copy(stateful=True) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
return new_state
|
27182812/ChatGLM-LLaMA-chinese-insturct | 48,682 | src/transformers/generation/flax_utils.py | # coding=utf-8
# Copyright 2021 The Google AI Flax Team Authors, and The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import warnings
from functools import partial
from typing import Any, Dict, Optional, Union
import flax
import jax
import jax.numpy as jnp
import numpy as np
from jax import lax
from ..models.auto import (
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING,
)
from ..utils import ModelOutput, logging
from .configuration_utils import GenerationConfig
from .flax_logits_process import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxForceTokensLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxSuppressTokensAtBeginLogitsProcessor,
FlaxSuppressTokensLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
logger = logging.get_logger(__name__)
@flax.struct.dataclass
class FlaxGreedySearchOutput(ModelOutput):
"""
Flax Base class for outputs of decoder-only generation models using greedy search.
Args:
sequences (`jnp.ndarray` of shape `(batch_size, max_length)`):
The generated sequences.
"""
sequences: jnp.ndarray = None
@flax.struct.dataclass
class FlaxSampleOutput(ModelOutput):
"""
Flax Base class for outputs of decoder-only generation models using sampling.
Args:
sequences (`jnp.ndarray` of shape `(batch_size, max_length)`):
The generated sequences.
"""
sequences: jnp.ndarray = None
@flax.struct.dataclass
class FlaxBeamSearchOutput(ModelOutput):
"""
Flax Base class for outputs of decoder-only generation models using greedy search.
Args:
sequences (`jnp.ndarray` of shape `(batch_size, max_length)`):
The generated sequences.
scores (`jnp.ndarray` of shape `(batch_size,)`):
The scores (log probabilities) of the generated sequences.
"""
sequences: jnp.ndarray = None
scores: jnp.ndarray = None
@flax.struct.dataclass
class GreedyState:
cur_len: jnp.ndarray
sequences: jnp.ndarray
running_token: jnp.ndarray
is_sent_finished: jnp.ndarray
model_kwargs: Dict[str, jnp.ndarray]
@flax.struct.dataclass
class SampleState:
cur_len: jnp.ndarray
sequences: jnp.ndarray
running_token: jnp.ndarray
is_sent_finished: jnp.ndarray
prng_key: jnp.ndarray
model_kwargs: Dict[str, jnp.ndarray]
@flax.struct.dataclass
class BeamSearchState:
cur_len: jnp.ndarray
running_sequences: jnp.ndarray
running_scores: jnp.ndarray
sequences: jnp.ndarray
scores: jnp.ndarray
is_sent_finished: jnp.ndarray
model_kwargs: Dict[str, jnp.ndarray]
class FlaxGenerationMixin:
"""
A class containing all functions for auto-regressive text generation, to be used as a mixin in
[`FlaxPreTrainedModel`].
The class exposes [`~generation.FlaxGenerationMixin.generate`], which can be used for:
- *greedy decoding* by calling [`~generation.FlaxGenerationMixin._greedy_search`] if `num_beams=1` and
`do_sample=False`
- *multinomial sampling* by calling [`~generation.FlaxGenerationMixin._sample`] if `num_beams=1` and
`do_sample=True`
- *beam-search decoding* by calling [`~generation.FlaxGenerationMixin._beam_search`] if `num_beams>1` and
`do_sample=False`
You do not need to call any of the above methods directly. Pass custom parameter values to 'generate' instead. To
learn more about decoding strategies refer to the [text generation strategies guide](./generation_strategies).
"""
def prepare_inputs_for_generation(self, *args, **kwargs):
raise NotImplementedError(
"A model class needs to define a `prepare_inputs_for_generation` method in order to use `generate`."
)
@staticmethod
def _run_loop_in_debug(cond_fn, body_fn, init_state):
"""
Run generation in untraced mode. This should only be used for debugging purposes.
"""
state = init_state
while cond_fn(state):
state = body_fn(state)
return state
def _prepare_encoder_decoder_kwargs_for_generation(self, input_ids, params, model_kwargs):
encoder_kwargs = {
argument: value
for argument, value in model_kwargs.items()
if not (argument.startswith("decoder_") or argument.startswith("cross_attn"))
}
model_kwargs["encoder_outputs"] = self.encode(input_ids, params=params, return_dict=True, **encoder_kwargs)
return model_kwargs
def _prepare_decoder_input_ids_for_generation(
self,
batch_size: int,
decoder_start_token_id: int = None,
bos_token_id: int = None,
model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
) -> jnp.ndarray:
if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
# Only use this arg if not None, otherwise just remove from model_kwargs
decoder_input_ids = model_kwargs.pop("decoder_input_ids")
if decoder_input_ids is not None:
return decoder_input_ids
decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
return jnp.array(decoder_start_token_id, dtype="i4").reshape(1, -1).repeat(batch_size, axis=0)
def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int:
# retrieve decoder_start_token_id for encoder-decoder models
# fall back to bos_token_id if necessary
decoder_start_token_id = (
decoder_start_token_id
if decoder_start_token_id is not None
else self.generation_config.decoder_start_token_id
)
bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id
if decoder_start_token_id is not None:
return decoder_start_token_id
elif (
hasattr(self.config, "decoder")
and hasattr(self.config.decoder, "decoder_start_token_id")
and self.config.decoder.decoder_start_token_id is not None
):
return self.config.decoder.decoder_start_token_id
elif bos_token_id is not None:
return bos_token_id
elif (
hasattr(self.config, "decoder")
and hasattr(self.config.decoder, "bos_token_id")
and self.config.decoder.bos_token_id is not None
):
return self.config.decoder.bos_token_id
raise ValueError(
"`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
)
@staticmethod
def _expand_to_num_beams(tensor, num_beams):
return jnp.broadcast_to(tensor[:, None], (tensor.shape[0], num_beams) + tensor.shape[1:])
def _adapt_logits_for_beam_search(self, logits):
"""
This function can be overwritten in the specific modeling_flax_<model-name>.py classes to allow for custom beam
search behavior. Note that the only model that overwrites this method is [`~transformers.FlaxMarianMTModel`].
"""
return logits
def _validate_model_class(self):
"""
Confirms that the model class is compatible with generation. If not, raises an exception that points to the
right class to use.
"""
if not self.can_generate():
generate_compatible_mappings = [
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING,
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
]
generate_compatible_classes = set()
for model_mapping in generate_compatible_mappings:
supported_models = model_mapping.get(type(self.config), default=None)
if supported_models is not None:
generate_compatible_classes.add(supported_models.__name__)
exception_message = (
f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as "
"it doesn't have a language model head."
)
if generate_compatible_classes:
exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}"
raise TypeError(exception_message)
def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):
"""Validates model kwargs for generation. Generate argument typos will also be caught here."""
unused_model_args = []
model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)
# `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If
# `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)
if "kwargs" in model_args or "model_kwargs" in model_args:
model_args |= set(inspect.signature(self.__call__).parameters)
for key, value in model_kwargs.items():
if value is not None and key not in model_args:
unused_model_args.append(key)
if unused_model_args:
raise ValueError(
f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the"
" generate arguments will also show up in this list)"
)
def generate(
self,
input_ids: jnp.ndarray,
generation_config: Optional[GenerationConfig] = None,
prng_key: Optional[jnp.ndarray] = None,
trace: bool = True,
params: Optional[Dict[str, jnp.ndarray]] = None,
logits_processor: Optional[FlaxLogitsProcessorList] = None,
**kwargs,
):
r"""
Generates sequences of token ids for models with a language modeling head.
Parameters:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
generation_config (`~generation.GenerationConfig`, *optional*):
The generation configuration to be used as base parametrization for the generation call. `**kwargs`
passed to generate matching the attributes of `generation_config` will override them. If
`generation_config` is not provided, the default will be used, which has the following loading
priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
default values, whose documentation should be checked to parameterize generation.
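            prng_key (`jnp.ndarray`, *optional*):
                The PRNG key used for sampling-based generation. When not provided, this defaults to
                `jax.random.PRNGKey(0)` (see the body of this method).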
trace (`bool`, *optional*, defaults to `True`):
Whether to trace generation. Setting `trace=False` should only be used for debugging and will lead to a
considerably slower runtime.
params (`Dict[str, jnp.ndarray]`, *optional*):
Optionally the model parameters can be passed. Can be useful for parallelized generation.
            logits_processor (`FlaxLogitsProcessorList`, *optional*):
                Custom logits processors that complement the default logits processors built from arguments and
                generation config. If a logits processor is passed that is already created with the arguments or a
                generation config, an error is thrown. This feature is intended for advanced users.
kwargs:
                Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.
Return:
[`~utils.ModelOutput`].
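
        Examples:

        A minimal usage sketch ("gpt2" is just an illustrative checkpoint; any Flax model
        with a language modeling head and Flax weights should behave the same way):

        ```python
        >>> from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

        >>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
        >>> model = FlaxAutoModelForCausalLM.from_pretrained("gpt2")
        >>> input_ids = tokenizer("Hello, my dog is", return_tensors="np").input_ids
        >>> outputs = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        >>> tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)
        ```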
"""
# Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
self._validate_model_class()
# priority: `generation_config` argument > `model.generation_config` (the default generation config)
if generation_config is None:
# legacy: users may modify the model configuration to control generation -- update the generation config
# model attribute accordingly, if it was created from the model config
if self.generation_config._from_model_config:
new_generation_config = GenerationConfig.from_model_config(self.config)
if new_generation_config != self.generation_config:
warnings.warn(
"You have modified the pretrained model configuration to control generation. This is a"
" deprecated strategy to control generation and will be removed soon, in a future version."
" Please use a generation configuration file (see"
" https://huggingface.co/docs/transformers/main_classes/text_generation)"
)
self.generation_config = new_generation_config
generation_config = self.generation_config
generation_config = copy.deepcopy(generation_config)
model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
generation_config.validate()
self._validate_model_kwargs(model_kwargs.copy())
logits_processor = logits_processor if logits_processor is not None else FlaxLogitsProcessorList()
# set init values
prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0)
if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:
if model_kwargs.get("attention_mask") is None:
logger.warning(
"The attention mask and the pad token id were not set. As a consequence, you may observe "
"unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results."
)
eos_token_id = generation_config.eos_token_id
if isinstance(eos_token_id, list):
eos_token_id = eos_token_id[0]
logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.")
generation_config.pad_token_id = eos_token_id
if generation_config.decoder_start_token_id is None and self.config.is_encoder_decoder:
raise ValueError("`decoder_start_token_id` has to be defined for encoder-decoder generation.")
# decoder-only models should use left-padding for generation (can't be checked with `trace=True`)
if not self.config.is_encoder_decoder and not trace:
if (
generation_config.pad_token_id is not None
and jnp.sum(input_ids[:, -1] == generation_config.pad_token_id) > 0
):
logger.warning(
"A decoder-only architecture is being used, but right-padding was detected! For correct "
"generation results, please set `padding_side='left'` when initializing the tokenizer."
)
batch_size = input_ids.shape[0]
if self.config.is_encoder_decoder:
# add encoder_outputs to model_kwargs
if model_kwargs.get("encoder_outputs") is None:
model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(input_ids, params, model_kwargs)
# prepare decoder_input_ids for generation
input_ids = self._prepare_decoder_input_ids_for_generation(
batch_size,
decoder_start_token_id=generation_config.decoder_start_token_id,
bos_token_id=generation_config.bos_token_id,
model_kwargs=model_kwargs,
)
# Prepare `max_length` depending on other stopping criteria.
input_ids_seq_length = input_ids.shape[-1]
has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
if has_default_max_length and generation_config.max_new_tokens is None:
warnings.warn(
f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
"This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
" recommend using `max_new_tokens` to control the maximum length of the generation.",
UserWarning,
)
elif generation_config.max_new_tokens is not None:
generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
if not has_default_max_length:
                logger.warning(
                    f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length` (="
                    f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
                    "Please refer to the documentation for more information. "
                    "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
                )
if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length:
raise ValueError(
f"Unfeasable length constraints: the minimum length ({generation_config.min_length}) is larger than"
f" the maximum length ({generation_config.max_length})"
)
if input_ids_seq_length >= generation_config.max_length:
input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
logger.warning(
f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
" increasing`max_new_tokens`."
)
logits_processor = self._get_logits_processor(
generation_config=generation_config,
input_ids_seq_length=input_ids_seq_length,
logits_processor=logits_processor,
)
if not generation_config.do_sample and generation_config.num_beams == 1:
return self._greedy_search(
input_ids,
generation_config.max_length,
generation_config.pad_token_id,
generation_config.eos_token_id,
logits_processor=logits_processor,
trace=trace,
params=params,
model_kwargs=model_kwargs,
)
elif generation_config.do_sample and generation_config.num_beams == 1:
logits_warper = self._get_logits_warper(generation_config=generation_config)
return self._sample(
input_ids,
generation_config.max_length,
generation_config.pad_token_id,
generation_config.eos_token_id,
prng_key,
logits_warper=logits_warper,
logits_processor=logits_processor,
trace=trace,
params=params,
model_kwargs=model_kwargs,
)
elif not generation_config.do_sample and generation_config.num_beams > 1:
# broadcast input_ids & encoder_outputs
input_ids = self._expand_to_num_beams(input_ids, num_beams=generation_config.num_beams)
if "encoder_outputs" in model_kwargs:
model_kwargs["encoder_outputs"]["last_hidden_state"] = self._expand_to_num_beams(
model_kwargs["encoder_outputs"]["last_hidden_state"], num_beams=generation_config.num_beams
)
for kwarg in ["attention_mask", "decoder_attention_mask"]:
if kwarg in model_kwargs:
model_kwargs[kwarg] = self._expand_to_num_beams(
model_kwargs[kwarg], num_beams=generation_config.num_beams
)
return self._beam_search(
input_ids,
generation_config.max_length,
generation_config.pad_token_id,
generation_config.eos_token_id,
length_penalty=generation_config.length_penalty,
early_stopping=generation_config.early_stopping,
logits_processor=logits_processor,
trace=trace,
params=params,
model_kwargs=model_kwargs,
)
else:
raise NotImplementedError("`Beam sampling is currently not implemented.")
def _get_logits_warper(self, generation_config: GenerationConfig) -> FlaxLogitsProcessorList:
"""
        This method returns a [`FlaxLogitsProcessorList`] object that contains all relevant [`FlaxLogitsWarper`]
instances used for multinomial sampling.
"""
warpers = FlaxLogitsProcessorList()
if generation_config.temperature is not None and generation_config.temperature != 1.0:
warpers.append(FlaxTemperatureLogitsWarper(generation_config.temperature))
if generation_config.top_k is not None and generation_config.top_k != 0:
warpers.append(FlaxTopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=1))
if generation_config.top_p is not None and generation_config.top_p < 1.0:
warpers.append(FlaxTopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=1))
return warpers
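    # Hedged sketch of how the returned list is consumed during sampling (see
    # `_sample` below); the warpers are applied in registration order, i.e.
    # temperature, then top-k, then top-p:
    #
    #     warpers = self._get_logits_warper(generation_config)
    #     logits = warpers(input_ids, logits, cur_len)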
def _get_logits_processor(
self,
generation_config: GenerationConfig,
input_ids_seq_length: int,
logits_processor: Optional[FlaxLogitsProcessorList],
) -> FlaxLogitsProcessorList:
"""
        This method returns a [`FlaxLogitsProcessorList`] object that contains all relevant [`FlaxLogitsProcessor`]
instances used to modify the scores of the language model head.
"""
processors = FlaxLogitsProcessorList()
if (
generation_config.min_length is not None
and generation_config.eos_token_id is not None
and generation_config.min_length > -1
):
processors.append(
FlaxMinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id)
)
if generation_config.forced_bos_token_id is not None:
processors.append(FlaxForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id))
if generation_config.forced_eos_token_id is not None:
processors.append(
FlaxForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id)
)
if generation_config.suppress_tokens is not None:
processors.append(FlaxSuppressTokensLogitsProcessor(generation_config.suppress_tokens))
if generation_config.begin_suppress_tokens is not None:
begin_index = input_ids_seq_length
begin_index = (
begin_index
if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None)
else begin_index + 1
)
if generation_config.forced_decoder_ids is not None and len(generation_config.forced_decoder_ids) > 0:
# generation starts after the last token that is forced
begin_index += generation_config.forced_decoder_ids[-1][0]
processors.append(
FlaxSuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index)
)
if generation_config.forced_decoder_ids is not None:
forced_decoder_ids = [
[input_ids_seq_length + i[0] - 1, i[1]] for i in generation_config.forced_decoder_ids
]
processors.append(FlaxForceTokensLogitsProcessor(forced_decoder_ids))
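            # Worked example (illustrative numbers): with `input_ids_seq_length == 3` and
            # `generation_config.forced_decoder_ids == [[1, 50259]]`, the shifted list is
            # `[[3, 50259]]`, i.e. the forced token lands on the first generated position.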
processors = self._merge_criteria_processor_list(processors, logits_processor)
return processors
def _merge_criteria_processor_list(
self,
default_list: FlaxLogitsProcessorList,
custom_list: FlaxLogitsProcessorList,
) -> FlaxLogitsProcessorList:
if len(custom_list) == 0:
return default_list
for default in default_list:
for custom in custom_list:
if type(custom) is type(default):
object_type = "logits processor"
raise ValueError(
f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to"
f" `generate`, but it has already been created with the values {default}. {default} has been"
" created by passing the corresponding arguments to generate or by the model's config default"
f" values. If you just want to change the default values of {object_type} consider passing"
f" them as arguments to `generate` instead of using a custom {object_type}."
)
default_list.extend(custom_list)
return default_list
def _greedy_search(
self,
        input_ids: jnp.ndarray,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
logits_processor: Optional[FlaxLogitsProcessorList] = None,
trace: bool = True,
params: Optional[Dict[str, jnp.ndarray]] = None,
model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
):
# init values
max_length = max_length if max_length is not None else self.generation_config.max_length
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
batch_size, cur_len = input_ids.shape
eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
cur_len = jnp.array(cur_len)
# per batch-item holding current token in loop.
sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32)
sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0))
# per batch-item state bit indicating if sentence has finished.
is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_)
# For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
# and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
model = self.decode if self.config.is_encoder_decoder else self
# initialize model specific kwargs
model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs)
# initialize state
state = GreedyState(
cur_len=cur_len,
sequences=sequences,
running_token=input_ids,
is_sent_finished=is_sent_finished,
model_kwargs=model_kwargs,
)
def greedy_search_cond_fn(state):
"""state termination condition fn."""
has_reached_max_length = state.cur_len == max_length
all_sequence_finished = jnp.all(state.is_sent_finished)
finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished)
return ~finish_generation
def greedy_search_body_fn(state):
"""state update fn."""
model_outputs = model(state.running_token, params=params, **state.model_kwargs)
logits = model_outputs.logits[:, -1]
# apply min_length, ...
logits = logits_processor(state.sequences, logits, state.cur_len)
next_token = jnp.argmax(logits, axis=-1)
next_token = next_token * ~state.is_sent_finished + pad_token_id * state.is_sent_finished
next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id)
next_token = next_token[:, None]
next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len))
next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
return GreedyState(
cur_len=state.cur_len + 1,
sequences=next_sequences,
running_token=next_token,
is_sent_finished=next_is_sent_finished,
model_kwargs=next_model_kwargs,
)
# The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
if input_ids.shape[1] > 1:
state = greedy_search_body_fn(state)
if not trace:
state = self._run_loop_in_debug(greedy_search_cond_fn, greedy_search_body_fn, state)
else:
state = lax.while_loop(greedy_search_cond_fn, greedy_search_body_fn, state)
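        # For reference, `lax.while_loop(cond_fn, body_fn, init)` behaves like the
        # plain-Python sketch below, but is traced and compiled once, which is why all
        # arrays in the loop state (e.g. the fixed-size `sequences` buffer) must keep
        # static shapes:
        #
        #     state = init
        #     while cond_fn(state):
        #         state = body_fn(state)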
return FlaxGreedySearchOutput(sequences=state.sequences)
def _sample(
self,
        input_ids: jnp.ndarray,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
prng_key: Optional[jnp.ndarray] = None,
logits_processor: Optional[FlaxLogitsProcessorList] = None,
logits_warper: Optional[FlaxLogitsProcessorList] = None,
trace: bool = True,
params: Optional[Dict[str, jnp.ndarray]] = None,
model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
):
# init values
max_length = max_length if max_length is not None else self.generation_config.max_length
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0)
batch_size, cur_len = input_ids.shape
eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
cur_len = jnp.array(cur_len)
# per batch-item holding current token in loop.
sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32)
sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0))
# per batch-item state bit indicating if sentence has finished.
is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_)
# For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
# and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
model = self.decode if self.config.is_encoder_decoder else self
# initialize model specific kwargs
model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs)
# initialize state
state = SampleState(
cur_len=cur_len,
sequences=sequences,
running_token=input_ids,
is_sent_finished=is_sent_finished,
prng_key=prng_key,
model_kwargs=model_kwargs,
)
def sample_search_cond_fn(state):
"""state termination condition fn."""
has_reached_max_length = state.cur_len == max_length
all_sequence_finished = jnp.all(state.is_sent_finished)
finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished)
return ~finish_generation
def sample_search_body_fn(state):
"""state update fn."""
prng_key, prng_key_next = jax.random.split(state.prng_key)
model_outputs = model(state.running_token, params=params, **state.model_kwargs)
logits = model_outputs.logits[:, -1]
# apply min_length, ...
logits = logits_processor(state.sequences, logits, state.cur_len)
            # apply top_p, top_k, temperature; warpers ignore their first (`input_ids`)
            # argument, which is why `logits` is simply passed twice here
            logits = logits_warper(logits, logits, state.cur_len)
next_token = jax.random.categorical(prng_key, logits, axis=-1)
next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id)
next_token = next_token * ~next_is_sent_finished + pad_token_id * next_is_sent_finished
next_token = next_token[:, None]
next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len))
next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
return SampleState(
cur_len=state.cur_len + 1,
sequences=next_sequences,
running_token=next_token,
is_sent_finished=next_is_sent_finished,
model_kwargs=next_model_kwargs,
prng_key=prng_key_next,
)
# The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
if input_ids.shape[1] > 1:
state = sample_search_body_fn(state)
if not trace:
state = self._run_loop_in_debug(sample_search_cond_fn, sample_search_body_fn, state)
else:
state = lax.while_loop(sample_search_cond_fn, sample_search_body_fn, state)
return FlaxSampleOutput(sequences=state.sequences)
def _beam_search(
self,
        input_ids: jnp.ndarray,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
length_penalty: Optional[float] = None,
early_stopping: Optional[Union[bool, str]] = None,
logits_processor: Optional[FlaxLogitsProcessorList] = None,
trace: bool = True,
params: Optional[Dict[str, jnp.ndarray]] = None,
model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
):
"""
This beam search function is heavily inspired by Flax's official example:
https://github.com/google/flax/blob/main/examples/wmt/decode.py
"""
def flatten_beam_dim(tensor):
"""Flattens the first two dimensions of a non-scalar array."""
# ignore scalars (e.g. cache index)
if tensor.ndim == 0:
return tensor
return tensor.reshape((tensor.shape[0] * tensor.shape[1],) + tensor.shape[2:])
def unflatten_beam_dim(tensor, batch_size, num_beams):
"""Unflattens the first, flat batch*beam dimension of a non-scalar array."""
# ignore scalars (e.g. cache index)
if tensor.ndim == 0:
return tensor
return tensor.reshape((batch_size, num_beams) + tensor.shape[1:])
def gather_beams(nested, beam_indices, batch_size, new_num_beams):
"""
            Gathers the beam slices indexed by beam_indices into a new beam array.
"""
batch_indices = jnp.reshape(
jnp.arange(batch_size * new_num_beams) // new_num_beams, (batch_size, new_num_beams)
)
def gather_fn(tensor):
# ignore scalars (e.g. cache index)
if tensor.ndim == 0:
return tensor
else:
return tensor[batch_indices, beam_indices]
return jax.tree_util.tree_map(gather_fn, nested)
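        # Shape cheat-sheet for the helpers above (illustrative numbers): with
        # batch_size=2, num_beams=3 and a sequence buffer of length 5,
        #   flatten_beam_dim:   (2, 3, 5)       -> (6, 5)
        #   unflatten_beam_dim: (6, vocab_size) -> (2, 3, vocab_size)
        #   gather_beams: selects `new_num_beams` beams per batch item along axis 1.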
# init values
max_length = max_length if max_length is not None else self.generation_config.max_length
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
length_penalty = length_penalty if length_penalty is not None else self.generation_config.length_penalty
early_stopping = early_stopping if early_stopping is not None else self.generation_config.early_stopping
batch_size, num_beams, cur_len = input_ids.shape
eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
cur_len = jnp.array(cur_len)
# per batch,beam-item holding current token in loop.
sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32)
running_sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32)
running_sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0, 0))
# per batch,beam-item state bit indicating if sentence has finished.
is_sent_finished = jnp.zeros((batch_size, num_beams), dtype=jnp.bool_)
# per batch,beam-item score, logprobs
running_scores = jnp.tile(jnp.array([0.0] + [np.array(-1.0e7)] * (num_beams - 1)), [batch_size, 1])
scores = jnp.ones((batch_size, num_beams)) * np.array(-1.0e7)
# For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
# and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
model = self.decode if self.config.is_encoder_decoder else self
# flatten beam dim
if "encoder_outputs" in model_kwargs:
model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim(
model_kwargs["encoder_outputs"]["last_hidden_state"]
)
for kwarg in ["attention_mask", "decoder_attention_mask"]:
if kwarg in model_kwargs:
model_kwargs[kwarg] = flatten_beam_dim(model_kwargs[kwarg])
# initialize model specific kwargs
model_kwargs = self.prepare_inputs_for_generation(flatten_beam_dim(input_ids), max_length, **model_kwargs)
# initialize state
state = BeamSearchState(
cur_len=cur_len,
running_sequences=running_sequences,
running_scores=running_scores,
sequences=sequences,
scores=scores,
is_sent_finished=is_sent_finished,
model_kwargs=model_kwargs,
)
def beam_search_cond_fn(state):
"""beam search state termination condition fn."""
# 1. is less than max length?
not_max_length_yet = state.cur_len < max_length
# 2. can the new beams still improve?
            # early_stopping == False -> apply a heuristic: compute the best possible score from `cur_len`. See the
            # discussion below for more details:
# https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565
# early_stopping == "never" -> compute the best score from max_length or cur_len, depending on the sign of
# length_penalty. Positive length_penalty favors longer sequences, thus we use max_length there.
if early_stopping == "never" and length_penalty > 0.0:
best_running_score = state.running_scores[:, :1] / (max_length**length_penalty)
else:
best_running_score = state.running_scores[:, :1] / (state.cur_len**length_penalty)
worst_finished_score = jnp.where(
state.is_sent_finished, jnp.min(state.scores, axis=1, keepdims=True), np.array(-1.0e7)
)
improvement_still_possible = jnp.any(best_running_score > worst_finished_score)
# 3. is there still a beam that has not finished?
still_open_beam = ~(jnp.all(state.is_sent_finished) & (early_stopping is True))
return not_max_length_yet & still_open_beam & improvement_still_possible
def beam_search_body_fn(state, input_ids_length=1):
"""beam search state update fn."""
# 1. Forward current tokens
# Collect the current position slice along length to feed the fast
# autoregressive decoder model. Flatten the beam dimension into batch
# dimension for feeding into the model.
            input_token = flatten_beam_dim(
                lax.dynamic_slice(
                    state.running_sequences,
                    (0, 0, state.cur_len - input_ids_length),
                    (batch_size, num_beams, input_ids_length),
                )
            )
            model_outputs = model(input_token, params=params, **state.model_kwargs)
            # unflatten beam dimension
            logits = unflatten_beam_dim(model_outputs.logits[:, -1], batch_size, num_beams)
            # Unflatten beam dimension in attention cache arrays
            cache = jax.tree_util.tree_map(
                lambda tensor: unflatten_beam_dim(tensor, batch_size, num_beams), model_outputs.past_key_values
            )
# adapt logits for FlaxMarianMTModel
logits = self._adapt_logits_for_beam_search(logits)
# 2. Compute log probs
# get log probabilities from logits,
# process logits with processors (*e.g.* min_length, ...), and
# add new logprobs to existing running logprobs scores.
log_probs = jax.nn.log_softmax(logits)
log_probs = logits_processor(
                flatten_beam_dim(state.running_sequences), flatten_beam_dim(log_probs), state.cur_len
)
log_probs = unflatten_beam_dim(log_probs, batch_size, num_beams)
log_probs = log_probs + jnp.expand_dims(state.running_scores, axis=2)
vocab_size = log_probs.shape[2]
log_probs = log_probs.reshape((batch_size, num_beams * vocab_size))
            # 3. Retrieve top-K
            # Each item in batch has num_beams * vocab_size candidate sequences.
            # For each item, get the top 2*K candidates with the highest log-
            # probabilities. We gather the top 2*K beams here so that even if the best
            # K sequences reach EOS simultaneously, we have another K sequences
            # remaining to continue the live beam search.
            beams_to_keep = 2 * num_beams
            # Gather the top 2*K scores from _all_ beams.
            topk_log_probs, topk_indices = lax.top_k(log_probs, k=beams_to_keep)
            # Recover the beam index by floor division.
            topk_beam_indices = topk_indices // vocab_size
            # Gather the 2*K top beams.
            topk_running_sequences = gather_beams(
                state.running_sequences, topk_beam_indices, batch_size, beams_to_keep
            )
            # Recover the token id by modulo division and expand the id array for broadcasting.
            topk_ids = jnp.expand_dims(topk_indices % vocab_size, axis=2)
            # Update sequences for the 2*K top-K new sequences.
            topk_sequences = lax.dynamic_update_slice(topk_running_sequences, topk_ids, (0, 0, state.cur_len))
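            # Worked example (illustrative): with vocab_size=10 and num_beams=2, log_probs
            # has 20 candidates per batch item and k=4; topk_indices [13, 5, 7, 19] split
            # into beam indices [1, 0, 0, 1] (// 10) and token ids [3, 5, 7, 9] (% 10).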
            # 4. Check which sequences have ended
            # Update current sequences: did any of these sequences reach an end marker?
            # To prevent just-finished sequences from being added to the set of active
            # beam search sequences, set their log probs to a very large negative value.
did_topk_just_finished = topk_sequences[:, :, state.cur_len] == eos_token_id
running_topk_log_probs = topk_log_probs + did_topk_just_finished * np.array(-1.0e7)
            # 5. Get running sequences scores for the next iteration
# Determine the top k beam indices (from top 2*k beams) from log probs
# and gather top k beams (from top 2*k beams).
next_topk_indices = lax.top_k(running_topk_log_probs, k=num_beams)[1]
next_running_sequences, next_running_scores = gather_beams(
[topk_sequences, running_topk_log_probs], next_topk_indices, batch_size, num_beams
)
# 6. Process topk logits
# Further process log probs:
# - add length penalty
# - make sure no scores can be added anymore if beam is full
# - make sure still running sequences cannot be chosen as finalized beam
topk_log_probs = topk_log_probs / (state.cur_len**length_penalty)
beams_in_batch_are_full = jnp.broadcast_to(
state.is_sent_finished.all(axis=-1, keepdims=True), did_topk_just_finished.shape
) & (early_stopping is True)
add_penalty = ~did_topk_just_finished | beams_in_batch_are_full
topk_log_probs += add_penalty * np.array(-1.0e7)
            # 7. Get scores, sequences, is sentence finished for the next iteration.
# Combine sequences, scores, and flags along the beam dimension and compare
# new finished sequence scores to existing finished scores and select the
# best from the new set of beams
merged_sequences = jnp.concatenate([state.sequences, topk_sequences], axis=1)
merged_scores = jnp.concatenate([state.scores, topk_log_probs], axis=1)
merged_is_sent_finished = jnp.concatenate([state.is_sent_finished, did_topk_just_finished], axis=1)
topk_merged_indices = lax.top_k(merged_scores, k=num_beams)[1]
next_sequences, next_scores, next_is_sent_finished = gather_beams(
[merged_sequences, merged_scores, merged_is_sent_finished], topk_merged_indices, batch_size, num_beams
)
# 8. Update model kwargs.
# Determine the top k beam indices from the original set of all beams.
# With these, gather the top k beam-associated caches.
next_running_indices = gather_beams(topk_beam_indices, next_topk_indices, batch_size, num_beams)
next_cache = gather_beams(cache, next_running_indices, batch_size, num_beams)
model_outputs["past_key_values"] = jax.tree_util.tree_map(lambda x: flatten_beam_dim(x), next_cache)
next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
return BeamSearchState(
cur_len=state.cur_len + 1,
running_scores=next_running_scores,
running_sequences=next_running_sequences,
scores=next_scores,
sequences=next_sequences,
is_sent_finished=next_is_sent_finished,
model_kwargs=next_model_kwargs,
)
# The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
if input_ids.shape[-1] > 1:
state = partial(beam_search_body_fn, input_ids_length=input_ids.shape[-1])(state)
if not trace:
state = self._run_loop_in_debug(beam_search_cond_fn, beam_search_body_fn, state)
else:
state = lax.while_loop(beam_search_cond_fn, beam_search_body_fn, state)
# Account for the edge-case where there are no finished sequences for a
# particular batch item. If so, return running sequences for that batch item.
none_finished = jnp.any(state.is_sent_finished, axis=1)
sequences = jnp.where(none_finished[:, None, None], state.sequences, state.running_sequences)
scores = jnp.where(none_finished[:, None], state.scores, state.running_scores)
# take best beam for each batch
sequences = sequences[:, 0]
scores = scores[:, 0]
return FlaxBeamSearchOutput(sequences=sequences, scores=scores)
|
233zzh/TitanDataOperationSystem | 122,971 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/flot.tooltip/js/jquery.flot.js | /* Javascript plotting library for jQuery, version 0.8.3.
Copyright (c) 2007-2014 IOLA and Ole Laursen.
Licensed under the MIT license.
*/
// first an inline dependency, jquery.colorhelpers.js, we inline it here
// for convenience
/* Plugin for jQuery for working with colors.
*
* Version 1.1.
*
* Inspiration from jQuery color animation plugin by John Resig.
*
* Released under the MIT license by Ole Laursen, October 2009.
*
* Examples:
*
* $.color.parse("#fff").scale('rgb', 0.25).add('a', -0.5).toString()
* var c = $.color.extract($("#mydiv"), 'background-color');
* console.log(c.r, c.g, c.b, c.a);
* $.color.make(100, 50, 25, 0.4).toString() // returns "rgba(100,50,25,0.4)"
*
* Note that .scale() and .add() return the same modified object
* instead of making a new one.
*
* V. 1.1: Fix error handling so e.g. parsing an empty string does
* produce a color rather than just crashing.
*/
(function($){$.color={};$.color.make=function(r,g,b,a){var o={};o.r=r||0;o.g=g||0;o.b=b||0;o.a=a!=null?a:1;o.add=function(c,d){for(var i=0;i<c.length;++i)o[c.charAt(i)]+=d;return o.normalize()};o.scale=function(c,f){for(var i=0;i<c.length;++i)o[c.charAt(i)]*=f;return o.normalize()};o.toString=function(){if(o.a>=1){return"rgb("+[o.r,o.g,o.b].join(",")+")"}else{return"rgba("+[o.r,o.g,o.b,o.a].join(",")+")"}};o.normalize=function(){function clamp(min,value,max){return value<min?min:value>max?max:value}o.r=clamp(0,parseInt(o.r),255);o.g=clamp(0,parseInt(o.g),255);o.b=clamp(0,parseInt(o.b),255);o.a=clamp(0,o.a,1);return o};o.clone=function(){return $.color.make(o.r,o.b,o.g,o.a)};return o.normalize()};$.color.extract=function(elem,css){var c;do{c=elem.css(css).toLowerCase();if(c!=""&&c!="transparent")break;elem=elem.parent()}while(elem.length&&!$.nodeName(elem.get(0),"body"));if(c=="rgba(0, 0, 0, 0)")c="transparent";return $.color.parse(c)};$.color.parse=function(str){var res,m=$.color.make;if(res=/rgb\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*\)/.exec(str))return m(parseInt(res[1],10),parseInt(res[2],10),parseInt(res[3],10));if(res=/rgba\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(str))return m(parseInt(res[1],10),parseInt(res[2],10),parseInt(res[3],10),parseFloat(res[4]));if(res=/rgb\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*\)/.exec(str))return m(parseFloat(res[1])*2.55,parseFloat(res[2])*2.55,parseFloat(res[3])*2.55);if(res=/rgba\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(str))return m(parseFloat(res[1])*2.55,parseFloat(res[2])*2.55,parseFloat(res[3])*2.55,parseFloat(res[4]));if(res=/#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})/.exec(str))return m(parseInt(res[1],16),parseInt(res[2],16),parseInt(res[3],16));if(res=/#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])/.exec(str))return m(parseInt(res[1]+res[1],16),parseInt(res[2]+res[2],16),parseInt(res[3]+res[3],16));var name=$.trim(str).toLowerCase();if(name=="transparent")return m(255,255,255,0);else{res=lookupColors[name]||[0,0,0];return m(res[0],res[1],res[2])}};var lookupColors={aqua:[0,255,255],azure:[240,255,255],beige:[245,245,220],black:[0,0,0],blue:[0,0,255],brown:[165,42,42],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgrey:[169,169,169],darkgreen:[0,100,0],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkviolet:[148,0,211],fuchsia:[255,0,255],gold:[255,215,0],green:[0,128,0],indigo:[75,0,130],khaki:[240,230,140],lightblue:[173,216,230],lightcyan:[224,255,255],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightyellow:[255,255,224],lime:[0,255,0],magenta:[255,0,255],maroon:[128,0,0],navy:[0,0,128],olive:[128,128,0],orange:[255,165,0],pink:[255,192,203],purple:[128,0,128],violet:[128,0,128],red:[255,0,0],silver:[192,192,192],white:[255,255,255],yellow:[255,255,0]}})(jQuery);
// the actual Flot code
(function($) {
// Cache the prototype hasOwnProperty for faster access
var hasOwnProperty = Object.prototype.hasOwnProperty;
// A shim to provide 'detach' to jQuery versions prior to 1.4. Using a DOM
// operation produces the same effect as detach, i.e. removing the element
// without touching its jQuery data.
// Do not merge this into Flot 0.9, since it requires jQuery 1.4.4+.
if (!$.fn.detach) {
$.fn.detach = function() {
return this.each(function() {
if (this.parentNode) {
this.parentNode.removeChild( this );
}
});
};
}
///////////////////////////////////////////////////////////////////////////
// The Canvas object is a wrapper around an HTML5 <canvas> tag.
//
// @constructor
// @param {string} cls List of classes to apply to the canvas.
// @param {element} container Element onto which to append the canvas.
//
// Requiring a container is a little iffy, but unfortunately canvas
// operations don't work unless the canvas is attached to the DOM.
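    //
    // A hedged usage sketch (this mirrors how the plot setup code later in this
    // file instantiates its base and overlay canvases):
    //
    //     var surface = new Canvas("flot-base", placeholder);
    //     surface.clear();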
function Canvas(cls, container) {
var element = container.children("." + cls)[0];
if (element == null) {
element = document.createElement("canvas");
element.className = cls;
$(element).css({ direction: "ltr", position: "absolute", left: 0, top: 0 })
.appendTo(container);
// If HTML5 Canvas isn't available, fall back to [Ex|Flash]canvas
if (!element.getContext) {
if (window.G_vmlCanvasManager) {
element = window.G_vmlCanvasManager.initElement(element);
} else {
throw new Error("Canvas is not available. If you're using IE with a fall-back such as Excanvas, then there's either a mistake in your conditional include, or the page has no DOCTYPE and is rendering in Quirks Mode.");
}
}
}
this.element = element;
var context = this.context = element.getContext("2d");
// Determine the screen's ratio of physical to device-independent
// pixels. This is the ratio between the canvas width that the browser
// advertises and the number of pixels actually present in that space.
// The iPhone 4, for example, has a device-independent width of 320px,
// but its screen is actually 640px wide. It therefore has a pixel
// ratio of 2, while most normal devices have a ratio of 1.
var devicePixelRatio = window.devicePixelRatio || 1,
backingStoreRatio =
context.webkitBackingStorePixelRatio ||
context.mozBackingStorePixelRatio ||
context.msBackingStorePixelRatio ||
context.oBackingStorePixelRatio ||
context.backingStorePixelRatio || 1;
this.pixelRatio = devicePixelRatio / backingStoreRatio;
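        // Worked example (illustrative): devicePixelRatio = 2 with a backing store
        // ratio of 1 gives pixelRatio = 2, so resize(320, 240) below sets
        // element.width = 640 while element.style.width stays "320px".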
// Size the canvas to match the internal dimensions of its container
this.resize(container.width(), container.height());
// Collection of HTML div layers for text overlaid onto the canvas
this.textContainer = null;
this.text = {};
// Cache of text fragments and metrics, so we can avoid expensively
// re-calculating them when the plot is re-rendered in a loop.
this._textCache = {};
}
// Resizes the canvas to the given dimensions.
//
// @param {number} width New width of the canvas, in pixels.
    // @param {number} height New height of the canvas, in pixels.
Canvas.prototype.resize = function(width, height) {
if (width <= 0 || height <= 0) {
throw new Error("Invalid dimensions for plot, width = " + width + ", height = " + height);
}
var element = this.element,
context = this.context,
pixelRatio = this.pixelRatio;
// Resize the canvas, increasing its density based on the display's
// pixel ratio; basically giving it more pixels without increasing the
// size of its element, to take advantage of the fact that retina
// displays have that many more pixels in the same advertised space.
// Resizing should reset the state (excanvas seems to be buggy though)
if (this.width != width) {
element.width = width * pixelRatio;
element.style.width = width + "px";
this.width = width;
}
if (this.height != height) {
element.height = height * pixelRatio;
element.style.height = height + "px";
this.height = height;
}
// Save the context, so we can reset in case we get replotted. The
        // restore ensures that we're really back at the initial state, and
// should be safe even if we haven't saved the initial state yet.
context.restore();
context.save();
// Scale the coordinate space to match the display density; so even though we
// may have twice as many pixels, we still want lines and other drawing to
// appear at the same size; the extra pixels will just make them crisper.
context.scale(pixelRatio, pixelRatio);
};
// Clears the entire canvas area, not including any overlaid HTML text
Canvas.prototype.clear = function() {
this.context.clearRect(0, 0, this.width, this.height);
};
// Finishes rendering the canvas, including managing the text overlay.
Canvas.prototype.render = function() {
var cache = this._textCache;
// For each text layer, add elements marked as active that haven't
// already been rendered, and remove those that are no longer active.
for (var layerKey in cache) {
if (hasOwnProperty.call(cache, layerKey)) {
var layer = this.getTextLayer(layerKey),
layerCache = cache[layerKey];
layer.hide();
for (var styleKey in layerCache) {
if (hasOwnProperty.call(layerCache, styleKey)) {
var styleCache = layerCache[styleKey];
for (var key in styleCache) {
if (hasOwnProperty.call(styleCache, key)) {
var positions = styleCache[key].positions;
for (var i = 0, position; position = positions[i]; i++) {
if (position.active) {
if (!position.rendered) {
layer.append(position.element);
position.rendered = true;
}
} else {
positions.splice(i--, 1);
if (position.rendered) {
position.element.detach();
}
}
}
if (positions.length == 0) {
delete styleCache[key];
}
}
}
}
}
layer.show();
}
}
};
// Creates (if necessary) and returns the text overlay container.
//
// @param {string} classes String of space-separated CSS classes used to
// uniquely identify the text layer.
// @return {object} The jQuery-wrapped text-layer div.
Canvas.prototype.getTextLayer = function(classes) {
var layer = this.text[classes];
// Create the text layer if it doesn't exist
if (layer == null) {
// Create the text layer container, if it doesn't exist
if (this.textContainer == null) {
this.textContainer = $("<div class='flot-text'></div>")
.css({
position: "absolute",
top: 0,
left: 0,
bottom: 0,
right: 0,
'font-size': "smaller",
color: "#545454"
})
.insertAfter(this.element);
}
layer = this.text[classes] = $("<div></div>")
.addClass(classes)
.css({
position: "absolute",
top: 0,
left: 0,
bottom: 0,
right: 0
})
.appendTo(this.textContainer);
}
return layer;
};
// Creates (if necessary) and returns a text info object.
//
// The object looks like this:
//
// {
// width: Width of the text's wrapper div.
// height: Height of the text's wrapper div.
// element: The jQuery-wrapped HTML div containing the text.
// positions: Array of positions at which this text is drawn.
// }
//
// The positions array contains objects that look like this:
//
// {
// active: Flag indicating whether the text should be visible.
// rendered: Flag indicating whether the text is currently visible.
// element: The jQuery-wrapped HTML div containing the text.
// x: X coordinate at which to draw the text.
// y: Y coordinate at which to draw the text.
// }
//
// Each position after the first receives a clone of the original element.
//
    // The idea is that the width, height, and general 'identity' of the
// text is constant no matter where it is placed; the placements are a
// secondary property.
//
// Canvas maintains a cache of recently-used text info objects; getTextInfo
// either returns the cached element or creates a new entry.
//
// @param {string} layer A string of space-separated CSS classes uniquely
// identifying the layer containing this text.
// @param {string} text Text string to retrieve info for.
// @param {(string|object)=} font Either a string of space-separated CSS
// classes or a font-spec object, defining the text's font and style.
// @param {number=} angle Angle at which to rotate the text, in degrees.
// Angle is currently unused, it will be implemented in the future.
// @param {number=} width Maximum width of the text before it wraps.
// @return {object} a text info object.
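    //
    // A hedged usage sketch (layer and font classes are illustrative):
    //
    //     var info = this.getTextInfo("flot-x-axis", "3.5", "flot-tick-label");
    //     // info.width / info.height are measured once, then served from the cache.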
Canvas.prototype.getTextInfo = function(layer, text, font, angle, width) {
var textStyle, layerCache, styleCache, info;
// Cast the value to a string, in case we were given a number or such
text = "" + text;
// If the font is a font-spec object, generate a CSS font definition
if (typeof font === "object") {
textStyle = font.style + " " + font.variant + " " + font.weight + " " + font.size + "px/" + font.lineHeight + "px " + font.family;
} else {
textStyle = font;
}
// Retrieve (or create) the cache for the text's layer and styles
layerCache = this._textCache[layer];
if (layerCache == null) {
layerCache = this._textCache[layer] = {};
}
styleCache = layerCache[textStyle];
if (styleCache == null) {
styleCache = layerCache[textStyle] = {};
}
info = styleCache[text];
// If we can't find a matching element in our cache, create a new one
if (info == null) {
var element = $("<div></div>").html(text)
.css({
position: "absolute",
'max-width': width,
top: -9999
})
.appendTo(this.getTextLayer(layer));
if (typeof font === "object") {
element.css({
font: textStyle,
color: font.color
});
} else if (typeof font === "string") {
element.addClass(font);
}
info = styleCache[text] = {
width: element.outerWidth(true),
height: element.outerHeight(true),
element: element,
positions: []
};
element.detach();
}
return info;
};
// Adds a text string to the canvas text overlay.
//
// The text isn't drawn immediately; it is marked as rendering, which will
// result in its addition to the canvas on the next render pass.
//
// @param {string} layer A string of space-separated CSS classes uniquely
// identifying the layer containing this text.
// @param {number} x X coordinate at which to draw the text.
// @param {number} y Y coordinate at which to draw the text.
// @param {string} text Text string to draw.
// @param {(string|object)=} font Either a string of space-separated CSS
// classes or a font-spec object, defining the text's font and style.
// @param {number=} angle Angle at which to rotate the text, in degrees.
// Angle is currently unused, it will be implemented in the future.
// @param {number=} width Maximum width of the text before it wraps.
// @param {string=} halign Horizontal alignment of the text; either "left",
// "center" or "right".
// @param {string=} valign Vertical alignment of the text; either "top",
// "middle" or "bottom".
Canvas.prototype.addText = function(layer, x, y, text, font, angle, width, halign, valign) {
var info = this.getTextInfo(layer, text, font, angle, width),
positions = info.positions;
// Tweak the div's position to match the text's alignment
if (halign == "center") {
x -= info.width / 2;
} else if (halign == "right") {
x -= info.width;
}
if (valign == "middle") {
y -= info.height / 2;
} else if (valign == "bottom") {
y -= info.height;
}
// Determine whether this text already exists at this position.
// If so, mark it for inclusion in the next render pass.
for (var i = 0, position; position = positions[i]; i++) {
if (position.x == x && position.y == y) {
position.active = true;
return;
}
}
// If the text doesn't exist at this position, create a new entry
// For the very first position we'll re-use the original element,
// while for subsequent ones we'll clone it.
position = {
active: true,
rendered: false,
element: positions.length ? info.element.clone() : info.element,
x: x,
y: y
};
positions.push(position);
// Move the element to its final position within the container
position.element.css({
top: Math.round(y),
left: Math.round(x),
'text-align': halign // In case the text wraps
});
};
// Removes one or more text strings from the canvas text overlay.
//
// If no parameters are given, all text within the layer is removed.
//
// Note that the text is not immediately removed; it is simply marked as
// inactive, which will result in its removal on the next render pass.
// This avoids the performance penalty for 'clear and redraw' behavior,
// where we potentially get rid of all text on a layer, but will likely
// add back most or all of it later, as when redrawing axes, for example.
//
// @param {string} layer A string of space-separated CSS classes uniquely
// identifying the layer containing this text.
// @param {number=} x X coordinate of the text.
// @param {number=} y Y coordinate of the text.
// @param {string=} text Text string to remove.
// @param {(string|object)=} font Either a string of space-separated CSS
// classes or a font-spec object, defining the text's font and style.
// @param {number=} angle Angle at which the text is rotated, in degrees.
// Angle is currently unused, it will be implemented in the future.
Canvas.prototype.removeText = function(layer, x, y, text, font, angle) {
if (text == null) {
var layerCache = this._textCache[layer];
if (layerCache != null) {
for (var styleKey in layerCache) {
if (hasOwnProperty.call(layerCache, styleKey)) {
var styleCache = layerCache[styleKey];
for (var key in styleCache) {
if (hasOwnProperty.call(styleCache, key)) {
var positions = styleCache[key].positions;
for (var i = 0, position; position = positions[i]; i++) {
position.active = false;
}
}
}
}
}
}
} else {
var positions = this.getTextInfo(layer, text, font, angle).positions;
for (var i = 0, position; position = positions[i]; i++) {
if (position.x == x && position.y == y) {
position.active = false;
}
}
}
};
///////////////////////////////////////////////////////////////////////////
// The top-level container for the entire plot.
function Plot(placeholder, data_, options_, plugins) {
        // data is of the form:
// [ series1, series2 ... ]
// where series is either just the data as [ [x1, y1], [x2, y2], ... ]
// or { data: [ [x1, y1], [x2, y2], ... ], label: "some label", ... }
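        //
        // A hedged usage sketch of the public entry point that constructs a Plot
        // (selector and data values are illustrative):
        //
        //     $.plot($("#placeholder"), [
        //         [ [0, 3], [1, 8], [2, 5] ],                      // bare data array
        //         { data: [ [0, 1], [1, 2] ], label: "series 2" }  // series object
        //     ], { series: { lines: { show: true } } });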
var series = [],
options = {
// the color theme used for graphs
colors: ["#edc240", "#afd8f8", "#cb4b4b", "#4da74d", "#9440ed"],
legend: {
show: true,
                    noColumns: 1, // number of columns in legend table
labelFormatter: null, // fn: string -> string
labelBoxBorderColor: "#ccc", // border color for the little label boxes
container: null, // container (as jQuery object) to put legend in, null means default on top of graph
position: "ne", // position of default legend container within plot
margin: 5, // distance from grid edge to default legend container within plot
backgroundColor: null, // null means auto-detect
backgroundOpacity: 0.85, // set to 0 to avoid background
sorted: null // default to no legend sorting
},
xaxis: {
show: null, // null = auto-detect, true = always, false = never
position: "bottom", // or "top"
mode: null, // null or "time"
font: null, // null (derived from CSS in placeholder) or object like { size: 11, lineHeight: 13, style: "italic", weight: "bold", family: "sans-serif", variant: "small-caps" }
color: null, // base color, labels, ticks
tickColor: null, // possibly different color of ticks, e.g. "rgba(0,0,0,0.15)"
transform: null, // null or f: number -> number to transform axis
inverseTransform: null, // if transform is set, this should be the inverse function
min: null, // min. value to show, null means set automatically
max: null, // max. value to show, null means set automatically
autoscaleMargin: null, // margin in % to add if auto-setting min/max
ticks: null, // either [1, 3] or [[1, "a"], 3] or (fn: axis info -> ticks) or app. number of ticks for auto-ticks
tickFormatter: null, // fn: number -> string
labelWidth: null, // size of tick labels in pixels
labelHeight: null,
reserveSpace: null, // whether to reserve space even if axis isn't shown
tickLength: null, // size in pixels of ticks, or "full" for whole line
alignTicksWithAxis: null, // axis number or null for no sync
tickDecimals: null, // no. of decimals, null means auto
tickSize: null, // number or [number, "unit"]
minTickSize: null // number or [number, "unit"]
},
yaxis: {
autoscaleMargin: 0.02,
position: "left" // or "right"
},
xaxes: [],
yaxes: [],
series: {
points: {
show: false,
radius: 3,
lineWidth: 2, // in pixels
fill: true,
fillColor: "#ffffff",
symbol: "circle" // or callback
},
lines: {
// we don't put in show: false so we can see
// whether lines were actively disabled
lineWidth: 2, // in pixels
fill: false,
fillColor: null,
steps: false
// Omit 'zero', so we can later default its value to
// match that of the 'fill' option.
},
bars: {
show: false,
lineWidth: 2, // in pixels
barWidth: 1, // in units of the x axis
fill: true,
fillColor: null,
align: "left", // "left", "right", or "center"
horizontal: false,
zero: true
},
shadowSize: 3,
highlightColor: null
},
grid: {
show: true,
aboveData: false,
color: "#545454", // primary color used for outline and labels
backgroundColor: null, // null for transparent, else color
borderColor: null, // set if different from the grid color
tickColor: null, // color for the ticks, e.g. "rgba(0,0,0,0.15)"
margin: 0, // distance from the canvas edge to the grid
labelMargin: 5, // in pixels
axisMargin: 8, // in pixels
borderWidth: 2, // in pixels
minBorderMargin: null, // in pixels, null means taken from points radius
markings: null, // array of ranges or fn: axes -> array of ranges
markingsColor: "#f4f4f4",
markingsLineWidth: 2,
// interactive stuff
clickable: false,
hoverable: false,
autoHighlight: true, // highlight in case mouse is near
mouseActiveRadius: 10 // how far the mouse can be away to activate an item
},
interaction: {
redrawOverlayInterval: 1000/60 // time between updates, -1 means in same flow
},
hooks: {}
},
surface = null, // the canvas for the plot itself
overlay = null, // canvas for interactive stuff on top of plot
eventHolder = null, // jQuery object that events should be bound to
ctx = null, octx = null,
xaxes = [], yaxes = [],
plotOffset = { left: 0, right: 0, top: 0, bottom: 0},
plotWidth = 0, plotHeight = 0,
hooks = {
processOptions: [],
processRawData: [],
processDatapoints: [],
processOffset: [],
drawBackground: [],
drawSeries: [],
draw: [],
bindEvents: [],
drawOverlay: [],
shutdown: []
},
plot = this;
// public functions
plot.setData = setData;
plot.setupGrid = setupGrid;
plot.draw = draw;
plot.getPlaceholder = function() { return placeholder; };
plot.getCanvas = function() { return surface.element; };
plot.getPlotOffset = function() { return plotOffset; };
plot.width = function () { return plotWidth; };
plot.height = function () { return plotHeight; };
plot.offset = function () {
var o = eventHolder.offset();
o.left += plotOffset.left;
o.top += plotOffset.top;
return o;
};
plot.getData = function () { return series; };
plot.getAxes = function () {
var res = {}, i;
$.each(xaxes.concat(yaxes), function (_, axis) {
if (axis)
res[axis.direction + (axis.n != 1 ? axis.n : "") + "axis"] = axis;
});
return res;
};
plot.getXAxes = function () { return xaxes; };
plot.getYAxes = function () { return yaxes; };
plot.c2p = canvasToAxisCoords;
plot.p2c = axisToCanvasCoords;
plot.getOptions = function () { return options; };
plot.highlight = highlight;
plot.unhighlight = unhighlight;
plot.triggerRedrawOverlay = triggerRedrawOverlay;
plot.pointOffset = function(point) {
return {
left: parseInt(xaxes[axisNumber(point, "x") - 1].p2c(+point.x) + plotOffset.left, 10),
top: parseInt(yaxes[axisNumber(point, "y") - 1].p2c(+point.y) + plotOffset.top, 10)
};
};
plot.shutdown = shutdown;
plot.destroy = function () {
shutdown();
placeholder.removeData("plot").empty();
series = [];
options = null;
surface = null;
overlay = null;
eventHolder = null;
ctx = null;
octx = null;
xaxes = [];
yaxes = [];
hooks = null;
highlights = [];
plot = null;
};
plot.resize = function () {
var width = placeholder.width(),
height = placeholder.height();
surface.resize(width, height);
overlay.resize(width, height);
};
// public attributes
plot.hooks = hooks;
// initialize
initPlugins(plot);
parseOptions(options_);
setupCanvases();
setData(data_);
setupGrid();
draw();
bindEvents();
function executeHooks(hook, args) {
args = [plot].concat(args);
for (var i = 0; i < hook.length; ++i)
hook[i].apply(this, args);
}
function initPlugins() {
// References to key classes, allowing plugins to modify them
var classes = {
Canvas: Canvas
};
for (var i = 0; i < plugins.length; ++i) {
var p = plugins[i];
p.init(plot, classes);
if (p.options)
$.extend(true, options, p.options);
}
}
function parseOptions(opts) {
$.extend(true, options, opts);
            // $.extend merges arrays, rather than replacing them. When fewer
            // colors are provided than the size of the default palette, we
// end up with those colors plus the remaining defaults, which is
// not expected behavior; avoid it by replacing them here.
if (opts && opts.colors) {
options.colors = opts.colors;
}
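            // Worked example (using the defaults above): merging ["#f00"] into the
            // five-color default palette would otherwise leave
            // ["#f00", "#afd8f8", "#cb4b4b", "#4da74d", "#9440ed"].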
if (options.xaxis.color == null)
options.xaxis.color = $.color.parse(options.grid.color).scale('a', 0.22).toString();
if (options.yaxis.color == null)
options.yaxis.color = $.color.parse(options.grid.color).scale('a', 0.22).toString();
if (options.xaxis.tickColor == null) // grid.tickColor for back-compatibility
options.xaxis.tickColor = options.grid.tickColor || options.xaxis.color;
if (options.yaxis.tickColor == null) // grid.tickColor for back-compatibility
options.yaxis.tickColor = options.grid.tickColor || options.yaxis.color;
if (options.grid.borderColor == null)
options.grid.borderColor = options.grid.color;
if (options.grid.tickColor == null)
options.grid.tickColor = $.color.parse(options.grid.color).scale('a', 0.22).toString();
// Fill in defaults for axis options, including any unspecified
// font-spec fields, if a font-spec was provided.
// If no x/y axis options were provided, create one of each anyway,
// since the rest of the code assumes that they exist.
var i, axisOptions, axisCount,
fontSize = placeholder.css("font-size"),
fontSizeDefault = fontSize ? +fontSize.replace("px", "") : 13,
fontDefaults = {
style: placeholder.css("font-style"),
size: Math.round(0.8 * fontSizeDefault),
variant: placeholder.css("font-variant"),
weight: placeholder.css("font-weight"),
family: placeholder.css("font-family")
};
axisCount = options.xaxes.length || 1;
for (i = 0; i < axisCount; ++i) {
axisOptions = options.xaxes[i];
if (axisOptions && !axisOptions.tickColor) {
axisOptions.tickColor = axisOptions.color;
}
axisOptions = $.extend(true, {}, options.xaxis, axisOptions);
options.xaxes[i] = axisOptions;
if (axisOptions.font) {
axisOptions.font = $.extend({}, fontDefaults, axisOptions.font);
if (!axisOptions.font.color) {
axisOptions.font.color = axisOptions.color;
}
if (!axisOptions.font.lineHeight) {
axisOptions.font.lineHeight = Math.round(axisOptions.font.size * 1.15);
}
}
}
axisCount = options.yaxes.length || 1;
for (i = 0; i < axisCount; ++i) {
axisOptions = options.yaxes[i];
if (axisOptions && !axisOptions.tickColor) {
axisOptions.tickColor = axisOptions.color;
}
axisOptions = $.extend(true, {}, options.yaxis, axisOptions);
options.yaxes[i] = axisOptions;
if (axisOptions.font) {
axisOptions.font = $.extend({}, fontDefaults, axisOptions.font);
if (!axisOptions.font.color) {
axisOptions.font.color = axisOptions.color;
}
if (!axisOptions.font.lineHeight) {
axisOptions.font.lineHeight = Math.round(axisOptions.font.size * 1.15);
}
}
}
// backwards compatibility, to be removed in future
if (options.xaxis.noTicks && options.xaxis.ticks == null)
options.xaxis.ticks = options.xaxis.noTicks;
if (options.yaxis.noTicks && options.yaxis.ticks == null)
options.yaxis.ticks = options.yaxis.noTicks;
if (options.x2axis) {
options.xaxes[1] = $.extend(true, {}, options.xaxis, options.x2axis);
options.xaxes[1].position = "top";
// Override the inherited min/max to allow the axis to auto-scale
if (options.x2axis.min == null) {
options.xaxes[1].min = null;
}
if (options.x2axis.max == null) {
options.xaxes[1].max = null;
}
}
if (options.y2axis) {
options.yaxes[1] = $.extend(true, {}, options.yaxis, options.y2axis);
options.yaxes[1].position = "right";
// Override the inherited min/max to allow the axis to auto-scale
if (options.y2axis.min == null) {
options.yaxes[1].min = null;
}
if (options.y2axis.max == null) {
options.yaxes[1].max = null;
}
}
if (options.grid.coloredAreas)
options.grid.markings = options.grid.coloredAreas;
if (options.grid.coloredAreasColor)
options.grid.markingsColor = options.grid.coloredAreasColor;
if (options.lines)
$.extend(true, options.series.lines, options.lines);
if (options.points)
$.extend(true, options.series.points, options.points);
if (options.bars)
$.extend(true, options.series.bars, options.bars);
if (options.shadowSize != null)
options.series.shadowSize = options.shadowSize;
if (options.highlightColor != null)
options.series.highlightColor = options.highlightColor;
// save options on axes for future reference
for (i = 0; i < options.xaxes.length; ++i)
getOrCreateAxis(xaxes, i + 1).options = options.xaxes[i];
for (i = 0; i < options.yaxes.length; ++i)
getOrCreateAxis(yaxes, i + 1).options = options.yaxes[i];
// add hooks from options
for (var n in hooks)
if (options.hooks[n] && options.hooks[n].length)
hooks[n] = hooks[n].concat(options.hooks[n]);
executeHooks(hooks.processOptions, [options]);
}
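// Replace the plot's data: parse the raw input into series objects,
// fill in per-series defaults and recompute the data extents.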
function setData(d) {
series = parseData(d);
fillInSeriesOptions();
processData();
}
function parseData(d) {
var res = [];
for (var i = 0; i < d.length; ++i) {
var s = $.extend(true, {}, options.series);
if (d[i].data != null) {
s.data = d[i].data; // move the data instead of deep-copy
delete d[i].data;
$.extend(true, s, d[i]);
d[i].data = s.data;
}
else
s.data = d[i];
res.push(s);
}
return res;
}
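// Resolve the 1-based number of the axis a series refers to;
// accepts an axis object or a number, defaulting to the first axis.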
function axisNumber(obj, coord) {
var a = obj[coord + "axis"];
if (typeof a == "object") // if we got a real axis, extract number
a = a.n;
if (typeof a != "number")
a = 1; // default to first axis
return a;
}
function allAxes() {
// return flat array without annoying null entries
return $.grep(xaxes.concat(yaxes), function (a) { return a; });
}
function canvasToAxisCoords(pos) {
// return an object with x/y corresponding to all used axes
var res = {}, i, axis;
for (i = 0; i < xaxes.length; ++i) {
axis = xaxes[i];
if (axis && axis.used)
res["x" + axis.n] = axis.c2p(pos.left);
}
for (i = 0; i < yaxes.length; ++i) {
axis = yaxes[i];
if (axis && axis.used)
res["y" + axis.n] = axis.c2p(pos.top);
}
if (res.x1 !== undefined)
res.x = res.x1;
if (res.y1 !== undefined)
res.y = res.y1;
return res;
}
function axisToCanvasCoords(pos) {
// get canvas coords from the first pair of x/y found in pos
var res = {}, i, axis, key;
for (i = 0; i < xaxes.length; ++i) {
axis = xaxes[i];
if (axis && axis.used) {
key = "x" + axis.n;
if (pos[key] == null && axis.n == 1)
key = "x";
if (pos[key] != null) {
res.left = axis.p2c(pos[key]);
break;
}
}
}
for (i = 0; i < yaxes.length; ++i) {
axis = yaxes[i];
if (axis && axis.used) {
key = "y" + axis.n;
if (pos[key] == null && axis.n == 1)
key = "y";
if (pos[key] != null) {
res.top = axis.p2c(pos[key]);
break;
}
}
}
return res;
}
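// Lazily create the axis at the given 1-based slot, seeding its
// options from the shared xaxis/yaxis defaults.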
function getOrCreateAxis(axes, number) {
if (!axes[number - 1])
axes[number - 1] = {
n: number, // save the number for future reference
direction: axes == xaxes ? "x" : "y",
options: $.extend(true, {}, axes == xaxes ? options.xaxis : options.yaxis)
};
return axes[number - 1];
}
function fillInSeriesOptions() {
var neededColors = series.length, maxIndex = -1, i;
// Subtract the number of series that already have fixed colors or
// color indexes from the number that we still need to generate.
for (i = 0; i < series.length; ++i) {
var sc = series[i].color;
if (sc != null) {
neededColors--;
if (typeof sc == "number" && sc > maxIndex) {
maxIndex = sc;
}
}
}
// If any of the series have fixed color indexes, then we need to
// generate at least as many colors as the highest index.
if (neededColors <= maxIndex) {
neededColors = maxIndex + 1;
}
// Generate all the colors, using first the option colors and then
// variations on those colors once they're exhausted.
var c, colors = [], colorPool = options.colors,
colorPoolSize = colorPool.length, variation = 0;
for (i = 0; i < neededColors; i++) {
c = $.color.parse(colorPool[i % colorPoolSize] || "#666");
// Each time we exhaust the colors in the pool we adjust
// a scaling factor used to produce more variations on
// those colors. The factor alternates negative/positive
// to produce lighter/darker colors.
// Reset the variation after every few cycles, or else
// it will end up producing only white or black colors.
if (i % colorPoolSize == 0 && i) {
if (variation >= 0) {
if (variation < 0.5) {
variation = -variation - 0.2;
} else variation = 0;
} else variation = -variation;
}
colors[i] = c.scale('rgb', 1 + variation);
}
// Finalize the series options, filling in their colors
var colori = 0, s;
for (i = 0; i < series.length; ++i) {
s = series[i];
// assign colors
if (s.color == null) {
s.color = colors[colori].toString();
++colori;
}
else if (typeof s.color == "number")
s.color = colors[s.color].toString();
// turn on lines automatically in case nothing is set
if (s.lines.show == null) {
var v, show = true;
for (v in s)
if (s[v] && s[v].show) {
show = false;
break;
}
if (show)
s.lines.show = true;
}
// If nothing was provided for lines.zero, default it to match
// lines.fill, since areas by default should extend to zero.
if (s.lines.zero == null) {
s.lines.zero = !!s.lines.fill;
}
// setup axes
s.xaxis = getOrCreateAxis(xaxes, axisNumber(s, "x"));
s.yaxis = getOrCreateAxis(yaxes, axisNumber(s, "y"));
}
}
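// Normalize every series' raw data into a flat, typed points array
// and track per-axis datamin/datamax for auto-scaling.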
function processData() {
var topSentry = Number.POSITIVE_INFINITY,
bottomSentry = Number.NEGATIVE_INFINITY,
fakeInfinity = Number.MAX_VALUE,
i, j, k, m, length,
s, points, ps, x, y, axis, val, f, p,
data, format;
function updateAxis(axis, min, max) {
if (min < axis.datamin && min != -fakeInfinity)
axis.datamin = min;
if (max > axis.datamax && max != fakeInfinity)
axis.datamax = max;
}
$.each(allAxes(), function (_, axis) {
// init axis
axis.datamin = topSentry;
axis.datamax = bottomSentry;
axis.used = false;
});
for (i = 0; i < series.length; ++i) {
s = series[i];
s.datapoints = { points: [] };
executeHooks(hooks.processRawData, [ s, s.data, s.datapoints ]);
}
// first pass: clean and copy data
for (i = 0; i < series.length; ++i) {
s = series[i];
data = s.data;
format = s.datapoints.format;
if (!format) {
format = [];
// find out how to copy
format.push({ x: true, number: true, required: true });
format.push({ y: true, number: true, required: true });
if (s.bars.show || (s.lines.show && s.lines.fill)) {
var autoscale = !!((s.bars.show && s.bars.zero) || (s.lines.show && s.lines.zero));
format.push({ y: true, number: true, required: false, defaultValue: 0, autoscale: autoscale });
if (s.bars.horizontal) {
delete format[format.length - 1].y;
format[format.length - 1].x = true;
}
}
s.datapoints.format = format;
}
if (s.datapoints.pointsize != null)
continue; // already filled in
s.datapoints.pointsize = format.length;
ps = s.datapoints.pointsize;
points = s.datapoints.points;
var insertSteps = s.lines.show && s.lines.steps;
s.xaxis.used = s.yaxis.used = true;
for (j = k = 0; j < data.length; ++j, k += ps) {
p = data[j];
var nullify = p == null;
if (!nullify) {
for (m = 0; m < ps; ++m) {
val = p[m];
f = format[m];
if (f) {
if (f.number && val != null) {
val = +val; // convert to number
if (isNaN(val))
val = null;
else if (val == Infinity)
val = fakeInfinity;
else if (val == -Infinity)
val = -fakeInfinity;
}
if (val == null) {
if (f.required)
nullify = true;
if (f.defaultValue != null)
val = f.defaultValue;
}
}
points[k + m] = val;
}
}
if (nullify) {
for (m = 0; m < ps; ++m) {
val = points[k + m];
if (val != null) {
f = format[m];
// extract min/max info
if (f.autoscale !== false) {
if (f.x) {
updateAxis(s.xaxis, val, val);
}
if (f.y) {
updateAxis(s.yaxis, val, val);
}
}
}
points[k + m] = null;
}
}
else {
// a little bit of line specific stuff that
// perhaps shouldn't be here, but lacking
// better means...
if (insertSteps && k > 0
&& points[k - ps] != null
&& points[k - ps] != points[k]
&& points[k - ps + 1] != points[k + 1]) {
// copy the point to make room for a middle point
for (m = 0; m < ps; ++m)
points[k + ps + m] = points[k + m];
// middle point has same y
points[k + 1] = points[k - ps + 1];
// we've added a point, better reflect that
k += ps;
}
}
}
}
// give the hooks a chance to run
for (i = 0; i < series.length; ++i) {
s = series[i];
executeHooks(hooks.processDatapoints, [ s, s.datapoints]);
}
// second pass: find datamax/datamin for auto-scaling
for (i = 0; i < series.length; ++i) {
s = series[i];
points = s.datapoints.points;
ps = s.datapoints.pointsize;
format = s.datapoints.format;
var xmin = topSentry, ymin = topSentry,
xmax = bottomSentry, ymax = bottomSentry;
for (j = 0; j < points.length; j += ps) {
if (points[j] == null)
continue;
for (m = 0; m < ps; ++m) {
val = points[j + m];
f = format[m];
if (!f || f.autoscale === false || val == fakeInfinity || val == -fakeInfinity)
continue;
if (f.x) {
if (val < xmin)
xmin = val;
if (val > xmax)
xmax = val;
}
if (f.y) {
if (val < ymin)
ymin = val;
if (val > ymax)
ymax = val;
}
}
}
if (s.bars.show) {
// make sure we've got room for the bar on the dancing floor
var delta;
switch (s.bars.align) {
case "left":
delta = 0;
break;
case "right":
delta = -s.bars.barWidth;
break;
default:
delta = -s.bars.barWidth / 2;
}
if (s.bars.horizontal) {
ymin += delta;
ymax += delta + s.bars.barWidth;
}
else {
xmin += delta;
xmax += delta + s.bars.barWidth;
}
}
updateAxis(s.xaxis, xmin, xmax);
updateAxis(s.yaxis, ymin, ymax);
}
$.each(allAxes(), function (_, axis) {
if (axis.datamin == topSentry)
axis.datamin = null;
if (axis.datamax == bottomSentry)
axis.datamax = null;
});
}
function setupCanvases() {
// Make sure the placeholder is clear of everything except canvases
// from a previous plot in this container that we'll try to re-use.
placeholder.css("padding", 0) // padding messes up the positioning
.children().filter(function(){
return !$(this).hasClass("flot-overlay") && !$(this).hasClass("flot-base");
}).remove();
if (placeholder.css("position") == 'static')
placeholder.css("position", "relative"); // for positioning labels and overlay
surface = new Canvas("flot-base", placeholder);
overlay = new Canvas("flot-overlay", placeholder); // overlay canvas for interactive features
ctx = surface.context;
octx = overlay.context;
// define which element we're listening for events on
eventHolder = $(overlay.element).unbind();
// If we're re-using a plot object, shut down the old one
var existing = placeholder.data("plot");
if (existing) {
existing.shutdown();
overlay.clear();
}
// save in case we get replotted
placeholder.data("plot", plot);
}
function bindEvents() {
// bind events
if (options.grid.hoverable) {
eventHolder.mousemove(onMouseMove);
// Use bind, rather than .mouseleave, because we officially
// still support jQuery 1.2.6, which doesn't define a shortcut
// for mouseenter or mouseleave. This was a bug/oversight that
// was fixed somewhere around 1.3.x. We can return to using
// .mouseleave when we drop support for 1.2.6.
eventHolder.bind("mouseleave", onMouseLeave);
}
if (options.grid.clickable)
eventHolder.click(onClick);
executeHooks(hooks.bindEvents, [eventHolder]);
}
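// Cancel any pending overlay redraw and detach the event handlers.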
function shutdown() {
if (redrawTimeout)
clearTimeout(redrawTimeout);
eventHolder.unbind("mousemove", onMouseMove);
eventHolder.unbind("mouseleave", onMouseLeave);
eventHolder.unbind("click", onClick);
executeHooks(hooks.shutdown, [eventHolder]);
}
function setTransformationHelpers(axis) {
// set helper functions on the axis, assumes plot area
// has been computed already
function identity(x) { return x; }
var s, m, t = axis.options.transform || identity,
it = axis.options.inverseTransform;
// precompute how much the axis is scaling a point
// in canvas space
if (axis.direction == "x") {
s = axis.scale = plotWidth / Math.abs(t(axis.max) - t(axis.min));
m = Math.min(t(axis.max), t(axis.min));
}
else {
s = axis.scale = plotHeight / Math.abs(t(axis.max) - t(axis.min));
s = -s;
m = Math.max(t(axis.max), t(axis.min));
}
// data point to canvas coordinate
if (t == identity) // slight optimization
axis.p2c = function (p) { return (p - m) * s; };
else
axis.p2c = function (p) { return (t(p) - m) * s; };
// canvas coordinate to data point
if (!it)
axis.c2p = function (c) { return m + c / s; };
else
axis.c2p = function (c) { return it(m + c / s); };
}
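// Measure the rendered width/height of the axis's tick labels so
// the layout code can reserve space for them.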
function measureTickLabels(axis) {
var opts = axis.options,
ticks = axis.ticks || [],
labelWidth = opts.labelWidth || 0,
labelHeight = opts.labelHeight || 0,
maxWidth = labelWidth || (axis.direction == "x" ? Math.floor(surface.width / (ticks.length || 1)) : null),
legacyStyles = axis.direction + "Axis " + axis.direction + axis.n + "Axis",
layer = "flot-" + axis.direction + "-axis flot-" + axis.direction + axis.n + "-axis " + legacyStyles,
font = opts.font || "flot-tick-label tickLabel";
for (var i = 0; i < ticks.length; ++i) {
var t = ticks[i];
if (!t.label)
continue;
var info = surface.getTextInfo(layer, t.label, font, null, maxWidth);
labelWidth = Math.max(labelWidth, info.width);
labelHeight = Math.max(labelHeight, info.height);
}
axis.labelWidth = opts.labelWidth || labelWidth;
axis.labelHeight = opts.labelHeight || labelHeight;
}
function allocateAxisBoxFirstPhase(axis) {
// find the bounding box of the axis by looking at label
// widths/heights and ticks, make room by diminishing the
// plotOffset; this first phase only looks at one
// dimension per axis, the other dimension depends on the
// other axes so will have to wait
var lw = axis.labelWidth,
lh = axis.labelHeight,
pos = axis.options.position,
isXAxis = axis.direction === "x",
tickLength = axis.options.tickLength,
axisMargin = options.grid.axisMargin,
padding = options.grid.labelMargin,
innermost = true,
outermost = true,
first = true,
found = false;
// Determine the axis's position in its direction and on its side
$.each(isXAxis ? xaxes : yaxes, function(i, a) {
if (a && (a.show || a.reserveSpace)) {
if (a === axis) {
found = true;
} else if (a.options.position === pos) {
if (found) {
outermost = false;
} else {
innermost = false;
}
}
if (!found) {
first = false;
}
}
});
// The outermost axis on each side has no margin
if (outermost) {
axisMargin = 0;
}
// The ticks for the first axis in each direction stretch across
if (tickLength == null) {
tickLength = first ? "full" : 5;
}
if (!isNaN(+tickLength))
padding += +tickLength;
if (isXAxis) {
lh += padding;
if (pos == "bottom") {
plotOffset.bottom += lh + axisMargin;
axis.box = { top: surface.height - plotOffset.bottom, height: lh };
}
else {
axis.box = { top: plotOffset.top + axisMargin, height: lh };
plotOffset.top += lh + axisMargin;
}
}
else {
lw += padding;
if (pos == "left") {
axis.box = { left: plotOffset.left + axisMargin, width: lw };
plotOffset.left += lw + axisMargin;
}
else {
plotOffset.right += lw + axisMargin;
axis.box = { left: surface.width - plotOffset.right, width: lw };
}
}
// save for future reference
axis.position = pos;
axis.tickLength = tickLength;
axis.box.padding = padding;
axis.innermost = innermost;
}
function allocateAxisBoxSecondPhase(axis) {
// now that all axis boxes have been placed in one
// dimension, we can set the remaining dimension coordinates
if (axis.direction == "x") {
axis.box.left = plotOffset.left - axis.labelWidth / 2;
axis.box.width = surface.width - plotOffset.left - plotOffset.right + axis.labelWidth;
}
else {
axis.box.top = plotOffset.top - axis.labelHeight / 2;
axis.box.height = surface.height - plotOffset.bottom - plotOffset.top + axis.labelHeight;
}
}
function adjustLayoutForThingsStickingOut() {
// possibly adjust plot offset to ensure everything stays
// inside the canvas and isn't clipped off
var minMargin = options.grid.minBorderMargin,
axis, i;
// check stuff from the plot (FIXME: this should just read
// a value from the series, otherwise it's impossible to
// customize)
if (minMargin == null) {
minMargin = 0;
for (i = 0; i < series.length; ++i)
minMargin = Math.max(minMargin, 2 * (series[i].points.radius + series[i].points.lineWidth/2));
}
var margins = {
left: minMargin,
right: minMargin,
top: minMargin,
bottom: minMargin
};
// check axis labels, note we don't check the actual
// labels but instead use the overall width/height to not
// jump as much around with replots
$.each(allAxes(), function (_, axis) {
if (axis.reserveSpace && axis.ticks && axis.ticks.length) {
if (axis.direction === "x") {
margins.left = Math.max(margins.left, axis.labelWidth / 2);
margins.right = Math.max(margins.right, axis.labelWidth / 2);
} else {
margins.bottom = Math.max(margins.bottom, axis.labelHeight / 2);
margins.top = Math.max(margins.top, axis.labelHeight / 2);
}
}
});
plotOffset.left = Math.ceil(Math.max(margins.left, plotOffset.left));
plotOffset.right = Math.ceil(Math.max(margins.right, plotOffset.right));
plotOffset.top = Math.ceil(Math.max(margins.top, plotOffset.top));
plotOffset.bottom = Math.ceil(Math.max(margins.bottom, plotOffset.bottom));
}
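// Compute plot offsets, axis ranges, ticks and boxes, then the
// final plot dimensions and the per-axis transformation helpers.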
function setupGrid() {
var i, axes = allAxes(), showGrid = options.grid.show;
// Initialize the plot's offset from the edge of the canvas
for (var a in plotOffset) {
var margin = options.grid.margin || 0;
plotOffset[a] = typeof margin == "number" ? margin : margin[a] || 0;
}
executeHooks(hooks.processOffset, [plotOffset]);
// If the grid is visible, add its border width to the offset
for (var a in plotOffset) {
if (typeof options.grid.borderWidth == "object") {
plotOffset[a] += showGrid ? options.grid.borderWidth[a] : 0;
}
else {
plotOffset[a] += showGrid ? options.grid.borderWidth : 0;
}
}
$.each(axes, function (_, axis) {
var axisOpts = axis.options;
axis.show = axisOpts.show == null ? axis.used : axisOpts.show;
axis.reserveSpace = axisOpts.reserveSpace == null ? axis.show : axisOpts.reserveSpace;
setRange(axis);
});
if (showGrid) {
var allocatedAxes = $.grep(axes, function (axis) {
return axis.show || axis.reserveSpace;
});
$.each(allocatedAxes, function (_, axis) {
// make the ticks
setupTickGeneration(axis);
setTicks(axis);
snapRangeToTicks(axis, axis.ticks);
// find labelWidth/Height for axis
measureTickLabels(axis);
});
// with all dimensions calculated, we can compute the
// axis bounding boxes, start from the outside
// (reverse order)
for (i = allocatedAxes.length - 1; i >= 0; --i)
allocateAxisBoxFirstPhase(allocatedAxes[i]);
// make sure we've got enough space for things that
// might stick out
adjustLayoutForThingsStickingOut();
$.each(allocatedAxes, function (_, axis) {
allocateAxisBoxSecondPhase(axis);
});
}
plotWidth = surface.width - plotOffset.left - plotOffset.right;
plotHeight = surface.height - plotOffset.bottom - plotOffset.top;
// now that we have the proper plot dimensions, we can compute the scaling
$.each(axes, function (_, axis) {
setTransformationHelpers(axis);
});
if (showGrid) {
drawAxisLabels();
}
insertLegend();
}
function setRange(axis) {
var opts = axis.options,
min = +(opts.min != null ? opts.min : axis.datamin),
max = +(opts.max != null ? opts.max : axis.datamax),
delta = max - min;
if (delta == 0.0) {
// degenerate case
var widen = max == 0 ? 1 : 0.01;
if (opts.min == null)
min -= widen;
// always widen max if we couldn't widen min to ensure we
// don't fall into min == max which doesn't work
if (opts.max == null || opts.min != null)
max += widen;
}
else {
// consider autoscaling
var margin = opts.autoscaleMargin;
if (margin != null) {
if (opts.min == null) {
min -= delta * margin;
// make sure we don't go below zero if all values
// are positive
if (min < 0 && axis.datamin != null && axis.datamin >= 0)
min = 0;
}
if (opts.max == null) {
max += delta * margin;
if (max > 0 && axis.datamax != null && axis.datamax <= 0)
max = 0;
}
}
}
axis.min = min;
axis.max = max;
}
function setupTickGeneration(axis) {
var opts = axis.options;
// estimate number of ticks
var noTicks;
if (typeof opts.ticks == "number" && opts.ticks > 0)
noTicks = opts.ticks;
else
// heuristic based on the model a*sqrt(x) fitted to
// some data points that seemed reasonable
noTicks = 0.3 * Math.sqrt(axis.direction == "x" ? surface.width : surface.height);
var delta = (axis.max - axis.min) / noTicks,
dec = -Math.floor(Math.log(delta) / Math.LN10),
maxDec = opts.tickDecimals;
if (maxDec != null && dec > maxDec) {
dec = maxDec;
}
var magn = Math.pow(10, -dec),
norm = delta / magn, // norm is between 1.0 and 10.0
size;
if (norm < 1.5) {
size = 1;
} else if (norm < 3) {
size = 2;
// special case for 2.5, requires an extra decimal
if (norm > 2.25 && (maxDec == null || dec + 1 <= maxDec)) {
size = 2.5;
++dec;
}
} else if (norm < 7.5) {
size = 5;
} else {
size = 10;
}
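// Illustrative example (not from the source): delta = 0.3 gives
// dec = 1, magn = 0.1 and norm = 3, so size = 5; after the scaling
// below, the resulting tickSize is 0.5.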
size *= magn;
if (opts.minTickSize != null && size < opts.minTickSize) {
size = opts.minTickSize;
}
axis.delta = delta;
axis.tickDecimals = Math.max(0, maxDec != null ? maxDec : dec);
axis.tickSize = opts.tickSize || size;
// Time mode was moved to a plug-in in 0.8, and since so many people use it
// we'll add an especially friendly reminder to make sure they included it.
if (opts.mode == "time" && !axis.tickGenerator) {
throw new Error("Time mode requires the flot.time plugin.");
}
// Flot supports base-10 axes; any other mode is handled by a plug-in,
// like flot.time.js.
if (!axis.tickGenerator) {
axis.tickGenerator = function (axis) {
var ticks = [],
start = floorInBase(axis.min, axis.tickSize),
i = 0,
v = Number.NaN,
prev;
do {
prev = v;
v = start + i * axis.tickSize;
ticks.push(v);
++i;
} while (v < axis.max && v != prev);
return ticks;
};
axis.tickFormatter = function (value, axis) {
var factor = axis.tickDecimals ? Math.pow(10, axis.tickDecimals) : 1;
var formatted = "" + Math.round(value * factor) / factor;
// If tickDecimals was specified, ensure that we have exactly that
// much precision; otherwise default to the value's own precision.
if (axis.tickDecimals != null) {
var decimal = formatted.indexOf(".");
var precision = decimal == -1 ? 0 : formatted.length - decimal - 1;
if (precision < axis.tickDecimals) {
return (precision ? formatted : formatted + ".") + ("" + factor).substr(1, axis.tickDecimals - precision);
}
}
return formatted;
};
}
if ($.isFunction(opts.tickFormatter))
axis.tickFormatter = function (v, axis) { return "" + opts.tickFormatter(v, axis); };
if (opts.alignTicksWithAxis != null) {
var otherAxis = (axis.direction == "x" ? xaxes : yaxes)[opts.alignTicksWithAxis - 1];
if (otherAxis && otherAxis.used && otherAxis != axis) {
// consider snapping min/max to outermost nice ticks
var niceTicks = axis.tickGenerator(axis);
if (niceTicks.length > 0) {
if (opts.min == null)
axis.min = Math.min(axis.min, niceTicks[0]);
if (opts.max == null && niceTicks.length > 1)
axis.max = Math.max(axis.max, niceTicks[niceTicks.length - 1]);
}
axis.tickGenerator = function (axis) {
// copy ticks, scaled to this axis
var ticks = [], v, i;
for (i = 0; i < otherAxis.ticks.length; ++i) {
v = (otherAxis.ticks[i].v - otherAxis.min) / (otherAxis.max - otherAxis.min);
v = axis.min + v * (axis.max - axis.min);
ticks.push(v);
}
return ticks;
};
// we might need an extra decimal since forced
// ticks don't necessarily fit naturally
if (!axis.mode && opts.tickDecimals == null) {
var extraDec = Math.max(0, -Math.floor(Math.log(axis.delta) / Math.LN10) + 1),
ts = axis.tickGenerator(axis);
// only proceed if the tick interval rounded
// with an extra decimal doesn't give us a
// zero at end
if (!(ts.length > 1 && /\..*0$/.test((ts[1] - ts[0]).toFixed(extraDec))))
axis.tickDecimals = extraDec;
}
}
}
}
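// Build axis.ticks from the options (a count, an array or a
// function) or the default generator, labelling each tick.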
function setTicks(axis) {
var oticks = axis.options.ticks, ticks = [];
if (oticks == null || (typeof oticks == "number" && oticks > 0))
ticks = axis.tickGenerator(axis);
else if (oticks) {
if ($.isFunction(oticks))
// generate the ticks
ticks = oticks(axis);
else
ticks = oticks;
}
// clean up/labelify the supplied ticks, copy them over
var i, v;
axis.ticks = [];
for (i = 0; i < ticks.length; ++i) {
var label = null;
var t = ticks[i];
if (typeof t == "object") {
v = +t[0];
if (t.length > 1)
label = t[1];
}
else
v = +t;
if (label == null)
label = axis.tickFormatter(v, axis);
if (!isNaN(v))
axis.ticks.push({ v: v, label: label });
}
}
function snapRangeToTicks(axis, ticks) {
if (axis.options.autoscaleMargin && ticks.length > 0) {
// snap to ticks
if (axis.options.min == null)
axis.min = Math.min(axis.min, ticks[0].v);
if (axis.options.max == null && ticks.length > 1)
axis.max = Math.max(axis.max, ticks[ticks.length - 1].v);
}
}
function draw() {
surface.clear();
executeHooks(hooks.drawBackground, [ctx]);
var grid = options.grid;
// draw background, if any
if (grid.show && grid.backgroundColor)
drawBackground();
if (grid.show && !grid.aboveData) {
drawGrid();
}
for (var i = 0; i < series.length; ++i) {
executeHooks(hooks.drawSeries, [ctx, series[i]]);
drawSeries(series[i]);
}
executeHooks(hooks.draw, [ctx]);
if (grid.show && grid.aboveData) {
drawGrid();
}
surface.render();
// A draw implies that either the axes or data have changed, so we
// should probably update the overlay highlights as well.
triggerRedrawOverlay();
}
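// Pull the from/to range for the given coordinate ("x" or "y") out
// of a markings-style ranges object, e.g. { xaxis: { from: 0, to: 5 } }.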
function extractRange(ranges, coord) {
var axis, from, to, key, axes = allAxes();
for (var i = 0; i < axes.length; ++i) {
axis = axes[i];
if (axis.direction == coord) {
key = coord + axis.n + "axis";
if (!ranges[key] && axis.n == 1)
key = coord + "axis"; // support x1axis as xaxis
if (ranges[key]) {
from = ranges[key].from;
to = ranges[key].to;
break;
}
}
}
// backwards-compat stuff - to be removed in future
if (!ranges[key]) {
axis = coord == "x" ? xaxes[0] : yaxes[0];
from = ranges[coord + "1"];
to = ranges[coord + "2"];
}
// auto-reverse as an added bonus
if (from != null && to != null && from > to) {
var tmp = from;
from = to;
to = tmp;
}
return { from: from, to: to, axis: axis };
}
function drawBackground() {
ctx.save();
ctx.translate(plotOffset.left, plotOffset.top);
ctx.fillStyle = getColorOrGradient(options.grid.backgroundColor, plotHeight, 0, "rgba(255, 255, 255, 0)");
ctx.fillRect(0, 0, plotWidth, plotHeight);
ctx.restore();
}
function drawGrid() {
var i, axes, bw, bc;
ctx.save();
ctx.translate(plotOffset.left, plotOffset.top);
// draw markings
var markings = options.grid.markings;
if (markings) {
if ($.isFunction(markings)) {
axes = plot.getAxes();
// xmin etc. is backwards compatibility, to be
// removed in the future
axes.xmin = axes.xaxis.min;
axes.xmax = axes.xaxis.max;
axes.ymin = axes.yaxis.min;
axes.ymax = axes.yaxis.max;
markings = markings(axes);
}
for (i = 0; i < markings.length; ++i) {
var m = markings[i],
xrange = extractRange(m, "x"),
yrange = extractRange(m, "y");
// fill in missing
if (xrange.from == null)
xrange.from = xrange.axis.min;
if (xrange.to == null)
xrange.to = xrange.axis.max;
if (yrange.from == null)
yrange.from = yrange.axis.min;
if (yrange.to == null)
yrange.to = yrange.axis.max;
// clip
if (xrange.to < xrange.axis.min || xrange.from > xrange.axis.max ||
yrange.to < yrange.axis.min || yrange.from > yrange.axis.max)
continue;
xrange.from = Math.max(xrange.from, xrange.axis.min);
xrange.to = Math.min(xrange.to, xrange.axis.max);
yrange.from = Math.max(yrange.from, yrange.axis.min);
yrange.to = Math.min(yrange.to, yrange.axis.max);
var xequal = xrange.from === xrange.to,
yequal = yrange.from === yrange.to;
if (xequal && yequal) {
continue;
}
// then draw
xrange.from = Math.floor(xrange.axis.p2c(xrange.from));
xrange.to = Math.floor(xrange.axis.p2c(xrange.to));
yrange.from = Math.floor(yrange.axis.p2c(yrange.from));
yrange.to = Math.floor(yrange.axis.p2c(yrange.to));
if (xequal || yequal) {
var lineWidth = m.lineWidth || options.grid.markingsLineWidth,
subPixel = lineWidth % 2 ? 0.5 : 0;
ctx.beginPath();
ctx.strokeStyle = m.color || options.grid.markingsColor;
ctx.lineWidth = lineWidth;
if (xequal) {
ctx.moveTo(xrange.to + subPixel, yrange.from);
ctx.lineTo(xrange.to + subPixel, yrange.to);
} else {
ctx.moveTo(xrange.from, yrange.to + subPixel);
ctx.lineTo(xrange.to, yrange.to + subPixel);
}
ctx.stroke();
} else {
ctx.fillStyle = m.color || options.grid.markingsColor;
ctx.fillRect(xrange.from, yrange.to,
xrange.to - xrange.from,
yrange.from - yrange.to);
}
}
}
// draw the ticks
axes = allAxes();
bw = options.grid.borderWidth;
for (var j = 0; j < axes.length; ++j) {
var axis = axes[j], box = axis.box,
t = axis.tickLength, x, y, xoff, yoff;
if (!axis.show || axis.ticks.length == 0)
continue;
ctx.lineWidth = 1;
// find the edges
if (axis.direction == "x") {
x = 0;
if (t == "full")
y = (axis.position == "top" ? 0 : plotHeight);
else
y = box.top - plotOffset.top + (axis.position == "top" ? box.height : 0);
}
else {
y = 0;
if (t == "full")
x = (axis.position == "left" ? 0 : plotWidth);
else
x = box.left - plotOffset.left + (axis.position == "left" ? box.width : 0);
}
// draw tick bar
if (!axis.innermost) {
ctx.strokeStyle = axis.options.color;
ctx.beginPath();
xoff = yoff = 0;
if (axis.direction == "x")
xoff = plotWidth + 1;
else
yoff = plotHeight + 1;
if (ctx.lineWidth == 1) {
if (axis.direction == "x") {
y = Math.floor(y) + 0.5;
} else {
x = Math.floor(x) + 0.5;
}
}
ctx.moveTo(x, y);
ctx.lineTo(x + xoff, y + yoff);
ctx.stroke();
}
// draw ticks
ctx.strokeStyle = axis.options.tickColor;
ctx.beginPath();
for (i = 0; i < axis.ticks.length; ++i) {
var v = axis.ticks[i].v;
xoff = yoff = 0;
if (isNaN(v) || v < axis.min || v > axis.max
// skip those lying on the axes if we got a border
|| (t == "full"
&& ((typeof bw == "object" && bw[axis.position] > 0) || bw > 0)
&& (v == axis.min || v == axis.max)))
continue;
if (axis.direction == "x") {
x = axis.p2c(v);
yoff = t == "full" ? -plotHeight : t;
if (axis.position == "top")
yoff = -yoff;
}
else {
y = axis.p2c(v);
xoff = t == "full" ? -plotWidth : t;
if (axis.position == "left")
xoff = -xoff;
}
if (ctx.lineWidth == 1) {
if (axis.direction == "x")
x = Math.floor(x) + 0.5;
else
y = Math.floor(y) + 0.5;
}
ctx.moveTo(x, y);
ctx.lineTo(x + xoff, y + yoff);
}
ctx.stroke();
}
// draw border
if (bw) {
// If either borderWidth or borderColor is an object, then draw the border
// line by line instead of as one rectangle
bc = options.grid.borderColor;
if(typeof bw == "object" || typeof bc == "object") {
if (typeof bw !== "object") {
bw = {top: bw, right: bw, bottom: bw, left: bw};
}
if (typeof bc !== "object") {
bc = {top: bc, right: bc, bottom: bc, left: bc};
}
if (bw.top > 0) {
ctx.strokeStyle = bc.top;
ctx.lineWidth = bw.top;
ctx.beginPath();
ctx.moveTo(0 - bw.left, 0 - bw.top/2);
ctx.lineTo(plotWidth, 0 - bw.top/2);
ctx.stroke();
}
if (bw.right > 0) {
ctx.strokeStyle = bc.right;
ctx.lineWidth = bw.right;
ctx.beginPath();
ctx.moveTo(plotWidth + bw.right / 2, 0 - bw.top);
ctx.lineTo(plotWidth + bw.right / 2, plotHeight);
ctx.stroke();
}
if (bw.bottom > 0) {
ctx.strokeStyle = bc.bottom;
ctx.lineWidth = bw.bottom;
ctx.beginPath();
ctx.moveTo(plotWidth + bw.right, plotHeight + bw.bottom / 2);
ctx.lineTo(0, plotHeight + bw.bottom / 2);
ctx.stroke();
}
if (bw.left > 0) {
ctx.strokeStyle = bc.left;
ctx.lineWidth = bw.left;
ctx.beginPath();
ctx.moveTo(0 - bw.left/2, plotHeight + bw.bottom);
ctx.lineTo(0 - bw.left/2, 0);
ctx.stroke();
}
}
else {
ctx.lineWidth = bw;
ctx.strokeStyle = options.grid.borderColor;
ctx.strokeRect(-bw/2, -bw/2, plotWidth + bw, plotHeight + bw);
}
}
ctx.restore();
}
function drawAxisLabels() {
$.each(allAxes(), function (_, axis) {
var box = axis.box,
legacyStyles = axis.direction + "Axis " + axis.direction + axis.n + "Axis",
layer = "flot-" + axis.direction + "-axis flot-" + axis.direction + axis.n + "-axis " + legacyStyles,
font = axis.options.font || "flot-tick-label tickLabel",
tick, x, y, halign, valign;
// Remove text before checking for axis.show and ticks.length;
// otherwise plugins, like flot-tickrotor, that draw their own
// tick labels will end up with both theirs and the defaults.
surface.removeText(layer);
if (!axis.show || axis.ticks.length == 0)
return;
for (var i = 0; i < axis.ticks.length; ++i) {
tick = axis.ticks[i];
if (!tick.label || tick.v < axis.min || tick.v > axis.max)
continue;
if (axis.direction == "x") {
halign = "center";
x = plotOffset.left + axis.p2c(tick.v);
if (axis.position == "bottom") {
y = box.top + box.padding;
} else {
y = box.top + box.height - box.padding;
valign = "bottom";
}
} else {
valign = "middle";
y = plotOffset.top + axis.p2c(tick.v);
if (axis.position == "left") {
x = box.left + box.width - box.padding;
halign = "right";
} else {
x = box.left + box.padding;
}
}
surface.addText(layer, x, y, tick.label, font, null, null, halign, valign);
}
});
}
function drawSeries(series) {
if (series.lines.show)
drawSeriesLines(series);
if (series.bars.show)
drawSeriesBars(series);
if (series.points.show)
drawSeriesPoints(series);
}
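// Render a series as clipped line segments, optionally drawing a
// drop shadow and a filled area beneath the line first.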
function drawSeriesLines(series) {
function plotLine(datapoints, xoffset, yoffset, axisx, axisy) {
var points = datapoints.points,
ps = datapoints.pointsize,
prevx = null, prevy = null;
ctx.beginPath();
for (var i = ps; i < points.length; i += ps) {
var x1 = points[i - ps], y1 = points[i - ps + 1],
x2 = points[i], y2 = points[i + 1];
if (x1 == null || x2 == null)
continue;
// clip with ymin
if (y1 <= y2 && y1 < axisy.min) {
if (y2 < axisy.min)
continue; // line segment is outside
// compute new intersection point
x1 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
y1 = axisy.min;
}
else if (y2 <= y1 && y2 < axisy.min) {
if (y1 < axisy.min)
continue;
x2 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
y2 = axisy.min;
}
// clip with ymax
if (y1 >= y2 && y1 > axisy.max) {
if (y2 > axisy.max)
continue;
x1 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
y1 = axisy.max;
}
else if (y2 >= y1 && y2 > axisy.max) {
if (y1 > axisy.max)
continue;
x2 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
y2 = axisy.max;
}
// clip with xmin
if (x1 <= x2 && x1 < axisx.min) {
if (x2 < axisx.min)
continue;
y1 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
x1 = axisx.min;
}
else if (x2 <= x1 && x2 < axisx.min) {
if (x1 < axisx.min)
continue;
y2 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
x2 = axisx.min;
}
// clip with xmax
if (x1 >= x2 && x1 > axisx.max) {
if (x2 > axisx.max)
continue;
y1 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
x1 = axisx.max;
}
else if (x2 >= x1 && x2 > axisx.max) {
if (x1 > axisx.max)
continue;
y2 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
x2 = axisx.max;
}
if (x1 != prevx || y1 != prevy)
ctx.moveTo(axisx.p2c(x1) + xoffset, axisy.p2c(y1) + yoffset);
prevx = x2;
prevy = y2;
ctx.lineTo(axisx.p2c(x2) + xoffset, axisy.p2c(y2) + yoffset);
}
ctx.stroke();
}
function plotLineArea(datapoints, axisx, axisy) {
var points = datapoints.points,
ps = datapoints.pointsize,
bottom = Math.min(Math.max(0, axisy.min), axisy.max),
i = 0, top, areaOpen = false,
ypos = 1, segmentStart = 0, segmentEnd = 0;
// we process each segment in two turns, first forward
// direction to sketch out top, then once we hit the
// end we go backwards to sketch the bottom
while (true) {
if (ps > 0 && i > points.length + ps)
break;
i += ps; // ps is negative if going backwards
var x1 = points[i - ps],
y1 = points[i - ps + ypos],
x2 = points[i], y2 = points[i + ypos];
if (areaOpen) {
if (ps > 0 && x1 != null && x2 == null) {
// at turning point
segmentEnd = i;
ps = -ps;
ypos = 2;
continue;
}
if (ps < 0 && i == segmentStart + ps) {
// done with the reverse sweep
ctx.fill();
areaOpen = false;
ps = -ps;
ypos = 1;
i = segmentStart = segmentEnd + ps;
continue;
}
}
if (x1 == null || x2 == null)
continue;
// clip x values
// clip with xmin
if (x1 <= x2 && x1 < axisx.min) {
if (x2 < axisx.min)
continue;
y1 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
x1 = axisx.min;
}
else if (x2 <= x1 && x2 < axisx.min) {
if (x1 < axisx.min)
continue;
y2 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1;
x2 = axisx.min;
}
// clip with xmax
if (x1 >= x2 && x1 > axisx.max) {
if (x2 > axisx.max)
continue;
y1 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
x1 = axisx.max;
}
else if (x2 >= x1 && x2 > axisx.max) {
if (x1 > axisx.max)
continue;
y2 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1;
x2 = axisx.max;
}
if (!areaOpen) {
// open area
ctx.beginPath();
ctx.moveTo(axisx.p2c(x1), axisy.p2c(bottom));
areaOpen = true;
}
// first check the case where both endpoints are outside
if (y1 >= axisy.max && y2 >= axisy.max) {
ctx.lineTo(axisx.p2c(x1), axisy.p2c(axisy.max));
ctx.lineTo(axisx.p2c(x2), axisy.p2c(axisy.max));
continue;
}
else if (y1 <= axisy.min && y2 <= axisy.min) {
ctx.lineTo(axisx.p2c(x1), axisy.p2c(axisy.min));
ctx.lineTo(axisx.p2c(x2), axisy.p2c(axisy.min));
continue;
}
// else it's a bit more complicated, there might
// be a flat maxed out rectangle first, then a
// triangular cutout or reverse; to find these
// keep track of the current x values
var x1old = x1, x2old = x2;
// clip the y values, without shortcutting, we
// go through all cases in turn
// clip with ymin
if (y1 <= y2 && y1 < axisy.min && y2 >= axisy.min) {
x1 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
y1 = axisy.min;
}
else if (y2 <= y1 && y2 < axisy.min && y1 >= axisy.min) {
x2 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1;
y2 = axisy.min;
}
// clip with ymax
if (y1 >= y2 && y1 > axisy.max && y2 <= axisy.max) {
x1 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
y1 = axisy.max;
}
else if (y2 >= y1 && y2 > axisy.max && y1 <= axisy.max) {
x2 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1;
y2 = axisy.max;
}
// if the x value was changed we got a rectangle
// to fill
if (x1 != x1old) {
ctx.lineTo(axisx.p2c(x1old), axisy.p2c(y1));
// it goes to (x1, y1), but we fill that below
}
// fill the triangular section; this sometimes results
// in redundant points if (x1, y1) hasn't changed
// since the previous lineTo, but we just ignore that
ctx.lineTo(axisx.p2c(x1), axisy.p2c(y1));
ctx.lineTo(axisx.p2c(x2), axisy.p2c(y2));
// fill the other rectangle if it's there
if (x2 != x2old) {
ctx.lineTo(axisx.p2c(x2), axisy.p2c(y2));
ctx.lineTo(axisx.p2c(x2old), axisy.p2c(y2));
}
}
}
ctx.save();
ctx.translate(plotOffset.left, plotOffset.top);
ctx.lineJoin = "round";
var lw = series.lines.lineWidth,
sw = series.shadowSize;
// FIXME: consider another form of shadow when filling is turned on
if (lw > 0 && sw > 0) {
// draw shadow as a thick and thin line with transparency
ctx.lineWidth = sw;
ctx.strokeStyle = "rgba(0,0,0,0.1)";
// position shadow at angle from the mid of line
var angle = Math.PI/18;
plotLine(series.datapoints, Math.sin(angle) * (lw/2 + sw/2), Math.cos(angle) * (lw/2 + sw/2), series.xaxis, series.yaxis);
ctx.lineWidth = sw/2;
plotLine(series.datapoints, Math.sin(angle) * (lw/2 + sw/4), Math.cos(angle) * (lw/2 + sw/4), series.xaxis, series.yaxis);
}
ctx.lineWidth = lw;
ctx.strokeStyle = series.color;
var fillStyle = getFillStyle(series.lines, series.color, 0, plotHeight);
if (fillStyle) {
ctx.fillStyle = fillStyle;
plotLineArea(series.datapoints, series.xaxis, series.yaxis);
}
if (lw > 0)
plotLine(series.datapoints, 0, 0, series.xaxis, series.yaxis);
ctx.restore();
}
function drawSeriesPoints(series) {
function plotPoints(datapoints, radius, fillStyle, offset, shadow, axisx, axisy, symbol) {
var points = datapoints.points, ps = datapoints.pointsize;
for (var i = 0; i < points.length; i += ps) {
var x = points[i], y = points[i + 1];
if (x == null || x < axisx.min || x > axisx.max || y < axisy.min || y > axisy.max)
continue;
ctx.beginPath();
x = axisx.p2c(x);
y = axisy.p2c(y) + offset;
if (symbol == "circle")
ctx.arc(x, y, radius, 0, shadow ? Math.PI : Math.PI * 2, false);
else
symbol(ctx, x, y, radius, shadow);
ctx.closePath();
if (fillStyle) {
ctx.fillStyle = fillStyle;
ctx.fill();
}
ctx.stroke();
}
}
ctx.save();
ctx.translate(plotOffset.left, plotOffset.top);
var lw = series.points.lineWidth,
sw = series.shadowSize,
radius = series.points.radius,
symbol = series.points.symbol;
// If the user sets the line width to 0, we change it to a very
// small value. A line width of 0 seems to force the default of 1.
// Doing the conditional here allows the shadow setting to still be
// optional even with a lineWidth of 0.
if (lw == 0)
lw = 0.0001;
if (lw > 0 && sw > 0) {
// draw shadow in two steps
var w = sw / 2;
ctx.lineWidth = w;
ctx.strokeStyle = "rgba(0,0,0,0.1)";
plotPoints(series.datapoints, radius, null, w + w/2, true,
series.xaxis, series.yaxis, symbol);
ctx.strokeStyle = "rgba(0,0,0,0.2)";
plotPoints(series.datapoints, radius, null, w/2, true,
series.xaxis, series.yaxis, symbol);
}
ctx.lineWidth = lw;
ctx.strokeStyle = series.color;
plotPoints(series.datapoints, radius,
getFillStyle(series.points, series.color), 0, false,
series.xaxis, series.yaxis, symbol);
ctx.restore();
}
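// Draw a single bar from the base value b to y (or to x in
// horizontal mode), clipping it against the axes and skipping
// the edges that were clipped away.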
function drawBar(x, y, b, barLeft, barRight, fillStyleCallback, axisx, axisy, c, horizontal, lineWidth) {
var left, right, bottom, top,
drawLeft, drawRight, drawTop, drawBottom,
tmp;
// in horizontal mode, we start the bar from the left
// instead of from the bottom so it appears to be
// horizontal rather than vertical
if (horizontal) {
drawBottom = drawRight = drawTop = true;
drawLeft = false;
left = b;
right = x;
top = y + barLeft;
bottom = y + barRight;
// account for negative bars
if (right < left) {
tmp = right;
right = left;
left = tmp;
drawLeft = true;
drawRight = false;
}
}
else {
drawLeft = drawRight = drawTop = true;
drawBottom = false;
left = x + barLeft;
right = x + barRight;
bottom = b;
top = y;
// account for negative bars
if (top < bottom) {
tmp = top;
top = bottom;
bottom = tmp;
drawBottom = true;
drawTop = false;
}
}
// clip
if (right < axisx.min || left > axisx.max ||
top < axisy.min || bottom > axisy.max)
return;
if (left < axisx.min) {
left = axisx.min;
drawLeft = false;
}
if (right > axisx.max) {
right = axisx.max;
drawRight = false;
}
if (bottom < axisy.min) {
bottom = axisy.min;
drawBottom = false;
}
if (top > axisy.max) {
top = axisy.max;
drawTop = false;
}
left = axisx.p2c(left);
bottom = axisy.p2c(bottom);
right = axisx.p2c(right);
top = axisy.p2c(top);
// fill the bar
if (fillStyleCallback) {
c.fillStyle = fillStyleCallback(bottom, top);
c.fillRect(left, top, right - left, bottom - top);
}
// draw outline
if (lineWidth > 0 && (drawLeft || drawRight || drawTop || drawBottom)) {
c.beginPath();
// FIXME: inline moveTo is buggy with excanvas
c.moveTo(left, bottom);
if (drawLeft)
c.lineTo(left, top);
else
c.moveTo(left, top);
if (drawTop)
c.lineTo(right, top);
else
c.moveTo(right, top);
if (drawRight)
c.lineTo(right, bottom);
else
c.moveTo(right, bottom);
if (drawBottom)
c.lineTo(left, bottom);
else
c.moveTo(left, bottom);
c.stroke();
}
}
function drawSeriesBars(series) {
function plotBars(datapoints, barLeft, barRight, fillStyleCallback, axisx, axisy) {
var points = datapoints.points, ps = datapoints.pointsize;
for (var i = 0; i < points.length; i += ps) {
if (points[i] == null)
continue;
drawBar(points[i], points[i + 1], points[i + 2], barLeft, barRight, fillStyleCallback, axisx, axisy, ctx, series.bars.horizontal, series.bars.lineWidth);
}
}
ctx.save();
ctx.translate(plotOffset.left, plotOffset.top);
// FIXME: figure out a way to add shadows (for instance along the right edge)
ctx.lineWidth = series.bars.lineWidth;
ctx.strokeStyle = series.color;
var barLeft;
switch (series.bars.align) {
case "left":
barLeft = 0;
break;
case "right":
barLeft = -series.bars.barWidth;
break;
default:
barLeft = -series.bars.barWidth / 2;
}
var fillStyleCallback = series.bars.fill ? function (bottom, top) { return getFillStyle(series.bars, series.color, bottom, top); } : null;
plotBars(series.datapoints, barLeft, barLeft + series.bars.barWidth, fillStyleCallback, series.xaxis, series.yaxis);
ctx.restore();
}
function getFillStyle(filloptions, seriesColor, bottom, top) {
var fill = filloptions.fill;
if (!fill)
return null;
if (filloptions.fillColor)
return getColorOrGradient(filloptions.fillColor, bottom, top, seriesColor);
var c = $.color.parse(seriesColor);
c.a = typeof fill == "number" ? fill : 0.4;
c.normalize();
return c.toString();
}
function insertLegend() {
if (options.legend.container != null) {
$(options.legend.container).html("");
} else {
placeholder.find(".legend").remove();
}
if (!options.legend.show) {
return;
}
var fragments = [], entries = [], rowStarted = false,
lf = options.legend.labelFormatter, s, label;
// Build a list of legend entries, with each having a label and a color
for (var i = 0; i < series.length; ++i) {
s = series[i];
if (s.label) {
label = lf ? lf(s.label, s) : s.label;
if (label) {
entries.push({
label: label,
color: s.color
});
}
}
}
// Sort the legend using either the default or a custom comparator
if (options.legend.sorted) {
if ($.isFunction(options.legend.sorted)) {
entries.sort(options.legend.sorted);
} else if (options.legend.sorted == "reverse") {
entries.reverse();
} else {
var ascending = options.legend.sorted != "descending";
entries.sort(function(a, b) {
return a.label == b.label ? 0 : (
(a.label < b.label) != ascending ? 1 : -1 // Logical XOR
);
});
}
}
// Generate markup for the list of entries, in their final order
for (var i = 0; i < entries.length; ++i) {
var entry = entries[i];
if (i % options.legend.noColumns == 0) {
if (rowStarted)
fragments.push('</tr>');
fragments.push('<tr>');
rowStarted = true;
}
fragments.push(
'<td class="legendColorBox"><div style="border:1px solid ' + options.legend.labelBoxBorderColor + ';padding:1px"><div style="width:4px;height:0;border:5px solid ' + entry.color + ';overflow:hidden"></div></div></td>' +
'<td class="legendLabel">' + entry.label + '</td>'
);
}
if (rowStarted)
fragments.push('</tr>');
if (fragments.length == 0)
return;
var table = '<table style="font-size:smaller;color:' + options.grid.color + '">' + fragments.join("") + '</table>';
if (options.legend.container != null)
$(options.legend.container).html(table);
else {
var pos = "",
p = options.legend.position,
m = options.legend.margin;
if (m[0] == null)
m = [m, m];
if (p.charAt(0) == "n")
pos += 'top:' + (m[1] + plotOffset.top) + 'px;';
else if (p.charAt(0) == "s")
pos += 'bottom:' + (m[1] + plotOffset.bottom) + 'px;';
if (p.charAt(1) == "e")
pos += 'right:' + (m[0] + plotOffset.right) + 'px;';
else if (p.charAt(1) == "w")
pos += 'left:' + (m[0] + plotOffset.left) + 'px;';
var legend = $('<div class="legend">' + table.replace('style="', 'style="position:absolute;' + pos +';') + '</div>').appendTo(placeholder);
if (options.legend.backgroundOpacity != 0.0) {
// put in the transparent background
// separately to avoid blended labels and
// label boxes
var c = options.legend.backgroundColor;
if (c == null) {
c = options.grid.backgroundColor;
if (c && typeof c == "string")
c = $.color.parse(c);
else
c = $.color.extract(legend, 'background-color');
c.a = 1;
c = c.toString();
}
var div = legend.children();
$('<div style="position:absolute;width:' + div.width() + 'px;height:' + div.height() + 'px;' + pos +'background-color:' + c + ';"> </div>').prependTo(legend).css('opacity', options.legend.backgroundOpacity);
}
}
}
// interactive features
var highlights = [],
redrawTimeout = null;
// returns the data item the mouse is over, or null if none is found
function findNearbyItem(mouseX, mouseY, seriesFilter) {
var maxDistance = options.grid.mouseActiveRadius,
smallestDistance = maxDistance * maxDistance + 1,
item = null, foundPoint = false, i, j, ps;
for (i = series.length - 1; i >= 0; --i) {
if (!seriesFilter(series[i]))
continue;
var s = series[i],
axisx = s.xaxis,
axisy = s.yaxis,
points = s.datapoints.points,
mx = axisx.c2p(mouseX), // precompute some stuff to make the loop faster
my = axisy.c2p(mouseY),
maxx = maxDistance / axisx.scale,
maxy = maxDistance / axisy.scale;
ps = s.datapoints.pointsize;
// with inverse transforms, we can't use the maxx/maxy
// optimization, sadly
if (axisx.options.inverseTransform)
maxx = Number.MAX_VALUE;
if (axisy.options.inverseTransform)
maxy = Number.MAX_VALUE;
if (s.lines.show || s.points.show) {
for (j = 0; j < points.length; j += ps) {
var x = points[j], y = points[j + 1];
if (x == null)
continue;
// For points and lines, the cursor must be within a
// certain distance to the data point
if (x - mx > maxx || x - mx < -maxx ||
y - my > maxy || y - my < -maxy)
continue;
// We have to calculate distances in pixels, not in
// data units, because the scales of the axes may be different
var dx = Math.abs(axisx.p2c(x) - mouseX),
dy = Math.abs(axisy.p2c(y) - mouseY),
dist = dx * dx + dy * dy; // we save the sqrt
// use <= to ensure last point takes precedence
// (last generally means on top of)
if (dist < smallestDistance) {
smallestDistance = dist;
item = [i, j / ps];
}
}
}
if (s.bars.show && !item) { // no other point can be nearby
var barLeft, barRight;
switch (s.bars.align) {
case "left":
barLeft = 0;
break;
case "right":
barLeft = -s.bars.barWidth;
break;
default:
barLeft = -s.bars.barWidth / 2;
}
barRight = barLeft + s.bars.barWidth;
for (j = 0; j < points.length; j += ps) {
var x = points[j], y = points[j + 1], b = points[j + 2];
if (x == null)
continue;
// for a bar graph, the cursor must be inside the bar
if (series[i].bars.horizontal ?
(mx <= Math.max(b, x) && mx >= Math.min(b, x) &&
my >= y + barLeft && my <= y + barRight) :
(mx >= x + barLeft && mx <= x + barRight &&
my >= Math.min(b, y) && my <= Math.max(b, y)))
item = [i, j / ps];
}
}
}
if (item) {
i = item[0];
j = item[1];
ps = series[i].datapoints.pointsize;
return { datapoint: series[i].datapoints.points.slice(j * ps, (j + 1) * ps),
dataIndex: j,
series: series[i],
seriesIndex: i };
}
return null;
}
function onMouseMove(e) {
if (options.grid.hoverable)
triggerClickHoverEvent("plothover", e,
function (s) { return s["hoverable"] != false; });
}
function onMouseLeave(e) {
if (options.grid.hoverable)
triggerClickHoverEvent("plothover", e,
function (s) { return false; });
}
function onClick(e) {
triggerClickHoverEvent("plotclick", e,
function (s) { return s["clickable"] != false; });
}
// trigger click or hover event (they send the same parameters
// so we share their code)
function triggerClickHoverEvent(eventname, event, seriesFilter) {
var offset = eventHolder.offset(),
canvasX = event.pageX - offset.left - plotOffset.left,
canvasY = event.pageY - offset.top - plotOffset.top,
pos = canvasToAxisCoords({ left: canvasX, top: canvasY });
pos.pageX = event.pageX;
pos.pageY = event.pageY;
var item = findNearbyItem(canvasX, canvasY, seriesFilter);
if (item) {
// fill in mouse pos for any listeners out there
item.pageX = parseInt(item.series.xaxis.p2c(item.datapoint[0]) + offset.left + plotOffset.left, 10);
item.pageY = parseInt(item.series.yaxis.p2c(item.datapoint[1]) + offset.top + plotOffset.top, 10);
}
if (options.grid.autoHighlight) {
// clear auto-highlights
for (var i = 0; i < highlights.length; ++i) {
var h = highlights[i];
if (h.auto == eventname &&
!(item && h.series == item.series &&
h.point[0] == item.datapoint[0] &&
h.point[1] == item.datapoint[1]))
unhighlight(h.series, h.point);
}
if (item)
highlight(item.series, item.datapoint, eventname);
}
placeholder.trigger(eventname, [ pos, item ]);
}
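// Schedule an overlay redraw, batching rapid highlight changes via
// a timeout unless redrawOverlayInterval is -1 (draw synchronously).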
function triggerRedrawOverlay() {
var t = options.interaction.redrawOverlayInterval;
if (t == -1) { // skip event queue
drawOverlay();
return;
}
if (!redrawTimeout)
redrawTimeout = setTimeout(drawOverlay, t);
}
function drawOverlay() {
redrawTimeout = null;
// draw highlights
octx.save();
overlay.clear();
octx.translate(plotOffset.left, plotOffset.top);
var i, hi;
for (i = 0; i < highlights.length; ++i) {
hi = highlights[i];
if (hi.series.bars.show)
drawBarHighlight(hi.series, hi.point);
else
drawPointHighlight(hi.series, hi.point);
}
octx.restore();
executeHooks(hooks.drawOverlay, [octx]);
}
function highlight(s, point, auto) {
if (typeof s == "number")
s = series[s];
if (typeof point == "number") {
var ps = s.datapoints.pointsize;
point = s.datapoints.points.slice(ps * point, ps * (point + 1));
}
var i = indexOfHighlight(s, point);
if (i == -1) {
highlights.push({ series: s, point: point, auto: auto });
triggerRedrawOverlay();
}
else if (!auto)
highlights[i].auto = false;
}
function unhighlight(s, point) {
if (s == null && point == null) {
highlights = [];
triggerRedrawOverlay();
return;
}
if (typeof s == "number")
s = series[s];
if (typeof point == "number") {
var ps = s.datapoints.pointsize;
point = s.datapoints.points.slice(ps * point, ps * (point + 1));
}
var i = indexOfHighlight(s, point);
if (i != -1) {
highlights.splice(i, 1);
triggerRedrawOverlay();
}
}
function indexOfHighlight(s, p) {
for (var i = 0; i < highlights.length; ++i) {
var h = highlights[i];
if (h.series == s && h.point[0] == p[0]
&& h.point[1] == p[1])
return i;
}
return -1;
}
function drawPointHighlight(series, point) {
var x = point[0], y = point[1],
axisx = series.xaxis, axisy = series.yaxis,
highlightColor = (typeof series.highlightColor === "string") ? series.highlightColor : $.color.parse(series.color).scale('a', 0.5).toString();
if (x < axisx.min || x > axisx.max || y < axisy.min || y > axisy.max)
return;
var pointRadius = series.points.radius + series.points.lineWidth / 2;
octx.lineWidth = pointRadius;
octx.strokeStyle = highlightColor;
var radius = 1.5 * pointRadius;
x = axisx.p2c(x);
y = axisy.p2c(y);
octx.beginPath();
if (series.points.symbol == "circle")
octx.arc(x, y, radius, 0, 2 * Math.PI, false);
else
series.points.symbol(octx, x, y, radius, false);
octx.closePath();
octx.stroke();
}
function drawBarHighlight(series, point) {
var highlightColor = (typeof series.highlightColor === "string") ? series.highlightColor : $.color.parse(series.color).scale('a', 0.5).toString(),
fillStyle = highlightColor,
barLeft;
switch (series.bars.align) {
case "left":
barLeft = 0;
break;
case "right":
barLeft = -series.bars.barWidth;
break;
default:
barLeft = -series.bars.barWidth / 2;
}
octx.lineWidth = series.bars.lineWidth;
octx.strokeStyle = highlightColor;
drawBar(point[0], point[1], point[2] || 0, barLeft, barLeft + series.bars.barWidth,
function () { return fillStyle; }, series.xaxis, series.yaxis, octx, series.bars.horizontal, series.bars.lineWidth);
}
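// Resolve a fill specification to a canvas style: a string is used
// as-is, anything else is treated as a vertical gradient spec, e.g.
// (illustrative) { colors: ["#d00", { brightness: 0.6, opacity: 0.8 }] }.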
function getColorOrGradient(spec, bottom, top, defaultColor) {
if (typeof spec == "string")
return spec;
else {
// assume this is a gradient spec; IE currently only
// supports a simple vertical gradient properly, so that's
// what we support too
var gradient = ctx.createLinearGradient(0, top, 0, bottom);
for (var i = 0, l = spec.colors.length; i < l; ++i) {
var c = spec.colors[i];
if (typeof c != "string") {
var co = $.color.parse(defaultColor);
if (c.brightness != null)
co = co.scale('rgb', c.brightness);
if (c.opacity != null)
co.a *= c.opacity;
c = co.toString();
}
gradient.addColorStop(i / (l - 1), c);
}
return gradient;
}
}
}
// Add the plot function to the top level of the jQuery object
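// Typical usage (illustrative):
//   var plot = $.plot("#placeholder",
//       [ { label: "series 1", data: [[0, 1], [1, 3], [2, 2]] } ],
//       { series: { lines: { show: true } } });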
$.plot = function(placeholder, data, options) {
//var t0 = new Date();
var plot = new Plot($(placeholder), data, options, $.plot.plugins);
//(window.console ? console.log : alert)("time used (msecs): " + ((new Date()).getTime() - t0.getTime()));
return plot;
};
$.plot.version = "0.8.3";
$.plot.plugins = [];
// Also add the plot function as a chainable property
$.fn.plot = function(data, options) {
return this.each(function() {
$.plot(this, data, options);
});
};
// round to nearby lower multiple of base
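// e.g. floorInBase(7.3, 2.5) === 5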
function floorInBase(n, base) {
return base * Math.floor(n / base);
}
})(jQuery);
|
27182812/ChatGLM-LLaMA-chinese-insturct | 214,953 | src/transformers/generation/utils.py | # coding=utf-8
# Copyright 2020 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import warnings
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.distributed as dist
from torch import nn
from ..modeling_outputs import CausalLMOutputWithPast, Seq2SeqLMOutput
from ..models.auto import (
MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING,
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
MODEL_FOR_VISION_2_SEQ_MAPPING,
)
from ..pytorch_utils import torch_int_div
from ..utils import ModelOutput, logging
from .beam_constraints import DisjunctiveConstraint, PhrasalConstraint
from .beam_search import BeamScorer, BeamSearchScorer, ConstrainedBeamSearchScorer
from .configuration_utils import GenerationConfig
from .logits_process import (
EncoderNoRepeatNGramLogitsProcessor,
EncoderRepetitionPenaltyLogitsProcessor,
EpsilonLogitsWarper,
EtaLogitsWarper,
ExponentialDecayLengthPenalty,
ForcedBOSTokenLogitsProcessor,
ForcedEOSTokenLogitsProcessor,
ForceTokensLogitsProcessor,
HammingDiversityLogitsProcessor,
InfNanRemoveLogitsProcessor,
LogitNormalization,
LogitsProcessorList,
MinLengthLogitsProcessor,
MinNewTokensLengthLogitsProcessor,
NoBadWordsLogitsProcessor,
NoRepeatNGramLogitsProcessor,
PrefixConstrainedLogitsProcessor,
RepetitionPenaltyLogitsProcessor,
SuppressTokensAtBeginLogitsProcessor,
SuppressTokensLogitsProcessor,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
TypicalLogitsWarper,
)
from .stopping_criteria import (
MaxLengthCriteria,
MaxTimeCriteria,
StoppingCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
logger = logging.get_logger(__name__)
@dataclass
class GreedySearchDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using greedy search.
Args:
sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(torch.FloatTensor)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
scores: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class ContrastiveSearchEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using contrastive search.
Args:
sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(torch.FloatTensor)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer of the encoder) of shape `(batch_size, num_heads,
sequence_length, sequence_length)`.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
scores: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class ContrastiveSearchDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using contrastive search.
Args:
sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(torch.FloatTensor)`, *optional*, returned when `output_scores=True` is passed or when
`config.output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is
passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
scores: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class GreedySearchEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using greedy search. Hidden states and attention
weights of the decoder (respectively the encoder) can be accessed via the decoder_attentions and the
decoder_hidden_states attributes (respectively the encoder_attentions and the encoder_hidden_states attributes).
Args:
sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(torch.FloatTensor)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer of the encoder) of shape `(batch_size, num_heads,
sequence_length, sequence_length)`.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
scores: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class SampleDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using sampling.
Args:
sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(torch.FloatTensor)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
each generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`.
attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(num_return_sequences*batch_size, num_heads, generated_length,
sequence_length)`.
hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(num_return_sequences*batch_size, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
scores: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class SampleEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using sampling. Hidden states and attention weights of
the decoder (respectively the encoder) can be accessed via the decoder_attentions and the decoder_hidden_states
attributes (respectively the encoder_attentions and the encoder_hidden_states attributes).
Args:
sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(torch.FloatTensor)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
each generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`.
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer of the encoder) of shape
`(batch_size*num_return_sequences, num_heads, sequence_length, sequence_length)`.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size*num_return_sequences, sequence_length, hidden_size)`.
decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size*num_return_sequences, num_heads, generated_length,
sequence_length)`.
cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size*num_return_sequences, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
scores: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class BeamSearchDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using beam search.
Args:
sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
sequences_scores (`torch.FloatTensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Final beam scores of the generated `sequences`.
scores (`tuple(torch.FloatTensor)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Beam transition scores for each vocabulary token at each generation step, consisting of the log
probabilities of tokens conditioned on the log softmax of previously generated tokens in this beam.
Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token),
with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`.
beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Beam indices of generated token id at each generation step. `torch.LongTensor` of shape
`(batch_size*num_return_sequences, sequence_length)`.
attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`.
hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
sequences_scores: Optional[torch.FloatTensor] = None
scores: Optional[Tuple[torch.FloatTensor]] = None
beam_indices: Optional[torch.LongTensor] = None
attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class BeamSearchEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using beam search. Hidden states and attention weights
of the decoder (respectively the encoder) can be accessed via the decoder_attentions and the decoder_hidden_states
attributes (respectively the encoder_attentions and the encoder_hidden_states attributes).
Args:
sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
sequences_scores (`torch.FloatTensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Final beam scores of the generated `sequences`.
scores (`tuple(torch.FloatTensor)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Beam transition scores for each vocabulary token at each generation step, consisting of the log
probabilities of tokens conditioned on the log softmax of previously generated tokens in this beam.
Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token),
with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Beam indices of generated token id at each generation step. `torch.LongTensor` of shape
`(batch_size*num_return_sequences, sequence_length)`.
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer of the encoder) of shape `(batch_size, num_heads,
sequence_length, sequence_length)`.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`.
decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, num_heads, generated_length,
sequence_length)`.
cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
sequences_scores: Optional[torch.FloatTensor] = None
scores: Optional[Tuple[torch.FloatTensor]] = None
beam_indices: Optional[torch.LongTensor] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class BeamSampleDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using beam sample.
Args:
sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
sequences_scores (`torch.FloatTensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Final beam scores of the generated `sequences`.
scores (`tuple(torch.FloatTensor)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Beam transition scores for each vocabulary token at each generation step, consisting of the log
probabilities of tokens conditioned on the log softmax of previously generated tokens in this beam.
Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token),
with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`.
beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Beam indices of generated token id at each generation step. `torch.LongTensor` of shape
`(batch_size*num_return_sequences, sequence_length)`.
attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`.
hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
sequences_scores: Optional[torch.FloatTensor] = None
scores: Optional[Tuple[torch.FloatTensor]] = None
beam_indices: Optional[torch.LongTensor] = None
attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
@dataclass
class BeamSampleEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using beam sampling. Hidden states and attention
weights of the decoder (respectively the encoder) can be accessed via the decoder_attentions and the
decoder_hidden_states attributes (respectively the encoder_attentions and the encoder_hidden_states attributes).
Args:
sequences (`torch.LongTensor` of shape `(batch_size*num_beams, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
sequences_scores (`torch.FloatTensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Final beam scores of the generated `sequences`.
scores (`tuple(torch.FloatTensor)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Beam transition scores for each vocabulary token at each generation step, consisting of the log
probabilities of tokens conditioned on the log softmax of previously generated tokens in this beam.
Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token),
with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Beam indices of generated token id at each generation step. `torch.LongTensor` of shape
`(batch_size*num_return_sequences, sequence_length)`.
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer of the encoder) of shape `(batch_size, num_heads,
sequence_length, sequence_length)`.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size*num_beams, sequence_length, hidden_size)`.
decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`.
cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
sequences_scores: Optional[torch.FloatTensor] = None
scores: Optional[Tuple[torch.FloatTensor]] = None
beam_indices: Optional[torch.LongTensor] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
GreedySearchOutput = Union[GreedySearchEncoderDecoderOutput, GreedySearchDecoderOnlyOutput]
SampleOutput = Union[SampleEncoderDecoderOutput, SampleDecoderOnlyOutput]
BeamSearchOutput = Union[BeamSearchEncoderDecoderOutput, BeamSearchDecoderOnlyOutput]
BeamSampleOutput = Union[BeamSampleEncoderDecoderOutput, BeamSampleDecoderOnlyOutput]
ContrastiveSearchOutput = Union[ContrastiveSearchEncoderDecoderOutput, ContrastiveSearchDecoderOnlyOutput]
GenerateOutput = Union[GreedySearchOutput, SampleOutput, BeamSearchOutput, BeamSampleOutput, ContrastiveSearchOutput]
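# A minimal sketch of consuming these unions (variable names are illustrative):
# with `return_dict_in_generate=True`, `generate()` returns one of the output
# dataclasses above instead of a plain `torch.LongTensor`, so callers can
# branch on the concrete type, e.g.:
#   out = model.generate(input_ids, return_dict_in_generate=True, output_scores=True)
#   if isinstance(out, BeamSearchEncoderDecoderOutput):
#       print(out.sequences_scores)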
class GenerationMixin:
"""
A class containing all functions for auto-regressive text generation, to be used as a mixin in [`PreTrainedModel`].
The class exposes [`~generation.GenerationMixin.generate`], which can be used for:
- *greedy decoding* by calling [`~generation.GenerationMixin.greedy_search`] if `num_beams=1` and
`do_sample=False`
- *contrastive search* by calling [`~generation.GenerationMixin.contrastive_search`] if `penalty_alpha>0` and
`top_k>1`
- *multinomial sampling* by calling [`~generation.GenerationMixin.sample`] if `num_beams=1` and
`do_sample=True`
- *beam-search decoding* by calling [`~generation.GenerationMixin.beam_search`] if `num_beams>1` and
`do_sample=False`
- *beam-search multinomial sampling* by calling [`~generation.GenerationMixin.beam_sample`] if `num_beams>1`
and `do_sample=True`
- *diverse beam-search decoding* by calling [`~generation.GenerationMixin.group_beam_search`], if `num_beams>1`
and `num_beam_groups>1`
- *constrained beam-search decoding* by calling [`~generation.GenerationMixin.constrained_beam_search`], if
`constraints!=None` or `force_words_ids!=None`
You do not need to call any of the above methods directly. Pass custom parameter values to `generate` instead;
a commented sketch of these dispatch rules follows this docstring. To learn more about decoding strategies,
refer to the [text generation strategies guide](./generation_strategies).
"""
def prepare_inputs_for_generation(self, *args, **kwargs):
raise NotImplementedError(
"A model class needs to define a `prepare_inputs_for_generation` method in order to use `generate`."
)
def _prepare_model_inputs(
self,
inputs: Optional[torch.Tensor] = None,
bos_token_id: Optional[int] = None,
model_kwargs: Optional[Dict[str, torch.Tensor]] = None,
) -> Tuple[torch.Tensor, Optional[str], Dict[str, torch.Tensor]]:
"""
This function extracts the model-specific `inputs` for generation.
"""
# 1. retrieve all kwargs that are non-None or non-model input related.
# some encoder-decoder models have different names for model and encoder
if (
self.config.is_encoder_decoder
and hasattr(self, "encoder")
and self.encoder.main_input_name != self.main_input_name
):
input_name = self.encoder.main_input_name
else:
input_name = self.main_input_name
model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None or k != input_name}
# 2. check whether model_input_name is passed as kwarg
# if yes and `inputs` is None use kwarg inputs
inputs_kwarg = model_kwargs.pop(input_name, None)
if inputs_kwarg is not None and inputs is not None:
raise ValueError(
f"`inputs`: {inputs}` were passed alongside {input_name} which is not allowed."
f"Make sure to either pass {inputs} or {input_name}=..."
)
elif inputs_kwarg is not None:
inputs = inputs_kwarg
# 3. In the presence of `inputs_embeds` for text models:
# - decoder-only models should complain if the user attempts to pass `inputs_embeds`, but the model
# doesn't have its forwarding implemented. `inputs_embeds` is kept in `model_kwargs` and can coexist with
# input_ids (`inputs_embeds` will be used in the 1st generation step, as opposed to `input_ids`)
# - encoder-decoder models should complain if the user attempts to pass `inputs_embeds` and `input_ids`, and
# pull the former to inputs. It will be used in place of `input_ids` to get the encoder hidden states.
if input_name == "input_ids" and "inputs_embeds" in model_kwargs:
if not self.config.is_encoder_decoder:
has_inputs_embeds_forwarding = "inputs_embeds" in set(
inspect.signature(self.prepare_inputs_for_generation).parameters.keys()
)
if not has_inputs_embeds_forwarding:
raise ValueError(
f"You passed `inputs_embeds` to `.generate()`, but the model class {self.__class__.__name__} "
"doesn't have its forwarding implemented. See the GPT2 implementation for an example "
"(https://github.com/huggingface/transformers/pull/21405), and feel free to open a PR with it!"
)
# In this case, `input_ids` is moved to the `model_kwargs`, so a few automations (like the creation of
# the attention mask) can rely on the actual model input.
model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation(
inputs, bos_token_id, model_kwargs=model_kwargs
)
else:
if inputs is not None:
raise ValueError("You passed `inputs_embeds` and `input_ids` to `.generate()`. Please pick one.")
inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds"
# 4. if `inputs` is still None, try to create `input_ids` from BOS token
inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs)
return inputs, input_name, model_kwargs
def adjust_logits_during_generation(self, logits: torch.FloatTensor, **kwargs) -> torch.FloatTensor:
"""
Implement in subclasses of [`PreTrainedModel`] for custom behavior to adjust the logits in the generate method.
"""
return logits
def _maybe_initialize_input_ids_for_generation(
self,
inputs: Optional[torch.Tensor] = None,
bos_token_id: Optional[int] = None,
model_kwargs: Optional[Dict[str, torch.Tensor]] = None,
) -> torch.LongTensor:
"""Initializes input ids for generation, if necessary."""
if inputs is not None:
return inputs
encoder_outputs = model_kwargs.get("encoder_outputs")
if self.config.is_encoder_decoder and encoder_outputs is not None:
# make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding
shape = encoder_outputs.last_hidden_state.size()[:-1]
return torch.ones(shape, dtype=torch.long, device=self.device) * -100
if bos_token_id is None:
raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.")
# If there is some tensor in `model_kwargs`, we can infer the batch size from it. This is helpful with
# soft-prompting or in multimodal implementations built on top of decoder-only language models.
batch_size = 1
for value in model_kwargs.values():
if isinstance(value, torch.Tensor):
batch_size = value.shape[0]
break
return torch.ones((batch_size, 1), dtype=torch.long, device=self.device) * bos_token_id
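# Sketch of the fallback above, assuming `bos_token_id=1` and a soft-prompt
# tensor in `model_kwargs` (shapes are illustrative): with
#   model_kwargs = {"inputs_embeds": torch.randn(3, 5, 768)}
# the batch size 3 is inferred from the tensor, so this returns
#   torch.ones((3, 1), dtype=torch.long) * 1   # one BOS token per row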
def _prepare_attention_mask_for_generation(
self,
inputs: torch.Tensor,
pad_token_id: Optional[int],
eos_token_id: Optional[Union[int, List[int]]],
) -> torch.LongTensor:
is_input_ids = len(inputs.shape) == 2 and inputs.dtype in [torch.int, torch.long]
is_pad_token_in_inputs = (pad_token_id is not None) and (pad_token_id in inputs)
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id not in eos_token_id)
# Check if input is input_ids and padded -> only then is attention_mask defined
if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id:
return inputs.ne(pad_token_id).long()
else:
return torch.ones(inputs.shape[:2], dtype=torch.long, device=inputs.device)
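# Sketch: with pad_token_id=0 and inputs = torch.tensor([[5, 6, 0, 0]]), the
# first branch returns inputs.ne(0).long() == tensor([[1, 1, 0, 0]]); if the
# pad token is absent from the inputs (or equals the EOS token), a mask of
# ones with shape inputs.shape[:2] is returned instead.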
def _prepare_encoder_decoder_kwargs_for_generation(
self, inputs_tensor: torch.Tensor, model_kwargs, model_input_name: Optional[str] = None
) -> Dict[str, Any]:
# 1. get encoder
encoder = self.get_encoder()
# 2. Prepare encoder args and encoder kwargs from model kwargs.
irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"]
encoder_kwargs = {
argument: value
for argument, value in model_kwargs.items()
if not any(argument.startswith(p) for p in irrelevant_prefix)
}
encoder_signature = set(inspect.signature(encoder.forward).parameters)
encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature
if not encoder_accepts_wildcard:
encoder_kwargs = {
argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature
}
# 3. make sure that encoder returns `ModelOutput`
model_input_name = model_input_name if model_input_name is not None else self.main_input_name
encoder_kwargs["return_dict"] = True
encoder_kwargs[model_input_name] = inputs_tensor
model_kwargs["encoder_outputs"]: ModelOutput = encoder(**encoder_kwargs)
return model_kwargs
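# Sketch of the filtering above: for an encoder whose forward signature is
# (input_ids, attention_mask, ...), model_kwargs such as
#   {"attention_mask": m, "decoder_attention_mask": dm, "use_cache": True}
# are reduced to {"attention_mask": m} -- the "decoder_"-prefixed and
# "use_cache" entries are dropped, and anything not in the encoder signature
# is filtered out unless the encoder accepts **kwargs.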
def _prepare_decoder_input_ids_for_generation(
self,
batch_size: int,
decoder_start_token_id: int = None,
bos_token_id: int = None,
model_kwargs: Optional[Dict[str, torch.Tensor]] = None,
device: torch.device = None,
) -> torch.LongTensor:
if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
return model_kwargs.pop("decoder_input_ids")
else:
decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
if device is None:
device = self.device
return torch.ones((batch_size, 1), dtype=torch.long, device=device) * decoder_start_token_id
def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int:
decoder_start_token_id = (
decoder_start_token_id
if decoder_start_token_id is not None
else self.generation_config.decoder_start_token_id
)
bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id
if decoder_start_token_id is not None:
return decoder_start_token_id
elif bos_token_id is not None:
return bos_token_id
raise ValueError(
"`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
)
@staticmethod
def _expand_inputs_for_generation(
expand_size: int = 1,
is_encoder_decoder: bool = False,
input_ids: Optional[torch.LongTensor] = None,
**model_kwargs,
) -> Tuple[torch.LongTensor, Dict[str, Any]]:
"""Expands tensors from [batch_size, ...] to [batch_size * expand_size, ...]"""
def _expand_dict_for_generation(dict_to_expand):
for key in dict_to_expand:
if dict_to_expand[key] is not None and isinstance(dict_to_expand[key], torch.Tensor):
dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0)
return dict_to_expand
if input_ids is not None:
input_ids = input_ids.repeat_interleave(expand_size, dim=0)
model_kwargs = _expand_dict_for_generation(model_kwargs)
if is_encoder_decoder:
if model_kwargs.get("encoder_outputs") is None:
raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.")
model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"])
return input_ids, model_kwargs
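# Sketch: with expand_size=3 (e.g. num_beams=3) and input_ids of shape (2, 4),
# repeat_interleave(3, dim=0) yields shape (6, 4), ordered row-wise as
# [b0, b0, b0, b1, b1, b1]; every tensor in model_kwargs (and, for
# encoder-decoder models, in encoder_outputs) is expanded the same way.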
def _extract_past_from_model_output(self, outputs: ModelOutput, standardize_cache_format: bool = False):
past_key_values = None
if "past_key_values" in outputs:
past_key_values = outputs.past_key_values
elif "mems" in outputs:
past_key_values = outputs.mems
elif "past_buckets_states" in outputs:
past_key_values = outputs.past_buckets_states
# Bloom fix: standardizes the cache format when requested
if standardize_cache_format and hasattr(self, "_convert_to_standard_cache"):
batch_size = outputs.logits.shape[0]
past_key_values = self._convert_to_standard_cache(past_key_values, batch_size=batch_size)
return past_key_values
def _update_model_kwargs_for_generation(
self,
outputs: ModelOutput,
model_kwargs: Dict[str, Any],
is_encoder_decoder: bool = False,
standardize_cache_format: bool = False,
) -> Dict[str, Any]:
# update past_key_values
model_kwargs["past_key_values"] = self._extract_past_from_model_output(
outputs, standardize_cache_format=standardize_cache_format
)
# update token_type_ids with last value
if "token_type_ids" in model_kwargs:
token_type_ids = model_kwargs["token_type_ids"]
model_kwargs["token_type_ids"] = torch.cat([token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1)
if not is_encoder_decoder:
# update attention mask
if "attention_mask" in model_kwargs:
attention_mask = model_kwargs["attention_mask"]
model_kwargs["attention_mask"] = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
)
else:
# update decoder attention mask
if "decoder_attention_mask" in model_kwargs:
decoder_attention_mask = model_kwargs["decoder_attention_mask"]
model_kwargs["decoder_attention_mask"] = torch.cat(
[decoder_attention_mask, decoder_attention_mask.new_ones((decoder_attention_mask.shape[0], 1))],
dim=-1,
)
return model_kwargs
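# Sketch: after each decoding step a 2D attention_mask of shape (batch, n)
# grows to (batch, n + 1) by appending a column of ones, e.g.
# tensor([[1, 1, 0]]) -> tensor([[1, 1, 0, 1]]), so the token generated at
# this step is attended to at the next one.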
def _reorder_cache(self, past_key_values, beam_idx):
raise NotImplementedError(
f"Make sure that a `_reorder_cache` function is correctly implemented in {self.__class__.__module__} to"
f" enable beam search for {self.__class__}"
)
def _get_logits_warper(
self,
generation_config: GenerationConfig,
) -> LogitsProcessorList:
"""
This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsWarper`] instances
used for multinomial sampling.
"""
# instantiate warpers list
warpers = LogitsProcessorList()
# the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files
# all samplers can be found in `generation_utils_samplers.py`
if generation_config.temperature is not None and generation_config.temperature != 1.0:
warpers.append(TemperatureLogitsWarper(generation_config.temperature))
min_tokens_to_keep = 2 if generation_config.num_beams > 1 else 1
if generation_config.top_k is not None and generation_config.top_k != 0:
warpers.append(TopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=min_tokens_to_keep))
if generation_config.top_p is not None and generation_config.top_p < 1.0:
warpers.append(TopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=min_tokens_to_keep))
if generation_config.typical_p is not None and generation_config.typical_p < 1.0:
warpers.append(
TypicalLogitsWarper(mass=generation_config.typical_p, min_tokens_to_keep=min_tokens_to_keep)
)
if generation_config.epsilon_cutoff is not None and 0.0 < generation_config.epsilon_cutoff < 1.0:
warpers.append(
EpsilonLogitsWarper(epsilon=generation_config.epsilon_cutoff, min_tokens_to_keep=min_tokens_to_keep)
)
if generation_config.eta_cutoff is not None and 0.0 < generation_config.eta_cutoff < 1.0:
warpers.append(
EtaLogitsWarper(epsilon=generation_config.eta_cutoff, min_tokens_to_keep=min_tokens_to_keep)
)
# `LogitNormalization` should always be the last logit processor, when present
if generation_config.renormalize_logits is True:
warpers.append(LogitNormalization())
return warpers
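# Sketch of using the assembled warpers outside of `generate` (the `input_ids`
# and `next_token_logits` tensors are assumed to exist with matching batch size):
#   warpers = LogitsProcessorList(
#       [TemperatureLogitsWarper(0.7), TopKLogitsWarper(top_k=50)]
#   )
#   warped = warpers(input_ids, next_token_logits)  # same shape; entries
#   # outside the top 50 are set to -inf before sampling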
def _get_logits_processor(
self,
generation_config: GenerationConfig,
input_ids_seq_length: int,
encoder_input_ids: torch.LongTensor,
prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]],
logits_processor: Optional[LogitsProcessorList],
) -> LogitsProcessorList:
"""
This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsProcessor`]
instances used to modify the scores of the language model head.
"""
# instantiate processors list
processors = LogitsProcessorList()
# the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files
# all samplers can be found in `generation_utils_samplers.py`
if generation_config.diversity_penalty is not None and generation_config.diversity_penalty > 0.0:
processors.append(
HammingDiversityLogitsProcessor(
diversity_penalty=generation_config.diversity_penalty,
num_beams=generation_config.num_beams,
num_beam_groups=generation_config.num_beam_groups,
)
)
if (
generation_config.encoder_repetition_penalty is not None
and generation_config.encoder_repetition_penalty != 1.0
):
processors.append(
EncoderRepetitionPenaltyLogitsProcessor(
penalty=generation_config.encoder_repetition_penalty, encoder_input_ids=encoder_input_ids
)
)
if generation_config.repetition_penalty is not None and generation_config.repetition_penalty != 1.0:
processors.append(RepetitionPenaltyLogitsProcessor(penalty=generation_config.repetition_penalty))
if generation_config.no_repeat_ngram_size is not None and generation_config.no_repeat_ngram_size > 0:
processors.append(NoRepeatNGramLogitsProcessor(generation_config.no_repeat_ngram_size))
if (
generation_config.encoder_no_repeat_ngram_size is not None
and generation_config.encoder_no_repeat_ngram_size > 0
):
if self.config.is_encoder_decoder:
processors.append(
EncoderNoRepeatNGramLogitsProcessor(
generation_config.encoder_no_repeat_ngram_size, encoder_input_ids
)
)
else:
raise ValueError(
"It's impossible to use `encoder_no_repeat_ngram_size` with decoder-only architecture"
)
if generation_config.bad_words_ids is not None:
processors.append(
NoBadWordsLogitsProcessor(generation_config.bad_words_ids, generation_config.eos_token_id)
)
if (
generation_config.min_length is not None
and generation_config.eos_token_id is not None
and generation_config.min_length > 0
):
processors.append(MinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id))
if (
generation_config.min_new_tokens is not None
and generation_config.eos_token_id is not None
and generation_config.min_new_tokens > 0
):
processors.append(
MinNewTokensLengthLogitsProcessor(
input_ids_seq_length, generation_config.min_new_tokens, generation_config.eos_token_id
)
)
if prefix_allowed_tokens_fn is not None:
processors.append(
PrefixConstrainedLogitsProcessor(
prefix_allowed_tokens_fn, generation_config.num_beams // generation_config.num_beam_groups
)
)
if generation_config.forced_bos_token_id is not None:
processors.append(ForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id))
if generation_config.forced_eos_token_id is not None:
processors.append(
ForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id)
)
if generation_config.remove_invalid_values is True:
processors.append(InfNanRemoveLogitsProcessor())
if generation_config.exponential_decay_length_penalty is not None:
processors.append(
ExponentialDecayLengthPenalty(
generation_config.exponential_decay_length_penalty,
generation_config.eos_token_id,
input_ids_seq_length,
)
)
if generation_config.suppress_tokens is not None:
processors.append(SuppressTokensLogitsProcessor(generation_config.suppress_tokens))
if generation_config.begin_suppress_tokens is not None:
begin_index = input_ids_seq_length
begin_index = (
begin_index
if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None)
else begin_index + 1
)
if generation_config.forced_decoder_ids is not None:
# generation starts after the last token that is forced
begin_index += generation_config.forced_decoder_ids[-1][0]
processors.append(
SuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index)
)
if generation_config.forced_decoder_ids is not None:
processors.append(ForceTokensLogitsProcessor(generation_config.forced_decoder_ids))
processors = self._merge_criteria_processor_list(processors, logits_processor)
# `LogitNormalization` should always be the last logit processor, when present
if generation_config.renormalize_logits is True:
processors.append(LogitNormalization())
return processors
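# Sketch of one processor built above: MinLengthLogitsProcessor(10, eos_token_id=2)
# sets the score of token id 2 to -inf while input_ids.shape[-1] < 10, so the
# EOS token cannot be chosen before 10 tokens exist.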
def _get_stopping_criteria(
self, generation_config: GenerationConfig, stopping_criteria: Optional[StoppingCriteriaList]
) -> StoppingCriteriaList:
criteria = StoppingCriteriaList()
if generation_config.max_length is not None:
criteria.append(MaxLengthCriteria(max_length=generation_config.max_length))
if generation_config.max_time is not None:
criteria.append(MaxTimeCriteria(max_time=generation_config.max_time))
criteria = self._merge_criteria_processor_list(criteria, stopping_criteria)
return criteria
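# Sketch: the merged criteria are evaluated after every decoding step, e.g.
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#   done = criteria(input_ids, scores)  # True once input_ids.shape[-1] >= 20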
def _merge_criteria_processor_list(
self,
default_list: Union[LogitsProcessorList, StoppingCriteriaList],
custom_list: Union[LogitsProcessorList, StoppingCriteriaList],
) -> Union[LogitsProcessorList, StoppingCriteriaList]:
if len(custom_list) == 0:
return default_list
for default in default_list:
for custom in custom_list:
if type(custom) is type(default):
object_type = "stopping criteria" if isinstance(custom, StoppingCriteria) else "logits processor"
raise ValueError(
f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to"
f" `generate`, but it has already been created with the values {default}. {default} has been"
" created by passing the corresponding arguments to generate or by the model's config default"
f" values. If you just want to change the default values of {object_type} consider passing"
f" them as arguments to `generate` instead of using a custom {object_type}."
)
default_list.extend(custom_list)
return default_list
def compute_transition_scores(
self,
sequences: torch.Tensor,
scores: Tuple[torch.Tensor],
beam_indices: Optional[torch.Tensor] = None,
normalize_logits: bool = False,
) -> torch.Tensor:
"""
Computes the transition scores of sequences given the generation scores (and beam indices, if beam search was
used). This is a convenient method to quickly obtain the scores of the selected tokens at generation time.
Parameters:
sequences (`torch.LongTensor`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or
shorter if all batches finished early due to the `eos_token_id`.
scores (`tuple(torch.FloatTensor)`):
Transition scores for each vocabulary token at each generation step, consisting of the log probabilities
of tokens conditioned on the log softmax of previously generated tokens in this beam. Tuple of
`torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), with
each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
beam_indices (`torch.LongTensor`, *optional*):
Beam indices of generated token id at each generation step. `torch.LongTensor` of shape
`(batch_size*num_return_sequences, sequence_length)`. Only required if `num_beams>1` at
generate-time.
normalize_logits (`bool`, *optional*, defaults to `False`):
Whether to normalize the logits (which, for legacy reasons, may be unnormalized).
Return:
`torch.Tensor`: A `torch.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)` containing
the transition scores (logits).
Examples:
```python
>>> from transformers import GPT2Tokenizer, AutoModelForCausalLM
>>> import numpy as np
>>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
>>> model = AutoModelForCausalLM.from_pretrained("gpt2")
>>> tokenizer.pad_token_id = tokenizer.eos_token_id
>>> inputs = tokenizer(["Today is"], return_tensors="pt")
>>> # Example 1: Print the scores for each token generated with Greedy Search
>>> outputs = model.generate(**inputs, max_new_tokens=5, return_dict_in_generate=True, output_scores=True)
>>> transition_scores = model.compute_transition_scores(
... outputs.sequences, outputs.scores, normalize_logits=True
... )
>>> # input_length is the length of the input prompt for decoder-only models, like the GPT family, and 1 for
>>> # encoder-decoder models, like BART or T5.
>>> input_length = 1 if model.config.is_encoder_decoder else inputs.input_ids.shape[1]
>>> generated_tokens = outputs.sequences[:, input_length:]
>>> for tok, score in zip(generated_tokens[0], transition_scores[0]):
... # | token | token string | logits | probability
... print(f"| {tok:5d} | {tokenizer.decode(tok):8s} | {score.numpy():.3f} | {np.exp(score.numpy()):.2%}")
| 262 | the | -1.414 | 24.33%
| 1110 | day | -2.609 | 7.36%
| 618 | when | -2.010 | 13.40%
| 356 | we | -1.859 | 15.58%
| 460 | can | -2.508 | 8.14%
>>> # Example 2: Reconstruct the sequence scores from Beam Search
>>> outputs = model.generate(
... **inputs,
... max_new_tokens=5,
... num_beams=4,
... num_return_sequences=4,
... return_dict_in_generate=True,
... output_scores=True,
... )
>>> transition_scores = model.compute_transition_scores(
... outputs.sequences, outputs.scores, outputs.beam_indices, normalize_logits=False
... )
>>> # If you sum the generated tokens' scores and apply the length penalty, you'll get the sequence scores.
>>> # Tip: recomputing the scores is only guaranteed to match with `normalize_logits=False`. Depending on the
>>> # use case, you might want to recompute it with `normalize_logits=True`.
>>> output_length = input_length + np.sum(transition_scores.numpy() < 0, axis=1)
>>> length_penalty = model.generation_config.length_penalty
>>> reconstructed_scores = transition_scores.sum(axis=1) / (output_length**length_penalty)
>>> print(np.allclose(outputs.sequences_scores, reconstructed_scores))
True
```"""
# 1. In the absence of `beam_indices`, we can assume that we come from e.g. greedy search, which is equivalent
# to a beam search approach where the first (and only) beam is always selected
if beam_indices is None:
beam_indices = torch.arange(scores[0].shape[0]).view(-1, 1).to(sequences.device)
beam_indices = beam_indices.expand(-1, len(scores))
# 2. reshape scores as [batch_size*vocab_size, # generation steps] with # generation steps being
# seq_len - input_length
scores = torch.stack(scores).reshape(len(scores), -1).transpose(0, 1)
# 3. Optionally normalize the logits (across the vocab dimension)
if normalize_logits:
scores = scores.reshape(-1, self.config.vocab_size, scores.shape[-1])
scores = torch.nn.functional.log_softmax(scores, dim=1)
scores = scores.reshape(-1, scores.shape[-1])
# 4. cut beam_indices to longest beam length
beam_indices_mask = beam_indices < 0
max_beam_length = (1 - beam_indices_mask.long()).sum(-1).max()
beam_indices = beam_indices.clone()[:, :max_beam_length]
beam_indices_mask = beam_indices_mask[:, :max_beam_length]
# 5. Set indices of beams that finished early to 0; such indices will be masked correctly afterwards
beam_indices[beam_indices_mask] = 0
# 6. multiply beam_indices with vocab size to gather correctly from scores
beam_sequence_indices = beam_indices * self.config.vocab_size
# 7. Define which indices contributed to scores
cut_idx = sequences.shape[-1] - max_beam_length
indices = sequences[:, cut_idx:] + beam_sequence_indices
# 8. Compute scores
transition_scores = scores.gather(0, indices)
# 9. Mask out transition_scores of beams that stopped early
transition_scores[beam_indices_mask] = 0
return transition_scores
def _validate_model_class(self):
"""
Confirms that the model class is compatible with generation. If not, raises an exception that points to the
right class to use.
"""
if not self.can_generate():
generate_compatible_mappings = [
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING,
MODEL_FOR_VISION_2_SEQ_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
]
generate_compatible_classes = set()
for model_mapping in generate_compatible_mappings:
supported_models = model_mapping.get(type(self.config), default=None)
if supported_models is not None:
generate_compatible_classes.add(supported_models.__name__)
exception_message = (
f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as "
"it doesn't have a language model head."
)
if generate_compatible_classes:
exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}"
raise TypeError(exception_message)
def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):
"""Validates model kwargs for generation. Generate argument typos will also be caught here."""
# Excludes arguments that are handled before calling any model function
if self.config.is_encoder_decoder:
for key in ["decoder_input_ids"]:
model_kwargs.pop(key, None)
unused_model_args = []
model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)
# `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If
# `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)
if "kwargs" in model_args or "model_kwargs" in model_args:
model_args |= set(inspect.signature(self.forward).parameters)
for key, value in model_kwargs.items():
if value is not None and key not in model_args:
unused_model_args.append(key)
if unused_model_args:
raise ValueError(
f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the"
" generate arguments will also show up in this list)"
)
@torch.no_grad()
def generate(
self,
inputs: Optional[torch.Tensor] = None,
generation_config: Optional[GenerationConfig] = None,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
synced_gpus: Optional[bool] = False,
**kwargs,
) -> Union[GenerateOutput, torch.LongTensor]:
r"""
Generates sequences of token ids for models with a language modeling head.
<Tip warning={true}>
Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
model's default generation configuration. You can override any `generation_config` by passing the corresponding
parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
For an overview of generation strategies and code examples, check out the [following
guide](./generation_strategies).
</Tip>
Parameters:
inputs (`torch.Tensor` of varying shape depending on the modality, *optional*):
The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`
                should be in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of
`input_ids`, `input_values`, `input_features`, or `pixel_values`.
generation_config (`~generation.GenerationConfig`, *optional*):
The generation configuration to be used as base parametrization for the generation call. `**kwargs`
passed to generate matching the attributes of `generation_config` will override them. If
                `generation_config` is not provided, the default will be used, which has the following loading
priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
default values, whose documentation should be checked to parameterize generation.
logits_processor (`LogitsProcessorList`, *optional*):
                Custom logits processors that complement the default logits processors built from arguments and
                generation config. If a logits processor is passed that is already created with the arguments or a
                generation config, an error is thrown. This feature is intended for advanced users.
stopping_criteria (`StoppingCriteriaList`, *optional*):
                Custom stopping criteria that complement the default stopping criteria built from arguments and a
                generation config. If a stopping criterion is passed that is already created with the arguments or a
                generation config, an error is thrown. This feature is intended for advanced users.
prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*):
                If provided, this function constrains the beam search to allowed tokens only at each step. If not
                provided, no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and
                `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned
                on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful
for constrained generation conditioned on the prefix, as described in [Autoregressive Entity
Retrieval](https://arxiv.org/abs/2010.00904).
synced_gpus (`bool`, *optional*, defaults to `False`):
Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
kwargs:
                Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.
Return:
[`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
            or when `config.return_dict_in_generate=True`) or a `torch.LongTensor`.
If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible
[`~utils.ModelOutput`] types are:
- [`~generation.GreedySearchDecoderOnlyOutput`],
- [`~generation.SampleDecoderOnlyOutput`],
- [`~generation.BeamSearchDecoderOnlyOutput`],
- [`~generation.BeamSampleDecoderOnlyOutput`]
If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
[`~utils.ModelOutput`] types are:
- [`~generation.GreedySearchEncoderDecoderOutput`],
- [`~generation.SampleEncoderDecoderOutput`],
- [`~generation.BeamSearchEncoderDecoderOutput`],
- [`~generation.BeamSampleEncoderDecoderOutput`]
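        Examples:
        A minimal usage sketch (the `"gpt2"` checkpoint and prompt are illustrative; any causal LM works):
        ```python
        >>> from transformers import AutoTokenizer, AutoModelForCausalLM
        >>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
        >>> model = AutoModelForCausalLM.from_pretrained("gpt2")
        >>> inputs = tokenizer("Today is a beautiful day, and", return_tensors="pt")
        >>> outputs = model.generate(**inputs, max_new_tokens=10)
        >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)  # doctest: +SKIP
        ```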
"""
# 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
self._validate_model_class()
# priority: `generation_config` argument > `model.generation_config` (the default generation config)
if generation_config is None:
# legacy: users may modify the model configuration to control generation -- update the generation config
# model attribute accordingly, if it was created from the model config
if self.generation_config._from_model_config:
new_generation_config = GenerationConfig.from_model_config(self.config)
if new_generation_config != self.generation_config:
warnings.warn(
"You have modified the pretrained model configuration to control generation. This is a"
                        " deprecated strategy to control generation and will be removed in a future version."
" Please use a generation configuration file (see"
" https://huggingface.co/docs/transformers/main_classes/text_generation)"
)
self.generation_config = new_generation_config
generation_config = self.generation_config
generation_config = copy.deepcopy(generation_config)
model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
generation_config.validate()
self._validate_model_kwargs(model_kwargs.copy())
# 2. Set generation parameters if not already defined
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:
if model_kwargs.get("attention_mask", None) is None:
logger.warning(
"The attention mask and the pad token id were not set. As a consequence, you may observe "
"unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results."
)
eos_token_id = generation_config.eos_token_id
if isinstance(eos_token_id, list):
eos_token_id = eos_token_id[0]
logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.")
generation_config.pad_token_id = eos_token_id
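            # e.g. for GPT-2-style models this falls back to padding finished sequences with the eos token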
# 3. Define model inputs
# inputs_tensor has to be defined
# model_input_name is defined if model-specific keyword input is passed
# otherwise model_input_name is None
# all model-specific keyword inputs are removed from `model_kwargs`
inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
inputs, generation_config.bos_token_id, model_kwargs
)
batch_size = inputs_tensor.shape[0]
# 4. Define other model kwargs
model_kwargs["output_attentions"] = generation_config.output_attentions
model_kwargs["output_hidden_states"] = generation_config.output_hidden_states
model_kwargs["use_cache"] = generation_config.use_cache
accepts_attention_mask = "attention_mask" in set(inspect.signature(self.forward).parameters.keys())
requires_attention_mask = "encoder_outputs" not in model_kwargs
if model_kwargs.get("attention_mask", None) is None and requires_attention_mask and accepts_attention_mask:
model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
inputs_tensor, generation_config.pad_token_id, generation_config.eos_token_id
)
# decoder-only models should use left-padding for generation
if not self.config.is_encoder_decoder:
if (
generation_config.pad_token_id is not None
and torch.sum(inputs_tensor[:, -1] == generation_config.pad_token_id) > 0
):
logger.warning(
"A decoder-only architecture is being used, but right-padding was detected! For correct "
"generation results, please set `padding_side='left'` when initializing the tokenizer."
)
if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs:
# if model is encoder decoder encoder_outputs are created
# and added to `model_kwargs`
model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
inputs_tensor, model_kwargs, model_input_name
)
# 5. Prepare `input_ids` which will be used for auto-regressive generation
if self.config.is_encoder_decoder:
input_ids = self._prepare_decoder_input_ids_for_generation(
batch_size,
decoder_start_token_id=generation_config.decoder_start_token_id,
bos_token_id=generation_config.bos_token_id,
model_kwargs=model_kwargs,
device=inputs_tensor.device,
)
else:
input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop("input_ids")
# 6. Prepare `max_length` depending on other stopping criteria.
input_ids_seq_length = input_ids.shape[-1]
has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
if has_default_max_length and generation_config.max_new_tokens is None:
warnings.warn(
f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
"This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
" recommend using `max_new_tokens` to control the maximum length of the generation.",
UserWarning,
)
elif generation_config.max_new_tokens is not None:
generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
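            # e.g. a 5-token prompt with `max_new_tokens=20` gives an effective `max_length` of 25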
if not has_default_max_length:
                logger.warning(
                    f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length` (="
                    f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
                    "Please refer to the documentation for more information. "
                    "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
                )
if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length:
raise ValueError(
f"Unfeasible length constraints: the minimum length ({generation_config.min_length}) is larger than"
f" the maximum length ({generation_config.max_length})"
)
if input_ids_seq_length >= generation_config.max_length:
input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
logger.warning(
f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
" increasing `max_new_tokens`."
)
# 7. determine generation mode
is_constraint_gen_mode = (
generation_config.constraints is not None or generation_config.force_words_ids is not None
)
is_contrastive_search_gen_mode = (
generation_config.top_k is not None
and generation_config.top_k > 1
and generation_config.do_sample is False
and generation_config.penalty_alpha is not None
and generation_config.penalty_alpha > 0
)
is_greedy_gen_mode = (
(generation_config.num_beams == 1)
and (generation_config.num_beam_groups == 1)
and generation_config.do_sample is False
and not is_constraint_gen_mode
and not is_contrastive_search_gen_mode
)
is_sample_gen_mode = (
(generation_config.num_beams == 1)
and (generation_config.num_beam_groups == 1)
and generation_config.do_sample is True
and not is_constraint_gen_mode
and not is_contrastive_search_gen_mode
)
is_beam_gen_mode = (
(generation_config.num_beams > 1)
and (generation_config.num_beam_groups == 1)
and generation_config.do_sample is False
and not is_constraint_gen_mode
and not is_contrastive_search_gen_mode
)
is_beam_sample_gen_mode = (
(generation_config.num_beams > 1)
and (generation_config.num_beam_groups == 1)
and generation_config.do_sample is True
and not is_constraint_gen_mode
and not is_contrastive_search_gen_mode
)
is_group_beam_gen_mode = (
(generation_config.num_beams > 1)
and (generation_config.num_beam_groups > 1)
and not is_constraint_gen_mode
and not is_contrastive_search_gen_mode
)
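        # Summary of the dispatch above (illustrative, assuming config defaults elsewhere):
        #   num_beams=1, do_sample=False              -> greedy search
        #   num_beams=1, do_sample=True               -> multinomial sampling
        #   num_beams>1, do_sample=False              -> beam search
        #   num_beams>1, do_sample=True               -> beam sample
        #   num_beam_groups>1                         -> group beam search
        #   top_k>1, do_sample=False, penalty_alpha>0 -> contrastive search
        #   constraints / force_words_ids set         -> constrained beam search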
if generation_config.num_beam_groups > generation_config.num_beams:
raise ValueError("`num_beam_groups` has to be smaller or equal to `num_beams`")
if is_group_beam_gen_mode and generation_config.do_sample is True:
raise ValueError(
"Diverse beam search cannot be used in sampling mode. Make sure that `do_sample` is set to `False`."
)
if self.device.type != input_ids.device.type:
warnings.warn(
"You are calling .generate() with the `input_ids` being on a device type different"
f" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model"
f" is on {self.device.type}. You may experience unexpected behaviors or slower generation."
" Please make sure that you have put `input_ids` to the"
f" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before"
" running `.generate()`.",
UserWarning,
)
# 8. prepare distribution pre_processing samplers
logits_processor = self._get_logits_processor(
generation_config=generation_config,
input_ids_seq_length=input_ids_seq_length,
encoder_input_ids=inputs_tensor,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
logits_processor=logits_processor,
)
# 9. prepare stopping criteria
stopping_criteria = self._get_stopping_criteria(
generation_config=generation_config, stopping_criteria=stopping_criteria
)
# 10. go into different generation modes
if is_greedy_gen_mode:
if generation_config.num_return_sequences > 1:
raise ValueError(
f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing"
" greedy search."
)
# 11. run greedy search
return self.greedy_search(
input_ids,
logits_processor=logits_processor,
stopping_criteria=stopping_criteria,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
synced_gpus=synced_gpus,
**model_kwargs,
)
elif is_contrastive_search_gen_mode:
if generation_config.num_return_sequences > 1:
raise ValueError(
f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing"
" contrastive search."
)
return self.contrastive_search(
input_ids,
top_k=generation_config.top_k,
penalty_alpha=generation_config.penalty_alpha,
logits_processor=logits_processor,
stopping_criteria=stopping_criteria,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
synced_gpus=synced_gpus,
**model_kwargs,
)
elif is_sample_gen_mode:
# 11. prepare logits warper
logits_warper = self._get_logits_warper(generation_config)
# 12. expand input_ids with `num_return_sequences` additional sequences per batch
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_return_sequences,
is_encoder_decoder=self.config.is_encoder_decoder,
**model_kwargs,
)
# 13. run sample
return self.sample(
input_ids,
logits_processor=logits_processor,
logits_warper=logits_warper,
stopping_criteria=stopping_criteria,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
synced_gpus=synced_gpus,
**model_kwargs,
)
elif is_beam_gen_mode:
if generation_config.num_return_sequences > generation_config.num_beams:
raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.")
if stopping_criteria.max_length is None:
raise ValueError("`max_length` needs to be a stopping_criteria for now.")
# 11. prepare beam search scorer
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=generation_config.num_beams,
device=inputs_tensor.device,
length_penalty=generation_config.length_penalty,
do_early_stopping=generation_config.early_stopping,
num_beam_hyps_to_keep=generation_config.num_return_sequences,
max_length=generation_config.max_length,
)
# 12. interleave input_ids with `num_beams` additional sequences per batch
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_beams,
is_encoder_decoder=self.config.is_encoder_decoder,
**model_kwargs,
)
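            # after interleaving, `input_ids` has shape (batch_size * num_beams, cur_len), e.g.
            # batch_size=2 and num_beams=4 yield 8 rows, with each sample repeated 4 times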
# 13. run beam search
return self.beam_search(
input_ids,
beam_scorer,
logits_processor=logits_processor,
stopping_criteria=stopping_criteria,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
synced_gpus=synced_gpus,
**model_kwargs,
)
elif is_beam_sample_gen_mode:
# 11. prepare logits warper
logits_warper = self._get_logits_warper(generation_config)
if stopping_criteria.max_length is None:
raise ValueError("`max_length` needs to be a stopping_criteria for now.")
# 12. prepare beam search scorer
beam_scorer = BeamSearchScorer(
batch_size=batch_size * generation_config.num_return_sequences,
num_beams=generation_config.num_beams,
device=inputs_tensor.device,
length_penalty=generation_config.length_penalty,
do_early_stopping=generation_config.early_stopping,
max_length=generation_config.max_length,
)
# 13. interleave input_ids with `num_beams` additional sequences per batch
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_beams * generation_config.num_return_sequences,
is_encoder_decoder=self.config.is_encoder_decoder,
**model_kwargs,
)
# 14. run beam sample
return self.beam_sample(
input_ids,
beam_scorer,
logits_processor=logits_processor,
logits_warper=logits_warper,
stopping_criteria=stopping_criteria,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
synced_gpus=synced_gpus,
**model_kwargs,
)
elif is_group_beam_gen_mode:
if generation_config.num_return_sequences > generation_config.num_beams:
raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.")
if generation_config.num_beams % generation_config.num_beam_groups != 0:
raise ValueError("`num_beams` should be divisible by `num_beam_groups` for group beam search.")
if stopping_criteria.max_length is None:
raise ValueError("`max_length` needs to be a stopping_criteria for now.")
has_default_typical_p = kwargs.get("typical_p") is None and generation_config.typical_p == 1.0
if not has_default_typical_p:
raise ValueError("Decoder argument `typical_p` is not supported with beam groups.")
# 11. prepare beam search scorer
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=generation_config.num_beams,
device=inputs_tensor.device,
length_penalty=generation_config.length_penalty,
do_early_stopping=generation_config.early_stopping,
num_beam_hyps_to_keep=generation_config.num_return_sequences,
num_beam_groups=generation_config.num_beam_groups,
max_length=generation_config.max_length,
)
# 12. interleave input_ids with `num_beams` additional sequences per batch
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_beams,
is_encoder_decoder=self.config.is_encoder_decoder,
**model_kwargs,
)
# 13. run beam search
return self.group_beam_search(
input_ids,
beam_scorer,
logits_processor=logits_processor,
stopping_criteria=stopping_criteria,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
synced_gpus=synced_gpus,
**model_kwargs,
)
elif is_constraint_gen_mode:
if generation_config.num_return_sequences > generation_config.num_beams:
raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.")
if stopping_criteria.max_length is None:
raise ValueError("`max_length` needs to be a stopping_criteria for now.")
if generation_config.num_beams <= 1:
raise ValueError("`num_beams` needs to be greater than 1 for constrained generation.")
if generation_config.do_sample:
raise ValueError("`do_sample` needs to be false for constrained generation.")
if generation_config.num_beam_groups is not None and generation_config.num_beam_groups > 1:
raise ValueError("`num_beam_groups` not supported yet for constrained generation.")
final_constraints = []
if generation_config.constraints is not None:
final_constraints = generation_config.constraints
if generation_config.force_words_ids is not None:
def typeerror():
raise ValueError(
"`force_words_ids` has to either be a `List[List[List[int]]]` or `List[List[int]]`"
f"of positive integers, but is {generation_config.force_words_ids}."
)
if (
not isinstance(generation_config.force_words_ids, list)
or len(generation_config.force_words_ids) == 0
):
typeerror()
for word_ids in generation_config.force_words_ids:
if isinstance(word_ids[0], list):
if not isinstance(word_ids, list) or len(word_ids) == 0:
typeerror()
if any(not isinstance(token_ids, list) for token_ids in word_ids):
typeerror()
if any(
any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
for token_ids in word_ids
):
typeerror()
constraint = DisjunctiveConstraint(word_ids)
else:
if not isinstance(word_ids, list) or len(word_ids) == 0:
typeerror()
if any((not isinstance(token_id, int) or token_id < 0) for token_id in word_ids):
typeerror()
constraint = PhrasalConstraint(word_ids)
final_constraints.append(constraint)
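                    # illustrative mapping: a `List[int]` entry (one tokenized phrase) becomes a
                    # PhrasalConstraint, while a `List[List[int]]` entry (several alternative
                    # tokenizations, any of which may be forced) becomes a DisjunctiveConstraint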
# 11. prepare beam search scorer
constrained_beam_scorer = ConstrainedBeamSearchScorer(
constraints=final_constraints,
batch_size=batch_size,
num_beams=generation_config.num_beams,
device=inputs_tensor.device,
length_penalty=generation_config.length_penalty,
do_early_stopping=generation_config.early_stopping,
num_beam_hyps_to_keep=generation_config.num_return_sequences,
max_length=generation_config.max_length,
)
# 12. interleave input_ids with `num_beams` additional sequences per batch
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_beams,
is_encoder_decoder=self.config.is_encoder_decoder,
**model_kwargs,
)
# 13. run beam search
return self.constrained_beam_search(
input_ids,
constrained_beam_scorer=constrained_beam_scorer,
logits_processor=logits_processor,
stopping_criteria=stopping_criteria,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
synced_gpus=synced_gpus,
**model_kwargs,
)
@torch.no_grad()
def contrastive_search(
self,
input_ids: torch.LongTensor,
top_k: Optional[int] = 1,
penalty_alpha: Optional[float] = 0,
logits_processor: Optional[LogitsProcessorList] = None,
logits_warper: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[Union[int, List[int]]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
synced_gpus: Optional[bool] = False,
**model_kwargs,
) -> Union[ContrastiveSearchOutput, torch.LongTensor]:
r"""
Generates sequences of token ids for models with a language modeling head using **contrastive search** and can
be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
<Tip warning={true}>
In most cases, you do not need to call [`~generation.GenerationMixin.contrastive_search`] directly. Use
generate() instead. For an overview of generation strategies and code examples, check the [following
guide](./generation_strategies).
</Tip>
Parameters:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
            top_k (`int`, *optional*, defaults to 1):
                The size of the candidate set that is used to re-rank for contrastive search.
            penalty_alpha (`float`, *optional*, defaults to 0):
                The degeneration penalty for contrastive search; it is activated when larger than 0.
logits_processor (`LogitsProcessorList`, *optional*):
An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
used to modify the prediction scores of the language modeling head applied at each generation step.
logits_warper (`LogitsProcessorList`, *optional*):
An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used
to warp the prediction score distribution of the language modeling head applied before multinomial
sampling at each generation step.
stopping_criteria (`StoppingCriteriaList`, *optional*):
An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
used to tell if the generation loop should stop.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, List[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more details.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more details.
output_scores (`bool`, *optional*, defaults to `False`):
Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
synced_gpus (`bool`, *optional*, defaults to `False`):
Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
model_kwargs:
Additional model specific keyword arguments will be forwarded to the `forward` function of the model.
If model is an encoder-decoder model the kwargs should include `encoder_outputs`.
Return:
[`~generation.ContrastiveSearchDecoderOnlyOutput`], [`~generation.ContrastiveSearchEncoderDecoderOutput`]
or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
[`~generation.ContrastiveSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
`return_dict_in_generate=True` or a [`~generation.ContrastiveSearchEncoderDecoderOutput`] if
`model.config.is_encoder_decoder=True`.
Examples:
```python
>>> from transformers import (
... AutoTokenizer,
... AutoModelForCausalLM,
... StoppingCriteriaList,
... MaxLengthCriteria,
... )
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
>>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
>>> # set pad_token_id to eos_token_id because OPT does not have a PAD token
>>> model.config.pad_token_id = model.config.eos_token_id
>>> input_prompt = "DeepMind Company is"
>>> input_ids = tokenizer(input_prompt, return_tensors="pt")
>>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=64)])
>>> outputs = model.contrastive_search(
... **input_ids, penalty_alpha=0.6, top_k=4, stopping_criteria=stopping_criteria
... )
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['DeepMind Company is a company that focuses on the development and commercialization of artificial intelligence (AI). DeepMind’s mission is to help people understand and solve problems that are difficult to solve in the world today.\n\nIn this post, we talk about the benefits of deep learning in business and how it']
```"""
# init values
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None
output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
output_attentions = (
output_attentions if output_attentions is not None else self.generation_config.output_attentions
)
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate
if return_dict_in_generate is not None
else self.generation_config.return_dict_in_generate
)
# init attention / hidden states / scores tuples
scores = () if (return_dict_in_generate and output_scores) else None
decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
cross_attentions = () if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
# keep track of which sequences are already finished
unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
this_peer_finished = False # used by synced_gpus only
batch_size = input_ids.shape[0]
while True:
if synced_gpus:
# Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
# The following logic allows an early break if all peers finished generating their sequence
this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)
# send 0.0 if we finished, 1.0 otherwise
dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
# did all peers finish? the reduced sum will be 0.0 then
if this_peer_finished_flag.item() == 0.0:
break
# if the first step in the loop, encode all the prefix and obtain: (1) past_key_values;
# (2) last_hidden_states; (3) logit_for_next_step; (4) update model kwargs for the next step
if model_kwargs.get("past_key_values") is None:
# prepare inputs
model_kwargs["use_cache"] = True
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
                # encode the given prefix and prepare model inputs; encoder-decoder models process the prefix and
                # save the `encoder_outputs`
outputs = self(
**model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions
)
# last decoder hidden states will be used to compute the degeneration penalty (cosine similarity with
# previous tokens)
if self.config.is_encoder_decoder:
last_hidden_states = outputs.decoder_hidden_states[-1]
else:
last_hidden_states = outputs.hidden_states[-1]
# next logit for contrastive search to select top-k candidate tokens
logit_for_next_step = outputs.logits[:, -1, :]
model_kwargs = self._update_model_kwargs_for_generation(
outputs,
model_kwargs,
is_encoder_decoder=self.config.is_encoder_decoder,
standardize_cache_format=True,
)
# Expands model inputs top_k times, for batched forward passes (akin to beam search).
_, model_kwargs = self._expand_inputs_for_generation(
expand_size=top_k, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs
)
past_key_values = model_kwargs.get("past_key_values")
if past_key_values is None:
raise ValueError(
f"{self.__class__.__name__} does not support caching and therefore **can't** be used "
"for contrastive search."
)
elif (
not isinstance(past_key_values[0], (tuple, torch.Tensor))
or past_key_values[0][0].shape[0] != batch_size
):
raise ValueError(
f"{self.__class__.__name__} does not have a standard cache format and therefore **can't** be "
"used for contrastive search without further modifications."
)
# contrastive_search main logic start:
# contrastive search decoding consists of two steps: (1) candidate tokens recall; (2) candidate re-rank by
# degeneration penalty
logit_for_next_step = logits_processor(input_ids, logit_for_next_step)
logit_for_next_step = logits_warper(input_ids, logit_for_next_step)
next_probs = nn.functional.softmax(logit_for_next_step, dim=-1)
top_k_probs, top_k_ids = torch.topk(next_probs, dim=-1, k=top_k)
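            # `top_k_ids` / `top_k_probs` have shape (batch_size, top_k): the recalled candidates
            # (and their model confidence) that are re-ranked by the degeneration penalty below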
# Store scores, attentions and hidden_states when required
if return_dict_in_generate:
if output_scores:
scores += (logit_for_next_step,)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if self.config.is_encoder_decoder:
cross_attentions += (outputs.cross_attentions,)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
# Replicates the new past_key_values to match the `top_k` candidates
new_key_values = []
for layer in model_kwargs["past_key_values"]:
items = []
# item is either the key or the value matrix
for item in layer:
items.append(item.repeat_interleave(top_k, dim=0))
new_key_values.append(items)
model_kwargs["past_key_values"] = new_key_values
            # compute the candidate tokens with the language model and collect their hidden_states
next_model_inputs = self.prepare_inputs_for_generation(top_k_ids.view(-1, 1), **model_kwargs)
outputs = self(
**next_model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions
)
next_past_key_values = self._extract_past_from_model_output(outputs, standardize_cache_format=True)
logits = outputs.logits[:, -1, :]
# name is different for encoder-decoder and decoder-only models
if self.config.is_encoder_decoder:
next_hidden = outputs.decoder_hidden_states[-1]
full_hidden_states = outputs.decoder_hidden_states
else:
next_hidden = outputs.hidden_states[-1]
full_hidden_states = outputs.hidden_states
context_hidden = last_hidden_states.repeat_interleave(top_k, dim=0)
# compute the degeneration penalty and re-rank the candidates based on the degeneration penalty and the
# model confidence
selected_idx = _ranking_fast(context_hidden, next_hidden, top_k_probs, penalty_alpha, top_k)
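            # `_ranking_fast` scores each candidate v as
            #   (1 - penalty_alpha) * p(v | context) - penalty_alpha * max cosine similarity
            # between v's hidden state and all previous hidden states, and returns the per-batch
            # argmax (the objective from "A Contrastive Framework for Neural Text Generation")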
# prepare for the next step: (1) next token_id; (2) past_key_values; (3) last_hidden_states for computing
# the degeneration penalty; (4) logits for selecting next top-k candidates; (5) selected tokens scores
# (model confidence minus degeneration penalty); (6) decoder hidden_states
next_tokens = top_k_ids[range(len(top_k_ids)), selected_idx]
next_hidden = torch.stack(torch.split(next_hidden.squeeze(dim=1), top_k))
next_hidden = next_hidden[range(batch_size), selected_idx, :]
last_hidden_states = torch.cat([last_hidden_states, next_hidden.unsqueeze(1)], dim=1)
next_decoder_hidden_states = ()
for layer in full_hidden_states:
layer = torch.stack(torch.split(layer, top_k))[range(batch_size), selected_idx, :]
next_decoder_hidden_states += (layer,)
# select the past_key_value
new_key_values = ()
for layer in next_past_key_values:
items = ()
# item is either the key or the value matrix
for item in layer:
item = torch.stack(torch.split(item, top_k, dim=0)) # [B, K, num_head, seq_len, esz]
item = item[range(batch_size), selected_idx, ...] # [B, num_head, seq_len, esz]
items += (item,)
new_key_values += (items,)
next_past_key_values = new_key_values
logit_for_next_step = torch.stack(torch.split(logits, top_k))[range(batch_size), selected_idx, :]
# Rebuilds the relevant parts of the model output for the selected token, for use in the next iteration
if self.config.is_encoder_decoder:
next_step_cross_attentions = ()
next_step_decoder_attentions = ()
if output_attentions:
for layer in outputs.cross_attentions:
layer = torch.stack(torch.split(layer, top_k, dim=0))[range(batch_size), selected_idx, ...]
next_step_cross_attentions += (layer,)
for layer in outputs.decoder_attentions:
layer = torch.stack(torch.split(layer, top_k, dim=0))[range(batch_size), selected_idx, ...]
next_step_decoder_attentions += (layer,)
outputs = Seq2SeqLMOutput(
past_key_values=next_past_key_values,
decoder_hidden_states=next_decoder_hidden_states,
decoder_attentions=next_step_decoder_attentions or None,
cross_attentions=next_step_cross_attentions or None,
)
else:
next_step_attentions = ()
if output_attentions:
for layer in outputs.attentions:
layer = torch.stack(torch.split(layer, top_k, dim=0))[range(batch_size), selected_idx, ...]
next_step_attentions += (layer,)
outputs = CausalLMOutputWithPast(
past_key_values=next_past_key_values,
hidden_states=next_decoder_hidden_states,
attentions=next_step_attentions or None,
)
# contrastive_search main logic end
if synced_gpus and this_peer_finished:
continue # don't waste resources running the code we don't need
# finished sentences should have their next token be a padding token
if eos_token_id is not None:
if pad_token_id is None:
raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
# update generated ids, model inputs, and length for next step
input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
# if eos_token was found in one sentence, set sentence to finished
if eos_token_id_tensor is not None:
unfinished_sequences = unfinished_sequences.mul(
next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)
)
# stop when each sentence is finished, or if we exceed the maximum length
if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
if not synced_gpus:
break
else:
this_peer_finished = True
if return_dict_in_generate:
if self.config.is_encoder_decoder:
return ContrastiveSearchEncoderDecoderOutput(
sequences=input_ids,
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return ContrastiveSearchDecoderOnlyOutput(
sequences=input_ids,
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return input_ids
def greedy_search(
self,
input_ids: torch.LongTensor,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[Union[int, List[int]]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
synced_gpus: Optional[bool] = False,
**model_kwargs,
) -> Union[GreedySearchOutput, torch.LongTensor]:
r"""
Generates sequences of token ids for models with a language modeling head using **greedy decoding** and can be
used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
<Tip warning={true}>
In most cases, you do not need to call [`~generation.GenerationMixin.greedy_search`] directly. Use generate()
instead. For an overview of generation strategies and code examples, check the [following
guide](./generation_strategies).
</Tip>
Parameters:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
logits_processor (`LogitsProcessorList`, *optional*):
An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
used to modify the prediction scores of the language modeling head applied at each generation step.
stopping_criteria (`StoppingCriteriaList`, *optional*):
An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
used to tell if the generation loop should stop.
max_length (`int`, *optional*, defaults to 20):
**DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated
tokens. The maximum length of the sequence to be generated.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, List[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more details.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more details.
output_scores (`bool`, *optional*, defaults to `False`):
Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
synced_gpus (`bool`, *optional*, defaults to `False`):
Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
model_kwargs:
Additional model specific keyword arguments will be forwarded to the `forward` function of the model.
If model is an encoder-decoder model the kwargs should include `encoder_outputs`.
Return:
[`~generation.GreedySearchDecoderOnlyOutput`], [`~generation.GreedySearchEncoderDecoderOutput`] or
`torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
[`~generation.GreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
`return_dict_in_generate=True` or a [`~generation.GreedySearchEncoderDecoderOutput`] if
`model.config.is_encoder_decoder=True`.
Examples:
```python
>>> from transformers import (
... AutoTokenizer,
... AutoModelForCausalLM,
... LogitsProcessorList,
... MinLengthLogitsProcessor,
... StoppingCriteriaList,
... MaxLengthCriteria,
... )
>>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
>>> model = AutoModelForCausalLM.from_pretrained("gpt2")
>>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
>>> model.generation_config.pad_token_id = model.generation_config.eos_token_id
>>> input_prompt = "It might be possible to"
>>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids
>>> # instantiate logits processors
>>> logits_processor = LogitsProcessorList(
... [
... MinLengthLogitsProcessor(10, eos_token_id=model.generation_config.eos_token_id),
... ]
... )
>>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
>>> outputs = model.greedy_search(
... input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria
... )
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
["It might be possible to get a better understanding of the nature of the problem, but it's not"]
```"""
# init values
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
if max_length is not None:
warnings.warn(
"`max_length` is deprecated in this function, use"
" `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.",
UserWarning,
)
stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None
output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
output_attentions = (
output_attentions if output_attentions is not None else self.generation_config.output_attentions
)
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate
if return_dict_in_generate is not None
else self.generation_config.return_dict_in_generate
)
# init attention / hidden states / scores tuples
scores = () if (return_dict_in_generate and output_scores) else None
decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
cross_attentions = () if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
# keep track of which sequences are already finished
unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
this_peer_finished = False # used by synced_gpus only
while True:
if synced_gpus:
# Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
# The following logic allows an early break if all peers finished generating their sequence
this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)
# send 0.0 if we finished, 1.0 otherwise
dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
# did all peers finish? the reduced sum will be 0.0 then
if this_peer_finished_flag.item() == 0.0:
break
# prepare model inputs
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
# forward pass to get next token
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
if synced_gpus and this_peer_finished:
continue # don't waste resources running the code we don't need
next_token_logits = outputs.logits[:, -1, :]
# pre-process distribution
next_tokens_scores = logits_processor(input_ids, next_token_logits)
# Store scores, attentions and hidden_states when required
if return_dict_in_generate:
if output_scores:
scores += (next_tokens_scores,)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if self.config.is_encoder_decoder:
cross_attentions += (outputs.cross_attentions,)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
# argmax
next_tokens = torch.argmax(next_tokens_scores, dim=-1)
# finished sentences should have their next token be a padding token
if eos_token_id is not None:
if pad_token_id is None:
raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
# update generated ids, model inputs, and length for next step
input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
# if eos_token was found in one sentence, set sentence to finished
if eos_token_id_tensor is not None:
unfinished_sequences = unfinished_sequences.mul(
next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)
)
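                # `ne(...).prod(dim=0)` compares `next_tokens` against every eos id at once and is 0
                # exactly when the token matches one of them, zeroing that sequence's flag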
# stop when each sentence is finished, or if we exceed the maximum length
if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
if not synced_gpus:
break
else:
this_peer_finished = True
if return_dict_in_generate:
if self.config.is_encoder_decoder:
return GreedySearchEncoderDecoderOutput(
sequences=input_ids,
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return GreedySearchDecoderOnlyOutput(
sequences=input_ids,
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return input_ids
def sample(
self,
input_ids: torch.LongTensor,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
logits_warper: Optional[LogitsProcessorList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[Union[int, List[int]]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
synced_gpus: Optional[bool] = False,
**model_kwargs,
) -> Union[SampleOutput, torch.LongTensor]:
r"""
Generates sequences of token ids for models with a language modeling head using **multinomial sampling** and
can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
<Tip warning={true}>
In most cases, you do not need to call [`~generation.GenerationMixin.sample`] directly. Use generate() instead.
For an overview of generation strategies and code examples, check the [following
guide](./generation_strategies).
</Tip>
Parameters:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
logits_processor (`LogitsProcessorList`, *optional*):
An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
used to modify the prediction scores of the language modeling head applied at each generation step.
stopping_criteria (`StoppingCriteriaList`, *optional*):
An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
used to tell if the generation loop should stop.
logits_warper (`LogitsProcessorList`, *optional*):
An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used
to warp the prediction score distribution of the language modeling head applied before multinomial
sampling at each generation step.
max_length (`int`, *optional*, defaults to 20):
**DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated
tokens. The maximum length of the sequence to be generated.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, List[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more details.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more details.
output_scores (`bool`, *optional*, defaults to `False`):
Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
synced_gpus (`bool`, *optional*, defaults to `False`):
Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
model_kwargs:
Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is
an encoder-decoder model the kwargs should include `encoder_outputs`.
Return:
[`~generation.SampleDecoderOnlyOutput`], [`~generation.SampleEncoderDecoderOutput`] or `torch.LongTensor`:
A `torch.LongTensor` containing the generated tokens (default behaviour) or a
[`~generation.SampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
`return_dict_in_generate=True` or a [`~generation.SampleEncoderDecoderOutput`] if
`model.config.is_encoder_decoder=True`.
Examples:
```python
>>> from transformers import (
... AutoTokenizer,
... AutoModelForCausalLM,
... LogitsProcessorList,
... MinLengthLogitsProcessor,
... TopKLogitsWarper,
... TemperatureLogitsWarper,
... StoppingCriteriaList,
... MaxLengthCriteria,
... )
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
>>> model = AutoModelForCausalLM.from_pretrained("gpt2")
        >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
>>> model.config.pad_token_id = model.config.eos_token_id
>>> model.generation_config.pad_token_id = model.config.eos_token_id
>>> input_prompt = "Today is a beautiful day, and"
>>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids
>>> # instantiate logits processors
>>> logits_processor = LogitsProcessorList(
... [
... MinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id),
... ]
... )
        >>> # instantiate logits warpers
>>> logits_warper = LogitsProcessorList(
... [
... TopKLogitsWarper(50),
... TemperatureLogitsWarper(0.7),
... ]
... )
>>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
>>> torch.manual_seed(0) # doctest: +IGNORE_RESULT
>>> outputs = model.sample(
... input_ids,
... logits_processor=logits_processor,
... logits_warper=logits_warper,
... stopping_criteria=stopping_criteria,
... )
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Today is a beautiful day, and a wonderful day.\n\nI was lucky enough to meet the']
```"""
# init values
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
if max_length is not None:
warnings.warn(
"`max_length` is deprecated in this function, use"
" `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.",
UserWarning,
)
stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList()
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None
output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
output_attentions = (
output_attentions if output_attentions is not None else self.generation_config.output_attentions
)
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate
if return_dict_in_generate is not None
else self.generation_config.return_dict_in_generate
)
# init attention / hidden states / scores tuples
scores = () if (return_dict_in_generate and output_scores) else None
decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
cross_attentions = () if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
# keep track of which sequences are already finished
unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
this_peer_finished = False # used by synced_gpus only
# auto-regressive generation
while True:
if synced_gpus:
# Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
# The following logic allows an early break if all peers finished generating their sequence
this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)
# send 0.0 if we finished, 1.0 otherwise
dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
# did all peers finish? the reduced sum will be 0.0 then
if this_peer_finished_flag.item() == 0.0:
break
# prepare model inputs
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
# forward pass to get next token
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
if synced_gpus and this_peer_finished:
continue # don't waste resources running the code we don't need
next_token_logits = outputs.logits[:, -1, :]
# pre-process distribution
next_token_scores = logits_processor(input_ids, next_token_logits)
next_token_scores = logits_warper(input_ids, next_token_scores)
# Store scores, attentions and hidden_states when required
if return_dict_in_generate:
if output_scores:
scores += (next_token_scores,)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if self.config.is_encoder_decoder:
cross_attentions += (outputs.cross_attentions,)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
# sample
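            # softmax turns the warped scores into a probability distribution; multinomial then draws
            # one token id per sequence, and squeeze(1) drops the num_samples dimension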
probs = nn.functional.softmax(next_token_scores, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
# finished sentences should have their next token be a padding token
if eos_token_id is not None:
if pad_token_id is None:
raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
# update generated ids, model inputs, and length for next step
input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
# if eos_token was found in one sentence, set sentence to finished
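            # (tile repeats the sampled tokens once per EOS id; ne + prod(dim=0) is 1 only for tokens
            # matching none of the EOS ids, so the multiply keeps a sequence marked unfinished (1)
            # until it emits any EOS id, after which it stays 0)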
if eos_token_id_tensor is not None:
unfinished_sequences = unfinished_sequences.mul(
next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)
)
# stop when each sentence is finished, or if we exceed the maximum length
if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
if not synced_gpus:
break
else:
this_peer_finished = True
if return_dict_in_generate:
if self.config.is_encoder_decoder:
return SampleEncoderDecoderOutput(
sequences=input_ids,
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return SampleDecoderOnlyOutput(
sequences=input_ids,
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return input_ids
def beam_search(
self,
input_ids: torch.LongTensor,
beam_scorer: BeamScorer,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[Union[int, List[int]]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
synced_gpus: Optional[bool] = False,
**model_kwargs,
) -> Union[BeamSearchOutput, torch.LongTensor]:
r"""
Generates sequences of token ids for models with a language modeling head using **beam search decoding** and
can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
<Tip warning={true}>
In most cases, you do not need to call [`~generation.GenerationMixin.beam_search`] directly. Use generate()
instead. For an overview of generation strategies and code examples, check the [following
guide](./generation_strategies).
</Tip>
Parameters:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
beam_scorer (`BeamScorer`):
                A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and
sorted during generation. For more information, the documentation of [`BeamScorer`] should be read.
logits_processor (`LogitsProcessorList`, *optional*):
An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
used to modify the prediction scores of the language modeling head applied at each generation step.
stopping_criteria (`StoppingCriteriaList`, *optional*):
An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
used to tell if the generation loop should stop.
max_length (`int`, *optional*, defaults to 20):
**DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated
tokens. The maximum length of the sequence to be generated.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, List[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more details.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more details.
output_scores (`bool`, *optional*, defaults to `False`):
Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
synced_gpus (`bool`, *optional*, defaults to `False`):
Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
model_kwargs:
Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is
an encoder-decoder model the kwargs should include `encoder_outputs`.
Return:
            [`~generation.BeamSearchDecoderOnlyOutput`], [`~generation.BeamSearchEncoderDecoderOutput`] or
`torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
[`~generation.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
`return_dict_in_generate=True` or a [`~generation.BeamSearchEncoderDecoderOutput`] if
`model.config.is_encoder_decoder=True`.
Examples:
```python
>>> from transformers import (
... AutoTokenizer,
... AutoModelForSeq2SeqLM,
... LogitsProcessorList,
... MinLengthLogitsProcessor,
... BeamSearchScorer,
... )
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
>>> encoder_input_str = "translate English to German: How old are you?"
>>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
        >>> # let's run beam search using 3 beams
>>> num_beams = 3
>>> # define decoder start token ids
>>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
>>> input_ids = input_ids * model.config.decoder_start_token_id
>>> # add encoder_outputs to model keyword arguments
>>> model_kwargs = {
... "encoder_outputs": model.get_encoder()(
... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True
... )
... }
>>> # instantiate beam scorer
>>> beam_scorer = BeamSearchScorer(
... batch_size=1,
... num_beams=num_beams,
... device=model.device,
... )
>>> # instantiate logits processors
>>> logits_processor = LogitsProcessorList(
... [
... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id),
... ]
... )
>>> outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Wie alt bist du?']
```"""
# init values
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
if max_length is not None:
warnings.warn(
"`max_length` is deprecated in this function, use"
" `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.",
UserWarning,
)
stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
if len(stopping_criteria) == 0:
warnings.warn("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning)
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
output_attentions = (
output_attentions if output_attentions is not None else self.generation_config.output_attentions
)
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate
if return_dict_in_generate is not None
else self.generation_config.return_dict_in_generate
)
batch_size = len(beam_scorer._beam_hyps)
num_beams = beam_scorer.num_beams
batch_beam_size, cur_len = input_ids.shape
if num_beams * batch_size != batch_beam_size:
raise ValueError(
f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
)
# init attention / hidden states / scores tuples
scores = () if (return_dict_in_generate and output_scores) else None
beam_indices = (
tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None
)
decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
cross_attentions = () if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
# initialise score of first beam with 0 and the rest with -1e9. This makes sure that only tokens
# of the first beam are considered to avoid sampling the exact same tokens across all beams.
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view((batch_size * num_beams,))
this_peer_finished = False # used by synced_gpus only
while True:
if synced_gpus:
# Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
# The following logic allows an early break if all peers finished generating their sequence
this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)
# send 0.0 if we finished, 1.0 otherwise
dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
# did all peers finish? the reduced sum will be 0.0 then
if this_peer_finished_flag.item() == 0.0:
break
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
if synced_gpus and this_peer_finished:
cur_len = cur_len + 1
continue # don't waste resources running the code we don't need
next_token_logits = outputs.logits[:, -1, :]
# hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id`
# cannot be generated both before and after the `nn.functional.log_softmax` operation.
next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len)
next_token_scores = nn.functional.log_softmax(
next_token_logits, dim=-1
) # (batch_size * num_beams, vocab_size)
next_token_scores_processed = logits_processor(input_ids, next_token_scores)
next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(next_token_scores)
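            # beam_scores carries the running sum of log-probabilities per beam, so each candidate's
            # score becomes log P(sequence + token), the quantity beam search ranks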
# Store scores, attentions and hidden_states when required
if return_dict_in_generate:
if output_scores:
scores += (next_token_scores_processed,)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if self.config.is_encoder_decoder:
cross_attentions += (outputs.cross_attentions,)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
# reshape for beam search
vocab_size = next_token_scores.shape[-1]
next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size)
            # Take the 2 * num_beams best candidates per sentence so that, even if up to num_beams of
            # them end in EOS, enough beams remain to continue the search
next_token_scores, next_tokens = torch.topk(
next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True
)
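            # topk ran over the flattened (num_beams * vocab_size) axis, so each index encodes both a
            # beam and a token: integer division recovers the beam, modulo recovers the vocabulary id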
next_indices = torch_int_div(next_tokens, vocab_size)
next_tokens = next_tokens % vocab_size
# stateless
beam_outputs = beam_scorer.process(
input_ids,
next_token_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
beam_indices=beam_indices,
)
beam_scores = beam_outputs["next_beam_scores"]
beam_next_tokens = beam_outputs["next_beam_tokens"]
beam_idx = beam_outputs["next_beam_indices"]
input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
if model_kwargs["past_key_values"] is not None:
model_kwargs["past_key_values"] = self._reorder_cache(model_kwargs["past_key_values"], beam_idx)
if return_dict_in_generate and output_scores:
beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices))))
# increase cur_len
cur_len = cur_len + 1
if beam_scorer.is_done or stopping_criteria(input_ids, scores):
if not synced_gpus:
break
else:
this_peer_finished = True
sequence_outputs = beam_scorer.finalize(
input_ids,
beam_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
max_length=stopping_criteria.max_length,
beam_indices=beam_indices,
)
if return_dict_in_generate:
if not output_scores:
sequence_outputs["sequence_scores"] = None
if self.config.is_encoder_decoder:
return BeamSearchEncoderDecoderOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
beam_indices=sequence_outputs["beam_indices"],
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return BeamSearchDecoderOnlyOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
beam_indices=sequence_outputs["beam_indices"],
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return sequence_outputs["sequences"]
def beam_sample(
self,
input_ids: torch.LongTensor,
beam_scorer: BeamScorer,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
logits_warper: Optional[LogitsProcessorList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[Union[int, List[int]]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
synced_gpus: Optional[bool] = False,
**model_kwargs,
) -> Union[BeamSampleOutput, torch.LongTensor]:
r"""
Generates sequences of token ids for models with a language modeling head using **beam search multinomial
sampling** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
<Tip warning={true}>
In most cases, you do not need to call [`~generation.GenerationMixin.beam_sample`] directly. Use generate()
instead. For an overview of generation strategies and code examples, check the [following
guide](./generation_strategies).
</Tip>
Parameters:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
beam_scorer (`BeamScorer`):
A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and
sorted during generation. For more information, the documentation of [`BeamScorer`] should be read.
logits_processor (`LogitsProcessorList`, *optional*):
An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
used to modify the prediction scores of the language modeling head applied at each generation step.
stopping_criteria (`StoppingCriteriaList`, *optional*):
An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
used to tell if the generation loop should stop.
logits_warper (`LogitsProcessorList`, *optional*):
An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used
to warp the prediction score distribution of the language modeling head applied before multinomial
sampling at each generation step.
max_length (`int`, *optional*, defaults to 20):
**DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated
tokens. The maximum length of the sequence to be generated.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, List[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more details.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more details.
output_scores (`bool`, *optional*, defaults to `False`):
Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
synced_gpus (`bool`, *optional*, defaults to `False`):
Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
model_kwargs:
Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is
an encoder-decoder model the kwargs should include `encoder_outputs`.
Return:
[`~generation.BeamSampleDecoderOnlyOutput`], [`~generation.BeamSampleEncoderDecoderOutput`] or
`torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
[`~generation.BeamSampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
`return_dict_in_generate=True` or a [`~generation.BeamSampleEncoderDecoderOutput`] if
`model.config.is_encoder_decoder=True`.
Examples:
```python
>>> from transformers import (
... AutoTokenizer,
... AutoModelForSeq2SeqLM,
... LogitsProcessorList,
... MinLengthLogitsProcessor,
... TopKLogitsWarper,
... TemperatureLogitsWarper,
... BeamSearchScorer,
... )
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
>>> encoder_input_str = "translate English to German: How old are you?"
>>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
        >>> # let's run beam search using 3 beams
>>> num_beams = 3
>>> # define decoder start token ids
>>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
>>> input_ids = input_ids * model.config.decoder_start_token_id
>>> # add encoder_outputs to model keyword arguments
>>> model_kwargs = {
... "encoder_outputs": model.get_encoder()(
... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True
... )
... }
>>> # instantiate beam scorer
>>> beam_scorer = BeamSearchScorer(
... batch_size=1,
... max_length=model.config.max_length,
... num_beams=num_beams,
... device=model.device,
... )
>>> # instantiate logits processors
>>> logits_processor = LogitsProcessorList(
... [MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id)]
... )
        >>> # instantiate logits warpers
>>> logits_warper = LogitsProcessorList(
... [
... TopKLogitsWarper(50),
... TemperatureLogitsWarper(0.7),
... ]
... )
>>> outputs = model.beam_sample(
... input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, **model_kwargs
... )
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Wie alt bist du?']
```"""
# init values
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
if max_length is not None:
warnings.warn(
"`max_length` is deprecated in this function, use"
" `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.",
UserWarning,
)
stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
output_attentions = (
output_attentions if output_attentions is not None else self.generation_config.output_attentions
)
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate
if return_dict_in_generate is not None
else self.generation_config.return_dict_in_generate
)
batch_size = len(beam_scorer._beam_hyps)
num_beams = beam_scorer.num_beams
batch_beam_size, cur_len = input_ids.shape
# init attention / hidden states / scores tuples
scores = () if (return_dict_in_generate and output_scores) else None
beam_indices = (
tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None
)
decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
cross_attentions = () if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
beam_scores = beam_scores.view((batch_size * num_beams,))
this_peer_finished = False # used by synced_gpus only
while True:
if synced_gpus:
# Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
# The following logic allows an early break if all peers finished generating their sequence
this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)
# send 0.0 if we finished, 1.0 otherwise
dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
# did all peers finish? the reduced sum will be 0.0 then
if this_peer_finished_flag.item() == 0.0:
break
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
if synced_gpus and this_peer_finished:
cur_len = cur_len + 1
continue # don't waste resources running the code we don't need
next_token_logits = outputs.logits[:, -1, :]
# hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id`
# cannot be generated both before and after the `nn.functional.log_softmax` operation.
next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len)
next_token_scores = nn.functional.log_softmax(
next_token_logits, dim=-1
) # (batch_size * num_beams, vocab_size)
next_token_scores_processed = logits_processor(input_ids, next_token_scores)
next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(next_token_scores)
# Note: logits warpers are intentionally applied after adding running beam scores. On some logits warpers
            # (like top_p) this makes no difference, but on others (like temperature) it does. For reference, see
# https://github.com/huggingface/transformers/pull/5420#discussion_r449779867
next_token_scores = logits_warper(input_ids, next_token_scores)
# Store scores, attentions and hidden_states when required
if return_dict_in_generate:
if output_scores:
scores += (logits_warper(input_ids, next_token_scores_processed),)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if self.config.is_encoder_decoder:
cross_attentions += (outputs.cross_attentions,)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
# reshape for beam search
vocab_size = next_token_scores.shape[-1]
next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size)
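            # unlike pure beam search, which takes a deterministic top-k, beam_sample draws
            # 2 * num_beams candidates from the softmax of the scores, then sorts the draws by score so
            # the beam scorer receives them in the same best-first order that topk would produce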
probs = nn.functional.softmax(next_token_scores, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=2 * num_beams)
next_token_scores = torch.gather(next_token_scores, -1, next_tokens)
next_token_scores, _indices = torch.sort(next_token_scores, descending=True, dim=1)
next_tokens = torch.gather(next_tokens, -1, _indices)
next_indices = torch_int_div(next_tokens, vocab_size)
next_tokens = next_tokens % vocab_size
# stateless
beam_outputs = beam_scorer.process(
input_ids,
next_token_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
beam_indices=beam_indices,
)
beam_scores = beam_outputs["next_beam_scores"]
beam_next_tokens = beam_outputs["next_beam_tokens"]
beam_idx = beam_outputs["next_beam_indices"]
input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
if model_kwargs["past_key_values"] is not None:
model_kwargs["past_key_values"] = self._reorder_cache(model_kwargs["past_key_values"], beam_idx)
if return_dict_in_generate and output_scores:
beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices))))
# increase cur_len
cur_len = cur_len + 1
if beam_scorer.is_done or stopping_criteria(input_ids, scores):
if not synced_gpus:
break
else:
this_peer_finished = True
sequence_outputs = beam_scorer.finalize(
input_ids,
beam_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
max_length=stopping_criteria.max_length,
beam_indices=beam_indices,
)
if return_dict_in_generate:
if not output_scores:
sequence_outputs["sequence_scores"] = None
if self.config.is_encoder_decoder:
return BeamSampleEncoderDecoderOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
beam_indices=sequence_outputs["beam_indices"],
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return BeamSampleDecoderOnlyOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
beam_indices=sequence_outputs["beam_indices"],
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return sequence_outputs["sequences"]
def group_beam_search(
self,
input_ids: torch.LongTensor,
beam_scorer: BeamScorer,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[Union[int, List[int]]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
synced_gpus: Optional[bool] = False,
**model_kwargs,
):
r"""
Generates sequences of token ids for models with a language modeling head using **diverse beam search
decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
<Tip warning={true}>
In most cases, you do not need to call [`~generation.GenerationMixin.group_beam_search`] directly. Use
generate() instead. For an overview of generation strategies and code examples, check the [following
guide](./generation_strategies).
</Tip>
Parameters:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
beam_scorer (`BeamScorer`):
                A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and
sorted during generation. For more information, the documentation of [`BeamScorer`] should be read.
logits_processor (`LogitsProcessorList`, *optional*):
An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
used to modify the prediction scores of the language modeling head applied at each generation step.
stopping_criteria (`StoppingCriteriaList`, *optional*):
An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
used to tell if the generation loop should stop.
max_length (`int`, *optional*, defaults to 20):
**DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated
tokens. The maximum length of the sequence to be generated.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, List[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more details.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more details.
output_scores (`bool`, *optional*, defaults to `False`):
Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
synced_gpus (`bool`, *optional*, defaults to `False`):
Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
model_kwargs:
Additional model specific kwargs that will be forwarded to the `forward` function of the model. If
model is an encoder-decoder model the kwargs should include `encoder_outputs`.
Return:
[`~generation.BeamSearchDecoderOnlyOutput`], [`~generation.BeamSearchEncoderDecoderOutput`] or
`torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
            [`~generation.BeamSearchDecoderOnlyOutput`] if
`model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a
[`~generation.BeamSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`.
Examples:
```python
>>> from transformers import (
... AutoTokenizer,
... AutoModelForSeq2SeqLM,
... LogitsProcessorList,
... MinLengthLogitsProcessor,
... HammingDiversityLogitsProcessor,
... BeamSearchScorer,
... )
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
>>> encoder_input_str = "translate English to German: How old are you?"
>>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
        >>> # let's run diverse beam search using 6 beams
>>> num_beams = 6
>>> # define decoder start token ids
>>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
>>> input_ids = input_ids * model.config.decoder_start_token_id
>>> # add encoder_outputs to model keyword arguments
>>> model_kwargs = {
... "encoder_outputs": model.get_encoder()(
... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True
... )
... }
>>> # instantiate beam scorer
>>> beam_scorer = BeamSearchScorer(
... batch_size=1,
... max_length=model.config.max_length,
... num_beams=num_beams,
... device=model.device,
... num_beam_groups=3,
... )
>>> # instantiate logits processors
>>> logits_processor = LogitsProcessorList(
... [
... HammingDiversityLogitsProcessor(5.5, num_beams=6, num_beam_groups=3),
... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id),
... ]
... )
>>> outputs = model.group_beam_search(
... input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs
... )
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Wie alt bist du?']
```"""
# init values
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
if max_length is not None:
warnings.warn(
"`max_length` is deprecated in this function, use"
" `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.",
UserWarning,
)
stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
output_attentions = (
output_attentions if output_attentions is not None else self.generation_config.output_attentions
)
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate
if return_dict_in_generate is not None
else self.generation_config.return_dict_in_generate
)
batch_size = len(beam_scorer._beam_hyps)
num_beams = beam_scorer.num_beams
num_beam_groups = beam_scorer.num_beam_groups
num_sub_beams = num_beams // num_beam_groups
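        # each of the num_beam_groups groups runs its own sub-beam search over num_sub_beams beams;
        # diversity across groups comes from the logits processor (e.g. HammingDiversityLogitsProcessor
        # penalizes tokens that earlier groups already picked at the current step)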
device = input_ids.device
batch_beam_size, cur_len = input_ids.shape
if return_dict_in_generate and output_scores:
beam_indices = [tuple(() for _ in range(num_sub_beams * batch_size)) for _ in range(num_beam_groups)]
else:
beam_indices = None
if num_beams * batch_size != batch_beam_size:
raise ValueError(
f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
)
# init attention / hidden states / scores tuples
scores = () if (return_dict_in_generate and output_scores) else None
decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
cross_attentions = () if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
# initialise score of first beam of each group with 0 and the rest with -1e9. This ensures that the beams in
        # the same group don't produce the same tokens every time.
beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device)
beam_scores[:, ::num_sub_beams] = 0
beam_scores = beam_scores.view((batch_size * num_beams,))
this_peer_finished = False # used by synced_gpus only
while True:
if synced_gpus:
# Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
# The following logic allows an early break if all peers finished generating their sequence
this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)
# send 0.0 if we finished, 1.0 otherwise
dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
# did all peers finish? the reduced sum will be 0.0 then
if this_peer_finished_flag.item() == 0.0:
break
# predicted tokens in cur_len step
current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device)
# indices which will form the beams in the next time step
reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device)
# do one decoder step on all beams of all sentences in batch
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
if synced_gpus and this_peer_finished:
cur_len = cur_len + 1
continue # don't waste resources running the code we don't need
if output_scores:
processed_score = torch.zeros_like(outputs.logits[:, -1, :])
for beam_group_idx in range(num_beam_groups):
group_start_idx = beam_group_idx * num_sub_beams
group_end_idx = min(group_start_idx + num_sub_beams, num_beams)
group_size = group_end_idx - group_start_idx
# indices of beams of current group among all sentences in batch
batch_group_indices = []
for batch_idx in range(batch_size):
batch_group_indices.extend(
[batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)]
)
group_input_ids = input_ids[batch_group_indices]
# select outputs of beams of current group only
next_token_logits = outputs.logits[batch_group_indices, -1, :]
# hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id`
# cannot be generated both before and after the `nn.functional.log_softmax` operation.
next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len)
next_token_scores = nn.functional.log_softmax(
next_token_logits, dim=-1
) # (batch_size * group_size, vocab_size)
vocab_size = next_token_scores.shape[-1]
next_token_scores_processed = logits_processor(
group_input_ids, next_token_scores, current_tokens=current_tokens, beam_group_idx=beam_group_idx
)
next_token_scores = next_token_scores_processed + beam_scores[batch_group_indices].unsqueeze(-1)
next_token_scores = next_token_scores.expand_as(next_token_scores_processed)
if output_scores:
processed_score[batch_group_indices] = next_token_scores_processed
# reshape for beam search
next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size)
                # Take the 2 * group_size best candidates per sentence so that enough beams in this
                # group survive even if some of them end in EOS
next_token_scores, next_tokens = torch.topk(
next_token_scores, 2 * group_size, dim=1, largest=True, sorted=True
)
next_indices = torch_int_div(next_tokens, vocab_size)
next_tokens = next_tokens % vocab_size
# stateless
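                # sum(beam_indices, ()) flattens the per-group tuples of beam-index histories into the
                # single flat tuple layout that beam_scorer.process expects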
process_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None
beam_outputs = beam_scorer.process(
group_input_ids,
next_token_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
beam_indices=process_beam_indices,
)
beam_scores[batch_group_indices] = beam_outputs["next_beam_scores"]
beam_next_tokens = beam_outputs["next_beam_tokens"]
beam_idx = beam_outputs["next_beam_indices"]
if return_dict_in_generate and output_scores:
beam_indices[beam_group_idx] = tuple(
beam_indices[beam_group_idx][beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices[0]))
)
input_ids[batch_group_indices] = group_input_ids[beam_idx]
group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
current_tokens[batch_group_indices] = group_input_ids[:, -1]
# (beam_idx // group_size) -> batch_idx
# (beam_idx % group_size) -> offset of idx inside the group
reordering_indices[batch_group_indices] = (
num_beams * torch_int_div(beam_idx, group_size) + group_start_idx + (beam_idx % group_size)
)
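                # illustrative check (editor's note): with num_beams=6, group_size=2, group_start_idx=2,
                # a group-local beam_idx of 3 maps to 6 * (3 // 2) + 2 + (3 % 2) = 9, i.e. beam 3 of the
                # second sentence in the flat batch*beams layout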
# Store scores, attentions and hidden_states when required
if return_dict_in_generate:
if output_scores:
scores += (processed_score,)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if self.config.is_encoder_decoder:
cross_attentions += (outputs.cross_attentions,)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1)
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
if model_kwargs["past_key_values"] is not None:
model_kwargs["past_key_values"] = self._reorder_cache(
model_kwargs["past_key_values"], reordering_indices
)
# increase cur_len
cur_len = cur_len + 1
if beam_scorer.is_done or stopping_criteria(input_ids, scores):
if not synced_gpus:
break
else:
this_peer_finished = True
final_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None
sequence_outputs = beam_scorer.finalize(
input_ids,
beam_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
max_length=stopping_criteria.max_length,
beam_indices=final_beam_indices,
)
if return_dict_in_generate:
if not output_scores:
sequence_outputs["sequence_scores"] = None
if self.config.is_encoder_decoder:
return BeamSearchEncoderDecoderOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
beam_indices=sequence_outputs["beam_indices"],
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return BeamSearchDecoderOnlyOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
beam_indices=sequence_outputs["beam_indices"],
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return sequence_outputs["sequences"]
def constrained_beam_search(
self,
input_ids: torch.LongTensor,
constrained_beam_scorer: ConstrainedBeamSearchScorer,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[Union[int, List[int]]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
synced_gpus: Optional[bool] = None,
**model_kwargs,
) -> Union[BeamSearchOutput, torch.LongTensor]:
r"""
Generates sequences of token ids for models with a language modeling head using **constrained beam search
decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
<Tip warning={true}>
In most cases, you do not need to call [`~generation.GenerationMixin.constrained_beam_search`] directly. Use
generate() instead. For an overview of generation strategies and code examples, check the [following
guide](./generation_strategies).
</Tip>
Parameters:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
constrained_beam_scorer (`ConstrainedBeamSearchScorer`):
A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and
sorted during generation, while satisfying a list of positive constraints. For more information, the
documentation of [`ConstrainedBeamSearchScorer`] should be read.
logits_processor (`LogitsProcessorList`, *optional*):
An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
used to modify the prediction scores of the language modeling head applied at each generation step.
stopping_criteria (`StoppingCriteriaList`, *optional*):
An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
used to tell if the generation loop should stop.
max_length (`int`, *optional*, defaults to 20):
**DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated
tokens. The maximum length of the sequence to be generated.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, List[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more details.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more details.
output_scores (`bool`, *optional*, defaults to `False`):
Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
synced_gpus (`bool`, *optional*, defaults to `False`):
Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
model_kwargs:
Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is
an encoder-decoder model the kwargs should include `encoder_outputs`.
Return:
            [`~generation.BeamSearchDecoderOnlyOutput`], [`~generation.BeamSearchEncoderDecoderOutput`] or
`torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
[`~generation.BeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
`return_dict_in_generate=True` or a [`~generation.BeamSearchEncoderDecoderOutput`] if
`model.config.is_encoder_decoder=True`.
Examples:
```python
>>> from transformers import (
... AutoTokenizer,
... AutoModelForSeq2SeqLM,
... LogitsProcessorList,
... MinLengthLogitsProcessor,
... ConstrainedBeamSearchScorer,
... PhrasalConstraint,
... )
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("t5-base")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
>>> encoder_input_str = "translate English to German: How old are you?"
>>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
        >>> # let's run beam search using 3 beams
>>> num_beams = 3
>>> # define decoder start token ids
>>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
>>> input_ids = input_ids * model.config.decoder_start_token_id
>>> # add encoder_outputs to model keyword arguments
>>> model_kwargs = {
... "encoder_outputs": model.get_encoder()(
... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True
... )
... }
>>> constraint_str = "Sie"
>>> constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # slice to remove eos token
>>> constraints = [PhrasalConstraint(token_ids=constraint_token_ids)]
>>> # instantiate beam scorer
>>> beam_scorer = ConstrainedBeamSearchScorer(
... batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints
... )
>>> # instantiate logits processors
>>> logits_processor = LogitsProcessorList(
... [
... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id),
... ]
... )
>>> outputs = model.constrained_beam_search(
        ... input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs
... )
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Wie alt sind Sie?']
```"""
# init values
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
if max_length is not None:
warnings.warn(
"`max_length` is deprecated in this function, use"
" `stopping_criteria=StoppingCriteriaList(MaxLengthCriteria(max_length=max_length))` instead.",
UserWarning,
)
stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
if len(stopping_criteria) == 0:
warnings.warn("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning)
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
output_attentions = (
output_attentions if output_attentions is not None else self.generation_config.output_attentions
)
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate
if return_dict_in_generate is not None
else self.generation_config.return_dict_in_generate
)
# init attention / hidden states / scores tuples
scores = () if (return_dict_in_generate and output_scores) else None
decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
cross_attentions = () if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
batch_size = len(constrained_beam_scorer._beam_hyps)
num_beams = constrained_beam_scorer.num_beams
batch_beam_size, cur_len = input_ids.shape
if num_beams * batch_size != batch_beam_size:
raise ValueError(
f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
)
# initialise score of first beam with 0 and the rest with -1e9. This makes sure that only tokens
# of the first beam are considered to avoid sampling the exact same tokens across all beams.
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view((batch_size * num_beams,))
this_peer_finished = False # used by synced_gpus only
while True:
if synced_gpus:
# Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
# The following logic allows an early break if all peers finished generating their sequence
this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)
# send 0.0 if we finished, 1.0 otherwise
dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
# did all peers finish? the reduced sum will be 0.0 then
if this_peer_finished_flag.item() == 0.0:
break
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
if synced_gpus and this_peer_finished:
cur_len = cur_len + 1
continue # don't waste resources running the code we don't need
next_token_logits = outputs.logits[:, -1, :]
# hack: adjust tokens for Marian. For Marian we have to make sure that the `pad_token_id`
# cannot be generated both before and after the `nn.functional.log_softmax` operation.
next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len)
next_token_scores = nn.functional.log_softmax(
next_token_logits, dim=-1
) # (batch_size * num_beams, vocab_size)
next_token_scores_processed = logits_processor(input_ids, next_token_scores)
next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as(next_token_scores)
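            # keep a full-vocabulary copy of the scores: the constrained scorer must be able to look up
            # the score of any token a constraint forces next, not just the 2 * num_beams topk candidates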
scores_for_all_vocab = next_token_scores.clone()
# Store scores, attentions and hidden_states when required
if return_dict_in_generate:
if output_scores:
scores += (next_token_scores,)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if self.config.is_encoder_decoder:
cross_attentions += (outputs.cross_attentions,)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
# reshape for beam search
vocab_size = next_token_scores.shape[-1]
next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size)
            # Take the 2 * num_beams best candidates per sentence so that, even if up to num_beams of
            # them end in EOS, enough beams remain to continue the search
next_token_scores, next_tokens = torch.topk(
next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True
)
            next_indices = torch_int_div(next_tokens, vocab_size)
next_tokens = next_tokens % vocab_size
# stateless
beam_outputs = constrained_beam_scorer.process(
input_ids,
next_token_scores,
next_tokens,
next_indices,
scores_for_all_vocab,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
)
beam_scores = beam_outputs["next_beam_scores"]
beam_next_tokens = beam_outputs["next_beam_tokens"]
beam_idx = beam_outputs["next_beam_indices"]
input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
if model_kwargs["past_key_values"] is not None:
model_kwargs["past_key_values"] = self._reorder_cache(model_kwargs["past_key_values"], beam_idx)
# increase cur_len
cur_len = cur_len + 1
if constrained_beam_scorer.is_done or stopping_criteria(input_ids, scores):
if not synced_gpus:
break
else:
this_peer_finished = True
sequence_outputs = constrained_beam_scorer.finalize(
input_ids,
beam_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
max_length=stopping_criteria.max_length,
)
if return_dict_in_generate:
if not output_scores:
sequence_outputs["sequence_scores"] = None
if self.config.is_encoder_decoder:
return BeamSearchEncoderDecoderOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return BeamSearchDecoderOnlyOutput(
sequences=sequence_outputs["sequences"],
sequences_scores=sequence_outputs["sequence_scores"],
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return sequence_outputs["sequences"]
def top_k_top_p_filtering(
logits: torch.FloatTensor,
top_k: int = 0,
top_p: float = 1.0,
filter_value: float = -float("Inf"),
min_tokens_to_keep: int = 1,
) -> torch.FloatTensor:
"""
Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (batch size, vocabulary size)
top_k (`int`, *optional*, defaults to 0):
If > 0, only keep the top k tokens with highest probability (top-k filtering)
top_p (`float`, *optional*, defaults to 1.0):
If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus
filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
min_tokens_to_keep (`int`, *optional*, defaults to 1):
Minimum number of tokens we keep per batch example in the output.
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
if top_k > 0:
logits = TopKLogitsWarper(top_k=top_k, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)(
None, logits
)
if 0 <= top_p <= 1.0:
logits = TopPLogitsWarper(top_p=top_p, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)(
None, logits
)
return logits
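# Usage sketch (added for illustration; not part of the original module): filter
# a batch of raw logits, then sample from the renormalized distribution. Shapes
# and hyperparameters below are arbitrary assumptions.
def _example_top_k_top_p_sampling():
    logits = torch.randn(2, 100)  # (batch_size, vocab_size), dummy values
    filtered = top_k_top_p_filtering(logits, top_k=10, top_p=0.9)
    probs = torch.nn.functional.softmax(filtered, dim=-1)
    # tokens outside the top-k / nucleus mass were set to `filter_value` (-inf),
    # so they receive zero probability and are never drawn
    return torch.multinomial(probs, num_samples=1)  # (batch_size, 1)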
def _ranking_fast(
context_hidden: torch.FloatTensor,
next_hidden: torch.FloatTensor,
next_top_k_probs: torch.FloatTensor,
alpha: float,
beam_width: int,
) -> torch.FloatTensor:
"""
Reranks the top_k candidates based on a degeneration penalty (cosine similarity with previous tokens), as described
in the paper "A Contrastive Framework for Neural Text Generation". Returns the index of the best candidate for each
row in the batch.
"""
norm_context_hidden = context_hidden / context_hidden.norm(dim=2, keepdim=True)
norm_next_hidden = next_hidden / next_hidden.norm(dim=2, keepdim=True)
cosine_matrix = torch.matmul(norm_context_hidden, norm_next_hidden.transpose(1, 2)).squeeze(-1) # [B*K, S]
degeneration_penalty, _ = torch.max(cosine_matrix, dim=-1) # [B*K]
next_top_k_probs = next_top_k_probs.view(-1) # [B*K]
contrastive_score = (1.0 - alpha) * next_top_k_probs - alpha * degeneration_penalty
contrastive_score = torch.stack(torch.split(contrastive_score, beam_width)) # [B, K]
_, selected_idx = contrastive_score.max(dim=-1) # [B]
return selected_idx
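# Illustrative sketch (not in the original source): how `_ranking_fast` is fed
# during contrastive search. B = batch size, K = beam width (`top_k`),
# S = context length, D = hidden size; all tensor values are dummies.
def _example_ranking_fast(B=2, K=4, S=5, D=8):
    context_hidden = torch.randn(B * K, S, D)  # context hidden states, one row per candidate
    next_hidden = torch.randn(B * K, 1, D)  # hidden state of each candidate token
    next_top_k_probs = torch.rand(B, K)  # model probability of each candidate
    # alpha trades model confidence against the degeneration penalty
    return _ranking_fast(context_hidden, next_hidden, next_top_k_probs, alpha=0.6, beam_width=K)  # [B]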
|
233zzh/TitanDataOperationSystem | 20,528 | 代码/web代码/titanApp/src/main/resources/static/src/assets/extra-libs/flot.tooltip/js/jquery.flot.tooltip.js | /*
* jquery.flot.tooltip
*
* description: easy-to-use tooltips for Flot charts
* version: 0.8.5
* authors: Krzysztof Urbas @krzysu [myviews.pl],Evan Steinkerchner @Roundaround
* website: https://github.com/krzysu/flot.tooltip
*
* build on 2015-05-11
* released under MIT License, 2012
*/
(function ($) {
// plugin options, default values
var defaultOptions = {
tooltip: {
show: false,
cssClass: "flotTip",
content: "%s | X: %x | Y: %y",
// allowed templates are:
// %s -> series label,
// %c -> series color,
// %lx -> x axis label (requires flot-axislabels plugin https://github.com/markrcote/flot-axislabels),
// %ly -> y axis label (requires flot-axislabels plugin https://github.com/markrcote/flot-axislabels),
// %x -> X value,
// %y -> Y value,
// %x.2 -> precision of X value,
// %p -> percent
xDateFormat: null,
yDateFormat: null,
monthNames: null,
dayNames: null,
shifts: {
x: 10,
y: 20
},
defaultTheme: true,
lines: false,
// callbacks
onHover: function (flotItem, $tooltipEl) {},
$compat: false
}
};
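// Usage sketch (illustrative, not part of the plugin source): enabling the
// tooltip when creating a plot; the selector and series data are assumptions.
//
//   $.plot($("#placeholder"), [{ label: "cpu", data: [[0, 1], [1, 3]] }], {
//       tooltip: {
//           show: true,
//           content: "%s | X: %x.2 | Y: %y.2"
//       }
//   });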
// dummy default options object for legacy code (<0.8.5) - is deleted later
defaultOptions.tooltipOpts = defaultOptions.tooltip;
// object
var FlotTooltip = function (plot) {
// variables
this.tipPosition = {x: 0, y: 0};
this.init(plot);
};
// main plugin function
FlotTooltip.prototype.init = function (plot) {
var that = this;
// detect other flot plugins
var plotPluginsLength = $.plot.plugins.length;
this.plotPlugins = [];
if (plotPluginsLength) {
for (var p = 0; p < plotPluginsLength; p++) {
this.plotPlugins.push($.plot.plugins[p].name);
}
}
plot.hooks.bindEvents.push(function (plot, eventHolder) {
// get plot options
that.plotOptions = plot.getOptions();
// for legacy (<0.8.5) implementations
if (typeof(that.plotOptions.tooltip) === 'boolean') {
that.plotOptions.tooltipOpts.show = that.plotOptions.tooltip;
that.plotOptions.tooltip = that.plotOptions.tooltipOpts;
delete that.plotOptions.tooltipOpts;
}
// if not enabled return
if (that.plotOptions.tooltip.show === false || typeof that.plotOptions.tooltip.show === 'undefined') return;
// shortcut to access tooltip options
that.tooltipOptions = that.plotOptions.tooltip;
if (that.tooltipOptions.$compat) {
that.wfunc = 'width';
that.hfunc = 'height';
} else {
that.wfunc = 'innerWidth';
that.hfunc = 'innerHeight';
}
// create tooltip DOM element
var $tip = that.getDomElement();
// bind event
$( plot.getPlaceholder() ).bind("plothover", plothover);
$(eventHolder).bind('mousemove', mouseMove);
});
plot.hooks.shutdown.push(function (plot, eventHolder){
$(plot.getPlaceholder()).unbind("plothover", plothover);
$(eventHolder).unbind("mousemove", mouseMove);
});
function mouseMove(e){
var pos = {};
pos.x = e.pageX;
pos.y = e.pageY;
plot.setTooltipPosition(pos);
}
function plothover(event, pos, item) {
// Simple distance formula.
var lineDistance = function (p1x, p1y, p2x, p2y) {
return Math.sqrt((p2x - p1x) * (p2x - p1x) + (p2y - p1y) * (p2y - p1y));
};
// Here is some voodoo magic for determining the distance to a line from a given point {x, y}.
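// If the perpendicular foot from the point falls within the segment, the classic
// point-to-line formula |ax + by + c| / sqrt(a^2 + b^2) applies; otherwise the
// distance to the nearer endpoint is returned.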
var dotLineLength = function (x, y, x0, y0, x1, y1, o) {
if (o && !(o =
function (x, y, x0, y0, x1, y1) {
if (typeof x0 !== 'undefined') return { x: x0, y: y };
else if (typeof y0 !== 'undefined') return { x: x, y: y0 };
var left,
tg = -1 / ((y1 - y0) / (x1 - x0));
return {
x: left = (x1 * (x * tg - y + y0) + x0 * (x * -tg + y - y1)) / (tg * (x1 - x0) + y0 - y1),
y: tg * left - tg * x + y
};
} (x, y, x0, y0, x1, y1),
o.x >= Math.min(x0, x1) && o.x <= Math.max(x0, x1) && o.y >= Math.min(y0, y1) && o.y <= Math.max(y0, y1))
) {
var l1 = lineDistance(x, y, x0, y0), l2 = lineDistance(x, y, x1, y1);
return l1 > l2 ? l2 : l1;
} else {
var a = y0 - y1, b = x1 - x0, c = x0 * y1 - y0 * x1;
return Math.abs(a * x + b * y + c) / Math.sqrt(a * a + b * b);
}
};
if (item) {
plot.showTooltip(item, pos);
} else if (that.plotOptions.series.lines.show && that.tooltipOptions.lines === true) {
var maxDistance = that.plotOptions.grid.mouseActiveRadius;
var closestTrace = {
distance: maxDistance + 1
};
$.each(plot.getData(), function (i, series) {
var xBeforeIndex = 0,
xAfterIndex = -1;
// Our search here assumes our data is sorted via the x-axis.
// TODO: Improve efficiency somehow - search smaller sets of data.
for (var j = 1; j < series.data.length; j++) {
if (series.data[j - 1][0] <= pos.x && series.data[j][0] >= pos.x) {
xBeforeIndex = j - 1;
xAfterIndex = j;
}
}
if (xAfterIndex === -1) {
plot.hideTooltip();
return;
}
var pointPrev = { x: series.data[xBeforeIndex][0], y: series.data[xBeforeIndex][1] },
pointNext = { x: series.data[xAfterIndex][0], y: series.data[xAfterIndex][1] };
var distToLine = dotLineLength(series.xaxis.p2c(pos.x), series.yaxis.p2c(pos.y), series.xaxis.p2c(pointPrev.x),
series.yaxis.p2c(pointPrev.y), series.xaxis.p2c(pointNext.x), series.yaxis.p2c(pointNext.y), false);
if (distToLine < closestTrace.distance) {
var closestIndex = lineDistance(pointPrev.x, pointPrev.y, pos.x, pos.y) <
lineDistance(pos.x, pos.y, pointNext.x, pointNext.y) ? xBeforeIndex : xAfterIndex;
var pointSize = series.datapoints.pointsize;
// Calculate the point on the line vertically closest to our cursor.
var pointOnLine = [
pos.x,
pointPrev.y + ((pointNext.y - pointPrev.y) * ((pos.x - pointPrev.x) / (pointNext.x - pointPrev.x)))
];
var item = {
datapoint: pointOnLine,
dataIndex: closestIndex,
series: series,
seriesIndex: i
};
closestTrace = {
distance: distToLine,
item: item
};
}
});
if (closestTrace.distance < maxDistance + 1)
plot.showTooltip(closestTrace.item, pos);
else
plot.hideTooltip();
} else {
plot.hideTooltip();
}
}
// Quick little function for setting the tooltip position.
plot.setTooltipPosition = function (pos) {
var $tip = that.getDomElement();
var totalTipWidth = $tip.outerWidth() + that.tooltipOptions.shifts.x;
var totalTipHeight = $tip.outerHeight() + that.tooltipOptions.shifts.y;
if ((pos.x - $(window).scrollLeft()) > ($(window)[that.wfunc]() - totalTipWidth)) {
pos.x -= totalTipWidth;
}
if ((pos.y - $(window).scrollTop()) > ($(window)[that.hfunc]() - totalTipHeight)) {
pos.y -= totalTipHeight;
}
that.tipPosition.x = pos.x;
that.tipPosition.y = pos.y;
};
// Quick little function for showing the tooltip.
plot.showTooltip = function (target, position) {
var $tip = that.getDomElement();
// convert tooltip content template to real tipText
var tipText = that.stringFormat(that.tooltipOptions.content, target);
if (tipText === '')
return;
$tip.html(tipText);
plot.setTooltipPosition({ x: position.pageX, y: position.pageY });
$tip.css({
left: that.tipPosition.x + that.tooltipOptions.shifts.x,
top: that.tipPosition.y + that.tooltipOptions.shifts.y
}).show();
// run callback
if (typeof that.tooltipOptions.onHover === 'function') {
that.tooltipOptions.onHover(target, $tip);
}
};
// Quick little function for hiding the tooltip.
plot.hideTooltip = function () {
that.getDomElement().hide().html('');
};
};
/**
* get or create tooltip DOM element
* @return jQuery object
*/
FlotTooltip.prototype.getDomElement = function () {
var $tip = $('.' + this.tooltipOptions.cssClass);
if( $tip.length === 0 ){
$tip = $('<div />').addClass(this.tooltipOptions.cssClass);
$tip.appendTo('body').hide().css({position: 'absolute'});
if(this.tooltipOptions.defaultTheme) {
$tip.css({
'background': '#fff',
'z-index': '1040',
'padding': '0.4em 0.6em',
'border-radius': '0.5em',
'font-size': '0.8em',
'border': '1px solid #111',
'display': 'none',
'white-space': 'nowrap'
});
}
}
return $tip;
};
/**
* core function, create tooltip content
* @param {string} content - template with tooltip content
* @param {object} item - Flot item
* @return {string} real tooltip content for current item
*/
FlotTooltip.prototype.stringFormat = function (content, item) {
var percentPattern = /%p\.{0,1}(\d{0,})/;
var seriesPattern = /%s/;
var colorPattern = /%c/;
var xLabelPattern = /%lx/; // requires flot-axislabels plugin https://github.com/markrcote/flot-axislabels, will be ignored if plugin isn't loaded
var yLabelPattern = /%ly/; // requires flot-axislabels plugin https://github.com/markrcote/flot-axislabels, will be ignored if plugin isn't loaded
var xPattern = /%x\.{0,1}(\d{0,})/;
var yPattern = /%y\.{0,1}(\d{0,})/;
var xPatternWithoutPrecision = "%x";
var yPatternWithoutPrecision = "%y";
var customTextPattern = "%ct";
var x, y, customText, p;
// for threshold plugin we need to read data from different place
if (typeof item.series.threshold !== "undefined") {
x = item.datapoint[0];
y = item.datapoint[1];
customText = item.datapoint[2];
} else if (typeof item.series.lines !== "undefined" && item.series.lines.steps) {
x = item.series.datapoints.points[item.dataIndex * 2];
y = item.series.datapoints.points[item.dataIndex * 2 + 1];
// TODO: where to find custom text in this variant?
customText = "";
} else {
x = item.series.data[item.dataIndex][0];
y = item.series.data[item.dataIndex][1];
customText = item.series.data[item.dataIndex][2];
}
// I think this is only in case of threshold plugin
if (item.series.label === null && item.series.originSeries) {
item.series.label = item.series.originSeries.label;
}
// if it is a function callback get the content string
if (typeof(content) === 'function') {
content = content(item.series.label, x, y, item);
}
// the case where the passed content is equal to false
if (typeof(content) === 'boolean' && !content) {
return '';
}
// percent match for pie charts and stacked percent
if (typeof (item.series.percent) !== 'undefined') {
p = item.series.percent;
} else if (typeof (item.series.percents) !== 'undefined') {
p = item.series.percents[item.dataIndex];
}
if (typeof p === 'number') {
content = this.adjustValPrecision(percentPattern, content, p);
}
// series match
if (typeof(item.series.label) !== 'undefined') {
content = content.replace(seriesPattern, item.series.label);
} else {
//remove %s if label is undefined
content = content.replace(seriesPattern, "");
}
// color match
if (typeof(item.series.color) !== 'undefined') {
content = content.replace(colorPattern, item.series.color);
} else {
//remove %c if color is undefined
content = content.replace(colorPattern, "");
}
// x axis label match
if (this.hasAxisLabel('xaxis', item)) {
content = content.replace(xLabelPattern, item.series.xaxis.options.axisLabel);
} else {
//remove %lx if axis label is undefined or axislabels plugin not present
content = content.replace(xLabelPattern, "");
}
// y axis label match
if (this.hasAxisLabel('yaxis', item)) {
content = content.replace(yLabelPattern, item.series.yaxis.options.axisLabel);
} else {
//remove %ly if axis label is undefined or axislabels plugin not present
content = content.replace(yLabelPattern, "");
}
// time mode axes with custom dateFormat
if (this.isTimeMode('xaxis', item) && this.isXDateFormat(item)) {
content = content.replace(xPattern, this.timestampToDate(x, this.tooltipOptions.xDateFormat, item.series.xaxis.options));
}
if (this.isTimeMode('yaxis', item) && this.isYDateFormat(item)) {
content = content.replace(yPattern, this.timestampToDate(y, this.tooltipOptions.yDateFormat, item.series.yaxis.options));
}
// set precision if defined
if (typeof x === 'number') {
content = this.adjustValPrecision(xPattern, content, x);
}
if (typeof y === 'number') {
content = this.adjustValPrecision(yPattern, content, y);
}
// change x from number to given label, if given
if (typeof item.series.xaxis.ticks !== 'undefined') {
var ticks;
if (this.hasRotatedXAxisTicks(item)) {
// xaxis.ticks will be an empty array if tickRotor is being used, but the values are available in rotatedTicks
ticks = 'rotatedTicks';
} else {
ticks = 'ticks';
}
// see https://github.com/krzysu/flot.tooltip/issues/65
var tickIndex = item.dataIndex + item.seriesIndex;
for (var index in item.series.xaxis[ticks]) {
if (item.series.xaxis[ticks].hasOwnProperty(tickIndex) && !this.isTimeMode('xaxis', item)) {
var valueX = (this.isCategoriesMode('xaxis', item)) ? item.series.xaxis[ticks][tickIndex].label : item.series.xaxis[ticks][tickIndex].v;
if (valueX === x) {
content = content.replace(xPattern, item.series.xaxis[ticks][tickIndex].label);
}
}
}
}
// change y from number to given label, if given
if (typeof item.series.yaxis.ticks !== 'undefined') {
for (var index in item.series.yaxis.ticks) {
if (item.series.yaxis.ticks.hasOwnProperty(index)) {
var valueY = (this.isCategoriesMode('yaxis', item)) ? item.series.yaxis.ticks[index].label : item.series.yaxis.ticks[index].v;
if (valueY === y) {
content = content.replace(yPattern, item.series.yaxis.ticks[index].label);
}
}
}
}
// if no value customization, use tickFormatter by default
if (typeof item.series.xaxis.tickFormatter !== 'undefined') {
//escape dollar
content = content.replace(xPatternWithoutPrecision, item.series.xaxis.tickFormatter(x, item.series.xaxis).replace(/\$/g, '$$'));
}
if (typeof item.series.yaxis.tickFormatter !== 'undefined') {
//escape dollar
content = content.replace(yPatternWithoutPrecision, item.series.yaxis.tickFormatter(y, item.series.yaxis).replace(/\$/g, '$$'));
}
if (customText)
content = content.replace(customTextPattern, customText);
return content;
};
// helpers just for readability
FlotTooltip.prototype.isTimeMode = function (axisName, item) {
return (typeof item.series[axisName].options.mode !== 'undefined' && item.series[axisName].options.mode === 'time');
};
FlotTooltip.prototype.isXDateFormat = function (item) {
return (typeof this.tooltipOptions.xDateFormat !== 'undefined' && this.tooltipOptions.xDateFormat !== null);
};
FlotTooltip.prototype.isYDateFormat = function (item) {
return (typeof this.tooltipOptions.yDateFormat !== 'undefined' && this.tooltipOptions.yDateFormat !== null);
};
FlotTooltip.prototype.isCategoriesMode = function (axisName, item) {
return (typeof item.series[axisName].options.mode !== 'undefined' && item.series[axisName].options.mode === 'categories');
};
//
FlotTooltip.prototype.timestampToDate = function (tmst, dateFormat, options) {
var theDate = $.plot.dateGenerator(tmst, options);
return $.plot.formatDate(theDate, dateFormat, this.tooltipOptions.monthNames, this.tooltipOptions.dayNames);
};
//
FlotTooltip.prototype.adjustValPrecision = function (pattern, content, value) {
var precision;
var matchResult = content.match(pattern);
if( matchResult !== null ) {
if(RegExp.$1 !== '') {
precision = RegExp.$1;
value = value.toFixed(precision);
// only replace content if precision exists; otherwise the tickFormatter is used
content = content.replace(pattern, value);
}
}
return content;
};
// other plugins detection below
// check if flot-axislabels plugin (https://github.com/markrcote/flot-axislabels) is used and that an axis label is given
FlotTooltip.prototype.hasAxisLabel = function (axisName, item) {
return ($.inArray('axisLabels', this.plotPlugins) !== -1 && typeof item.series[axisName].options.axisLabel !== 'undefined' && item.series[axisName].options.axisLabel.length > 0);
};
// check whether flot-tickRotor, a plugin which allows rotation of X-axis ticks, is being used
FlotTooltip.prototype.hasRotatedXAxisTicks = function (item) {
return ($.inArray('tickRotor', this.plotPlugins) !== -1 && typeof item.series.xaxis.rotatedTicks !== 'undefined');
};
//
var init = function (plot) {
new FlotTooltip(plot);
};
// define Flot plugin
$.plot.plugins.push({
init: init,
options: defaultOptions,
name: 'tooltip',
version: '0.8.5'
});
})(jQuery);
|
27182812/ChatGLM-LLaMA-chinese-insturct | 44,591 | src/transformers/generation/beam_search.py | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from collections import UserDict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..utils import add_start_docstrings
from .beam_constraints import Constraint, ConstraintListState
PROCESS_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
next_scores (`torch.FloatTensor` of shape `(batch_size, 2 * num_beams)`):
Current scores of the top `2 * num_beams` non-finished beam hypotheses.
next_tokens (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
`input_ids` of the tokens corresponding to the top `2 * num_beams` non-finished beam hypotheses.
next_indices (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
Beam indices indicating to which beam hypothesis the `next_tokens` correspond.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, List[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
Return:
`UserDict`: A dictionary composed of the fields as defined above:
- **next_beam_scores** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Updated scores of all
non-finished beams.
- **next_beam_tokens** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Next tokens to be added
to the non-finished beam_hypotheses.
- **next_beam_indices** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Beam indices
indicating to which beam the next tokens shall be added.
"""
FINALIZE_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
final_beam_scores (`torch.FloatTensor` of shape `(batch_size * num_beams)`):
The final scores of all non-finished beams.
final_beam_tokens (`torch.FloatTensor` of shape `(batch_size * num_beams)`):
The last tokens to be added to the non-finished beam_hypotheses.
final_beam_indices (`torch.FloatTensor` of shape `(batch_size * num_beams)`):
The beam indices indicating to which beam the `final_beam_tokens` shall be added.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, List[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
Return:
`torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences.
The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early
due to the `eos_token_id`.
"""
class BeamScorer(ABC):
"""
Abstract base class for all beam scorers that are used for [`~PreTrainedModel.beam_search`] and
[`~PreTrainedModel.beam_sample`].
"""
@abstractmethod
@add_start_docstrings(PROCESS_INPUTS_DOCSTRING)
def process(
self,
input_ids: torch.LongTensor,
next_scores: torch.FloatTensor,
next_tokens: torch.LongTensor,
next_indices: torch.LongTensor,
**kwargs,
) -> Tuple[torch.Tensor]:
raise NotImplementedError("This is an abstract method.")
@abstractmethod
@add_start_docstrings(FINALIZE_INPUTS_DOCSTRING)
def finalize(
self,
input_ids: torch.LongTensor,
next_scores: torch.FloatTensor,
next_tokens: torch.LongTensor,
next_indices: torch.LongTensor,
max_length: int,
**kwargs,
) -> torch.LongTensor:
raise NotImplementedError("This is an abstract method.")
class BeamSearchScorer(BeamScorer):
r"""
[`BeamScorer`] implementing standard beam search decoding.
Adapted in part from [Facebook's XLM beam search
code](https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529).
Reference for the diverse beam search algorithm and implementation [Ashwin Kalyan's DBS
implementation](https://github.com/ashwinkalyan/dbs/blob/master/dbs/beam_utils.lua)
Args:
batch_size (`int`):
Batch Size of `input_ids` for which standard beam search decoding is run in parallel.
num_beams (`int`):
Number of beams for beam search.
device (`torch.device`):
Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be
allocated.
length_penalty (`float`, *optional*, defaults to 1.0):
Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
`length_penalty` < 0.0 encourages shorter sequences.
do_early_stopping (`bool` or `str`, *optional*, defaults to `False`):
Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values:
`True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where a
heuristic is applied and the generation stops when it is very unlikely to find better candidates;
`"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical
beam search algorithm).
num_beam_hyps_to_keep (`int`, *optional*, defaults to 1):
The number of beam hypotheses that shall be returned upon calling
[`~transformer.BeamSearchScorer.finalize`].
num_beam_groups (`int`):
Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
max_length (`int`, *optional*):
The maximum length of the sequence to be generated.
"""
def __init__(
self,
batch_size: int,
num_beams: int,
device: torch.device,
length_penalty: Optional[float] = 1.0,
do_early_stopping: Optional[Union[bool, str]] = False,
num_beam_hyps_to_keep: Optional[int] = 1,
num_beam_groups: Optional[int] = 1,
max_length: Optional[int] = None,
):
self.num_beams = num_beams
self.device = device
self.length_penalty = length_penalty
self.do_early_stopping = do_early_stopping
self.num_beam_hyps_to_keep = num_beam_hyps_to_keep
self.num_beam_groups = num_beam_groups
self.group_size = self.num_beams // self.num_beam_groups
self._is_init = False
self._beam_hyps = [
BeamHypotheses(
num_beams=self.num_beams,
length_penalty=self.length_penalty,
early_stopping=self.do_early_stopping,
max_length=max_length,
)
for _ in range(batch_size)
]
self._done = torch.tensor([False for _ in range(batch_size)], dtype=torch.bool, device=self.device)
if not isinstance(num_beams, int) or num_beams <= 1:
raise ValueError(
f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1,"
" one should make use of `greedy_search` instead."
)
if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0):
raise ValueError(
"`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be"
f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}."
)
@property
def is_done(self) -> bool:
return self._done.all()
def process(
self,
input_ids: torch.LongTensor,
next_scores: torch.FloatTensor,
next_tokens: torch.LongTensor,
next_indices: torch.LongTensor,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[Union[int, List[int]]] = None,
beam_indices: Optional[torch.LongTensor] = None,
) -> Tuple[torch.Tensor]:
cur_len = input_ids.shape[-1]
batch_size = len(self._beam_hyps)
if not (batch_size == (input_ids.shape[0] // self.group_size)):
if self.num_beam_groups > 1:
raise ValueError(
f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam "
f"size of {self.group_size} is expected by the beam scorer."
)
else:
raise ValueError(
f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of "
f"{self.group_size} is expected by the beam scorer."
)
device = input_ids.device
next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device)
next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device)
next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device)
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
for batch_idx, beam_hyp in enumerate(self._beam_hyps):
if self._done[batch_idx]:
if self.num_beams < len(beam_hyp):
raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated")
if eos_token_id is None or pad_token_id is None:
raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token have to be defined")
# pad the batch
next_beam_scores[batch_idx, :] = 0
next_beam_tokens[batch_idx, :] = pad_token_id
next_beam_indices[batch_idx, :] = 0
continue
# next tokens for this sentence
beam_idx = 0
for beam_token_rank, (next_token, next_score, next_index) in enumerate(
zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx])
):
batch_beam_idx = batch_idx * self.group_size + next_index
# add to generated hypotheses if end of sentence
if (eos_token_id is not None) and (next_token.item() in eos_token_id):
# if beam_token does not belong to top num_beams tokens, it should not be added
is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size
if is_beam_token_worse_than_top_num_beams:
continue
if beam_indices is not None:
beam_index = beam_indices[batch_beam_idx]
beam_index = beam_index + (batch_beam_idx,)
else:
beam_index = None
beam_hyp.add(
input_ids[batch_beam_idx].clone(),
next_score.item(),
beam_indices=beam_index,
)
else:
# add next predicted token since it is not eos_token
next_beam_scores[batch_idx, beam_idx] = next_score
next_beam_tokens[batch_idx, beam_idx] = next_token
next_beam_indices[batch_idx, beam_idx] = batch_beam_idx
beam_idx += 1
# once the beam for next step is full, don't add more tokens to it.
if beam_idx == self.group_size:
break
if beam_idx < self.group_size:
raise ValueError(
f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:"
f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected."
)
# Check if we are done so that we can save a pad step if all(done)
self._done[batch_idx] = self._done[batch_idx] or beam_hyp.is_done(
next_scores[batch_idx].max().item(), cur_len
)
return UserDict(
{
"next_beam_scores": next_beam_scores.view(-1),
"next_beam_tokens": next_beam_tokens.view(-1),
"next_beam_indices": next_beam_indices.view(-1),
}
)
def finalize(
self,
input_ids: torch.LongTensor,
final_beam_scores: torch.FloatTensor,
final_beam_tokens: torch.LongTensor,
final_beam_indices: torch.LongTensor,
max_length: int,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[Union[int, List[int]]] = None,
beam_indices: Optional[torch.LongTensor] = None,
) -> Tuple[torch.LongTensor]:
batch_size = len(self._beam_hyps)
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
# finalize all open beam hypotheses and add to generated hypotheses
for batch_idx, beam_hyp in enumerate(self._beam_hyps):
if self._done[batch_idx]:
continue
# all open beam hypotheses are added to the beam hypothesis
# beam hypothesis class automatically keeps the best beams
for beam_id in range(self.num_beams):
batch_beam_idx = batch_idx * self.num_beams + beam_id
final_score = final_beam_scores[batch_beam_idx].item()
final_tokens = input_ids[batch_beam_idx]
beam_index = beam_indices[batch_beam_idx] if beam_indices is not None else None
beam_hyp.add(final_tokens, final_score, beam_indices=beam_index)
# select the best hypotheses
sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep)
best = []
best_indices = []
best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32)
# retrieve best hypotheses
for i, beam_hyp in enumerate(self._beam_hyps):
sorted_hyps = sorted(beam_hyp.beams, key=lambda x: x[0])
for j in range(self.num_beam_hyps_to_keep):
best_hyp_tuple = sorted_hyps.pop()
best_score = best_hyp_tuple[0]
best_hyp = best_hyp_tuple[1]
best_index = best_hyp_tuple[2]
sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp)
# append hyp to lists
best.append(best_hyp)
# append indices to list
best_indices.append(best_index)
best_scores[i * self.num_beam_hyps_to_keep + j] = best_score
# prepare for adding eos
sent_lengths_max = sent_lengths.max().item() + 1
sent_max_len = min(sent_lengths_max, max_length) if max_length is not None else sent_lengths_max
decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
if len(best_indices) > 0 and best_indices[0] is not None:
indices: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
else:
indices = None
# shorter batches are padded if needed
if sent_lengths.min().item() != sent_lengths.max().item():
assert pad_token_id is not None, "`pad_token_id` has to be defined"
decoded.fill_(pad_token_id)
if indices is not None:
indices.fill_(-1)
# fill with hypotheses and eos_token_id if the latter fits in
for i, (hypo, best_idx) in enumerate(zip(best, best_indices)):
decoded[i, : sent_lengths[i]] = hypo
if indices is not None:
indices[i, : len(best_idx)] = torch.tensor(best_idx)
if sent_lengths[i] < sent_max_len:
# inserting only the first eos_token_id
decoded[i, sent_lengths[i]] = eos_token_id[0]
return UserDict(
{
"sequences": decoded,
"sequence_scores": best_scores,
"beam_indices": indices,
}
)
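# Minimal sketch (added for exposition; not part of the original module): one
# stateless `process` step of a `BeamSearchScorer`, mirroring what `generate()`
# does internally. All tensors are dummies.
def _example_beam_scorer_step(batch_size=1, num_beams=3, vocab_size=10, cur_len=4):
    scorer = BeamSearchScorer(batch_size=batch_size, num_beams=num_beams, device=torch.device("cpu"))
    input_ids = torch.zeros((batch_size * num_beams, cur_len), dtype=torch.long)
    # fake next-token log-probabilities, flattened over (num_beams * vocab_size)
    flat_scores = torch.log_softmax(torch.randn(batch_size, num_beams * vocab_size), dim=-1)
    next_scores, next_ids = torch.topk(flat_scores, 2 * num_beams, dim=1)
    next_indices = torch.div(next_ids, vocab_size, rounding_mode="floor")  # beam each candidate came from
    next_tokens = next_ids % vocab_size  # token id within the vocabulary
    out = scorer.process(input_ids, next_scores, next_tokens, next_indices, pad_token_id=0, eos_token_id=2)
    return out["next_beam_scores"], out["next_beam_tokens"], out["next_beam_indices"]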
class ConstrainedBeamSearchScorer(BeamScorer):
r"""
[`BeamScorer`] implementing constrained beam search decoding.
Args:
batch_size (`int`):
Batch Size of `input_ids` for which standard beam search decoding is run in parallel.
num_beams (`int`):
Number of beams for beam search.
constraints (`List[Constraint]`):
A list of positive constraints represented as `Constraint` objects that must be fulfilled in the generation
output. For more information, the documentation of [`Constraint`] should be read.
device (`torch.device`):
Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be
allocated.
length_penalty (`float`, *optional*, defaults to 1.0):
Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
`length_penalty` < 0.0 encourages shorter sequences.
do_early_stopping (`bool` or `str`, *optional*, defaults to `False`):
Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values:
`True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where a
heuristic is applied and the generation stops when it is very unlikely to find better candidates;
`"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical
beam search algorithm).
num_beam_hyps_to_keep (`int`, *optional*, defaults to 1):
The number of beam hypotheses that shall be returned upon calling
[`~transformer.BeamSearchScorer.finalize`].
num_beam_groups (`int`):
Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
max_length (`int`, *optional*):
The maximum length of the sequence to be generated.
"""
def __init__(
self,
batch_size: int,
num_beams: int,
constraints: List[Constraint],
device: torch.device,
length_penalty: Optional[float] = 1.0,
do_early_stopping: Optional[Union[bool, str]] = False,
num_beam_hyps_to_keep: Optional[int] = 1,
num_beam_groups: Optional[int] = 1,
max_length: Optional[int] = None,
):
self.num_beams = num_beams
self.device = device
self.length_penalty = length_penalty
self.do_early_stopping = do_early_stopping
self.num_beam_hyps_to_keep = num_beam_hyps_to_keep
self.num_beam_groups = num_beam_groups
self.group_size = self.num_beams // self.num_beam_groups
self.constraints = constraints
self._is_init = False
self._beam_hyps = [
BeamHypotheses(
num_beams=self.num_beams,
length_penalty=self.length_penalty,
early_stopping=self.do_early_stopping,
max_length=max_length,
)
for _ in range(batch_size)
]
self._done = torch.tensor([False for _ in range(batch_size)], dtype=torch.bool, device=self.device)
if not isinstance(num_beams, int) or num_beams <= 1:
raise ValueError(
f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1,"
" one should make use of `greedy_search` instead."
)
if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0):
raise ValueError(
"`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be"
f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}."
)
@property
def is_done(self) -> bool:
return self._done.all()
def make_constraint_states(self, n):
return [ConstraintListState([constraint.copy() for constraint in self.constraints]) for _ in range(n)]
def check_completes_constraints(self, sequence):
new_state = self.make_constraint_states(1)[0]
new_state.reset(sequence)
return new_state.completed
def process(
self,
input_ids: torch.LongTensor,
next_scores: torch.FloatTensor,
next_tokens: torch.LongTensor,
next_indices: torch.LongTensor,
scores_for_all_vocab: torch.FloatTensor,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[Union[int, List[int]]] = None,
) -> Tuple[torch.Tensor]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
next_scores (`torch.FloatTensor` of shape `(batch_size, 2 * num_beams)`):
Current scores of the top `2 * num_beams` non-finished beam hypotheses.
next_tokens (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
`input_ids` of the tokens corresponding to the top `2 * num_beams` non-finished beam hypotheses.
next_indices (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
Beam indices indicating to which beam hypothesis the `next_tokens` correspond.
scores_for_all_vocab (`torch.FloatTensor` of shape `(batch_size * num_beams, vocab_size)`):
The scores of all tokens in the vocabulary for each of the beam hypotheses.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, List[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
Return:
`UserDict`: A dictionary composed of the fields as defined above:
- **next_beam_scores** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Updated scores of all
non-finished beams.
- **next_beam_tokens** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Next tokens to be added
to the non-finished beam_hypotheses.
- **next_beam_indices** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Beam indices
indicating to which beam the next tokens shall be added.
"""
cur_len = input_ids.shape[-1]
batch_size = len(self._beam_hyps)
if not (batch_size == (input_ids.shape[0] // self.group_size)):
if self.num_beam_groups > 1:
raise ValueError(
f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam "
f"size of {self.group_size} is expected by the beam scorer."
)
else:
raise ValueError(
f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of "
f"{self.group_size} is expected by the beam scorer."
)
device = input_ids.device
next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device)
next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device)
next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device)
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
for batch_idx, beam_hyp in enumerate(self._beam_hyps):
if self._done[batch_idx]:
if self.num_beams < len(beam_hyp):
raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated")
if eos_token_id is None or pad_token_id is None:
raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token have to be defined")
# pad the batch
next_beam_scores[batch_idx, :] = 0
next_beam_tokens[batch_idx, :] = pad_token_id
next_beam_indices[batch_idx, :] = 0
continue
# next tokens for this sentence.
beam_idx = 0
for beam_token_rank, (next_token, next_score, next_index) in enumerate(
zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx])
):
batch_beam_idx = batch_idx * self.group_size + next_index
# add to generated hypotheses if end of sentence
if (eos_token_id is not None) and (next_token.item() in eos_token_id):
# if beam_token does not belong to top num_beams tokens, it should not be added
is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size
if is_beam_token_worse_than_top_num_beams:
continue
completes_constraint = self.check_completes_constraints(input_ids[batch_beam_idx].cpu().tolist())
if completes_constraint:
beam_hyp.add(
input_ids[batch_beam_idx].clone(),
next_score.item(),
)
else:
# add next predicted token since it is not eos_token
next_beam_scores[batch_idx, beam_idx] = next_score
next_beam_tokens[batch_idx, beam_idx] = next_token
next_beam_indices[batch_idx, beam_idx] = batch_beam_idx
beam_idx += 1
# once the beam for next step is full, don't add more tokens to it.
if beam_idx == self.group_size:
break
new_scores, new_tokens, new_indices = self.step_sentence_constraint(
batch_idx,
input_ids,
scores_for_all_vocab,
next_beam_scores[batch_idx],
next_beam_tokens[batch_idx],
next_beam_indices[batch_idx],
)
next_beam_scores[batch_idx] = new_scores
next_beam_tokens[batch_idx] = new_tokens
next_beam_indices[batch_idx] = new_indices
if beam_idx < self.group_size:
raise ValueError(
f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:"
f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected."
)
# Check if we are done so that we can save a pad step if all(done)
self._done[batch_idx] = self._done[batch_idx] or beam_hyp.is_done(
next_scores[batch_idx].max().item(), cur_len
)
return UserDict(
{
"next_beam_scores": next_beam_scores.view(-1),
"next_beam_tokens": next_beam_tokens.view(-1),
"next_beam_indices": next_beam_indices.view(-1),
}
)
def step_sentence_constraint(
self,
batch_idx: int,
input_ids: torch.LongTensor,
vocab_scores: torch.FloatTensor,
sent_beam_scores: torch.FloatTensor,
sent_beam_tokens: torch.LongTensor,
sent_beam_indices: torch.LongTensor,
push_progress: bool = False,
):
# sent_beam_tokens are the next {num_beams} number of tokens that are under consideration for this beam
# (candidate next tokens)
# 1. Adding "advance_tokens"
# using ConstraintStateList.advance(), we propose new tokens to be added into this "candidate list" that will
# advance us in fulfilling the constraints.
# 2. Selecting best candidates such that we end up with highest probable candidates
# that fulfill our constraints.
orig_len = sent_beam_indices.size(0)
device = sent_beam_indices.device
# initialize states
topk_constraint_states = self.make_constraint_states(orig_len)
advance_constraint_states = self.make_constraint_states(orig_len)
sidx, eidx = batch_idx * orig_len, (batch_idx + 1) * orig_len
this_batch_input_ids = input_ids[sidx:eidx]
this_batch_token_scores = vocab_scores[sidx:eidx]
full_hypotheses = torch.cat((input_ids[sent_beam_indices], sent_beam_tokens.unsqueeze(-1)), dim=-1)
# need to make new hypothesis that advance the constraints
track_new = {
"new_seqs": full_hypotheses.tolist(),
"new_states": [],
"new_indices": [],
"new_tokens": [],
"new_scores": [],
}
for seq_idx, pre_seq in enumerate(this_batch_input_ids):
# pre_seq = ith sequence generated before this step.
# input_ids -> (topk) generic beam search best model next tokens
# -> (advance) constraints forcing the next token
# either way, we need to sort them into "banks" later, so store a "ConstraintListState" for all types of
# hypotheses.
topk_state = topk_constraint_states[seq_idx]
topk_state.reset(full_hypotheses[seq_idx].cpu().tolist())
advance_state = advance_constraint_states[seq_idx]
advance_state.reset(pre_seq.cpu().tolist())
if not advance_state.completed:
advance_tokens = torch.LongTensor(advance_state.advance()).to(device)
for advance_token in advance_tokens:
# since adding each `advance_token` leads to a different hypothesis, create new state instance.
new_state = advance_state.copy(stateful=True)
new_state.add(advance_token.cpu().tolist())
advance_seq = torch.cat((pre_seq, advance_token.unsqueeze(0)), -1).cpu().tolist()
if advance_seq not in track_new["new_seqs"]:
# prevent duplicates, which are basically bound to happen in this process.
track_new["new_seqs"].append(advance_seq)
track_new["new_indices"].append(sidx + seq_idx) # idx -> global idx across all the batches
track_new["new_tokens"].append(advance_token)
track_new["new_scores"].append(this_batch_token_scores[seq_idx].take(advance_token))
track_new["new_states"].append(new_state)
elif push_progress:
# Basically, `sent_beam_indices` often selects only a few of the generated sequences in `input_ids` that
# actually fulfill our constraints. For example, let constraints == ["loves pies"] and
# pre_seq_1 = "The child loves pies and" pre_seq_2 = "The child plays in the playground and"
# Without this step, if `sent_beam_indices` is something like [1,1], then
# 1. `pre_seq_1` won't be added to the list of (topk) hypothesis since it's not in the indices and
# 2. it won't be added to the list of (advance) hypothesis since it's completed already. (this is
# the else part of `if constraints_completed[seq_idx]`)
# 3. it ends up simply getting removed from consideration.
# #3 might be fine and actually desired, since it's likely that it's a low-probability output anyways,
# especially if it's not in the list of `sent_beam_indices`. But this often leads to lengthened beam
# search times, since completed sequences keep getting removed after all this effort for constrained
# generation.
# Here, we basically take `pre_seq_1` and to "push" it into the considered list of hypotheses, by simply
# appending the next likely token in the vocabulary and adding it to the list of hypotheses.
new_score, new_token = torch.max(this_batch_token_scores[seq_idx], 0) # some next probable token
advance_seq = torch.cat((pre_seq, new_token.unsqueeze(0)), -1)
advance_state = advance_constraint_states[seq_idx]
advance_seq = advance_seq.cpu().tolist()
advance_state.reset(advance_seq)
if advance_seq not in track_new["new_seqs"]:
# but still don't want to have duplicates
track_new["new_seqs"].append(advance_seq)
track_new["new_indices"].append(seq_idx)
track_new["new_tokens"].append(new_token)
track_new["new_scores"].append(new_score)
track_new["new_states"].append(advance_state)
if len(track_new["new_indices"]) > 0:
new_indices = torch.tensor(track_new["new_indices"]).to(device)
new_tokens = torch.stack(track_new["new_tokens"]).to(device)
new_scores = torch.stack(track_new["new_scores"]).to(device)
all_states = topk_constraint_states + track_new["new_states"]
all_tokens = torch.cat((sent_beam_tokens, new_tokens), -1)
all_scores = torch.cat((sent_beam_scores, new_scores), -1)
all_banks = torch.tensor([one.get_bank() for one in all_states]).to(device)
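# `banks * 100 + scores` sorts primarily by constraint-fulfillment bank and only
# then by score; this assumes the (log-probability) scores stay above -100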
zipped = all_banks * 100 + all_scores
indices = zipped.sort(descending=True).indices
sorted_banks = all_banks[indices]
# Then we end up with {sorted among bank C}, {sorted among bank C-1}, ..., {sorted among bank 0}
counter = -1
cur_bank = sorted_banks[0]
increments = []
for bank in sorted_banks:
if bank == cur_bank:
counter += 1
else:
counter = 0
cur_bank = bank
increments.append(counter)
rearrangers = torch.tensor(np.argsort(increments, kind="mergesort"))
indices = indices[rearrangers][:orig_len]
sent_beam_scores = all_scores[indices]
sent_beam_tokens = all_tokens[indices]
sent_beam_indices = torch.cat((sent_beam_indices, new_indices))[indices]
return sent_beam_scores, sent_beam_tokens, sent_beam_indices
def finalize(
self,
input_ids: torch.LongTensor,
final_beam_scores: torch.FloatTensor,
final_beam_tokens: torch.LongTensor,
final_beam_indices: torch.LongTensor,
max_length: int,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[Union[int, List[int]]] = None,
) -> Tuple[torch.LongTensor]:
batch_size = len(self._beam_hyps)
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
# finalize all open beam hypotheses and add to generated hypotheses
for batch_idx, beam_hyp in enumerate(self._beam_hyps):
if self._done[batch_idx]:
continue
# all open beam hypotheses are added to the beam hypothesis
# beam hypothesis class automatically keeps the best beams
ids_collect = []
for beam_id in range(self.num_beams):
batch_beam_idx = batch_idx * self.num_beams + beam_id
final_score = final_beam_scores[batch_beam_idx].item()
final_tokens = input_ids[batch_beam_idx]
completes_constraint = self.check_completes_constraints(final_tokens.cpu().tolist())
if completes_constraint:
beam_hyp.add(final_tokens, final_score)
ids_collect.append(beam_id)
# due to overly complex constraints or other factors, sometimes we can't guarantee a successful
# generation. In these cases we simply return the highest scoring outputs.
if len(ids_collect) < self.num_beam_hyps_to_keep:
for beam_id in range(self.num_beams):
if beam_id not in ids_collect:
batch_beam_idx = batch_idx * self.num_beams + beam_id
final_score = final_beam_scores[batch_beam_idx].item()
final_tokens = input_ids[batch_beam_idx]
beam_hyp.add(final_tokens, final_score)
if len(ids_collect) >= self.num_beam_hyps_to_keep:
break
# select the best hypotheses
sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep)
best = []
best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32)
# retrieve best hypotheses
for i, beam_hyp in enumerate(self._beam_hyps):
sorted_hyps = sorted(beam_hyp.beams, key=lambda x: x[0])
for j in range(self.num_beam_hyps_to_keep):
best_hyp_tuple = sorted_hyps.pop()
best_score = best_hyp_tuple[0]
best_hyp = best_hyp_tuple[1]
sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp)
# append to lists
best.append(best_hyp)
best_scores[i * self.num_beam_hyps_to_keep + j] = best_score
# prepare for adding eos
sent_lengths_max = sent_lengths.max().item() + 1
sent_max_len = min(sent_lengths_max, max_length) if max_length is not None else sent_lengths_max
decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
# shorter batches are padded if needed
if sent_lengths.min().item() != sent_lengths.max().item():
assert pad_token_id is not None, "`pad_token_id` has to be defined"
decoded.fill_(pad_token_id)
# fill with hypotheses and eos_token_id if the latter fits in
for i, hypo in enumerate(best):
decoded[i, : sent_lengths[i]] = hypo
if sent_lengths[i] < sent_max_len:
# inserting only the first eos_token_id
decoded[i, sent_lengths[i]] = eos_token_id[0]
return UserDict(
{
"sequences": decoded,
"sequence_scores": best_scores,
}
)
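# Sketch (illustrative; not in the original file): building a constrained scorer
# with a single phrasal constraint, roughly what `generate()` does when
# `force_words_ids` is passed. The token ids are placeholders.
def _example_constrained_scorer(batch_size=1, num_beams=4):
    from .beam_constraints import PhrasalConstraint
    constraint = PhrasalConstraint([42, 7])  # force tokens 42 then 7 to appear, in order
    return ConstrainedBeamSearchScorer(
        batch_size=batch_size,
        num_beams=num_beams,
        constraints=[constraint],
        device=torch.device("cpu"),
    )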
class BeamHypotheses:
def __init__(self, num_beams: int, length_penalty: float, early_stopping: bool, max_length: Optional[int] = None):
"""
Initialize n-best list of hypotheses.
"""
self.length_penalty = length_penalty
self.early_stopping = early_stopping
self.max_length = max_length
self.num_beams = num_beams
self.beams = []
self.worst_score = 1e9
if not isinstance(self.early_stopping, bool) and self.max_length is None:
raise ValueError(
"When `do_early_stopping` is set to a string, `max_length` must be defined. Ensure it is passed to the"
" BeamScorer class instance at initialization time."
)
def __len__(self):
"""
Number of hypotheses in the list.
"""
return len(self.beams)
def add(self, hyp: torch.LongTensor, sum_logprobs: float, beam_indices: Optional[torch.LongTensor] = None):
"""
Add a new hypothesis to the list.
"""
score = sum_logprobs / (hyp.shape[-1] ** self.length_penalty)
if len(self) < self.num_beams or score > self.worst_score:
self.beams.append((score, hyp, beam_indices))
if len(self) > self.num_beams:
sorted_next_scores = sorted([(s, idx) for idx, (s, _, _) in enumerate(self.beams)])
del self.beams[sorted_next_scores[0][1]]
self.worst_score = sorted_next_scores[1][0]
else:
self.worst_score = min(score, self.worst_score)
def is_done(self, best_sum_logprobs: float, cur_len: int) -> bool:
"""
If there are enough hypotheses and that none of the hypotheses being generated can become better than the worst
one in the heap, then we are done with this sentence.
"""
if len(self) < self.num_beams:
return False
# `True`: stop as soon as at least `num_beams` hypotheses are finished
if self.early_stopping is True:
return True
# `False`: heuristic -- compute best possible score from `cur_len`, even though it is not entirely accurate
# when `length_penalty` is positive. See the discussion below for more details.
# https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565
elif self.early_stopping is False:
highest_attainable_score = best_sum_logprobs / cur_len**self.length_penalty
ret = self.worst_score >= highest_attainable_score
return ret
# `"never"`: compute the best possible score, depending on the signal of `length_penalty`
else:
# `length_penalty` > 0.0 -> max denominator is obtained from `max_length`, not from `cur_len` -> min
# abs(`highest_attainable_score`) is obtained -> `highest_attainable_score` is negative, hence we obtain
# its max this way
if self.length_penalty > 0.0:
highest_attainable_score = best_sum_logprobs / self.max_length**self.length_penalty
# the opposite logic applies here (max `highest_attainable_score` from `cur_len`)
else:
highest_attainable_score = best_sum_logprobs / cur_len**self.length_penalty
ret = self.worst_score >= highest_attainable_score
return ret
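# Worked example (added for exposition): `BeamHypotheses.add` divides the summed
# log-probability by len(hyp) ** length_penalty, e.g. sum_logprobs = -6.0,
# length = 4, length_penalty = 2.0 -> score = -6.0 / 16 = -0.375. A positive
# penalty shrinks the magnitude of these (negative) scores faster for longer
# hypotheses, which is why it promotes longer sequences.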
|
27182812/ChatGLM-LLaMA-chinese-insturct | 38,777 | src/transformers/generation/configuration_utils.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Generation configuration class and utilities."""
import copy
import json
import os
from typing import Any, Dict, Optional, Union
from .. import __version__
from ..configuration_utils import PretrainedConfig
from ..utils import (
GENERATION_CONFIG_NAME,
PushToHubMixin,
cached_file,
download_url,
extract_commit_hash,
is_remote_url,
logging,
)
logger = logging.get_logger(__name__)
class GenerationConfig(PushToHubMixin):
r"""
Class that holds a configuration for a generation task. A `generate` call supports the following generation methods
for text-decoder, text-to-text, speech-to-text, and vision-to-text models:
- *greedy decoding* by calling [`~generation.GenerationMixin.greedy_search`] if `num_beams=1` and
`do_sample=False`
- *contrastive search* by calling [`~generation.GenerationMixin.contrastive_search`] if `penalty_alpha>0.`
and `top_k>1`
- *multinomial sampling* by calling [`~generation.GenerationMixin.sample`] if `num_beams=1` and
`do_sample=True`
- *beam-search decoding* by calling [`~generation.GenerationMixin.beam_search`] if `num_beams>1` and
`do_sample=False`
- *beam-search multinomial sampling* by calling [`~generation.GenerationMixin.beam_sample`] if
`num_beams>1` and `do_sample=True`
- *diverse beam-search decoding* by calling [`~generation.GenerationMixin.group_beam_search`], if
`num_beams>1` and `num_beam_groups>1`
- *constrained beam-search decoding* by calling [`~generation.GenerationMixin.constrained_beam_search`], if
`constraints!=None` or `force_words_ids!=None`
You do not need to call any of the above methods directly. Pass custom parameter values to `generate`. To learn
more about decoding strategies refer to the [text generation strategies guide](./generation_strategies).
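For instance (an illustrative configuration, not a prescribed default), `GenerationConfig(max_new_tokens=50,
do_sample=True, top_k=50)` selects multinomial sampling, since `num_beams` keeps its default value of 1.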
Args:
> Parameters that control the length of the output
max_length (`int`, *optional*, defaults to 20):
The maximum length the generated tokens can have. Corresponds to the length of the input prompt +
`max_new_tokens`. Its effect is overridden by `max_new_tokens`, if also set.
max_new_tokens (`int`, *optional*):
The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt.
min_length (`int`, *optional*, defaults to 0):
The minimum length of the sequence to be generated. Corresponds to the length of the input prompt +
`min_new_tokens`. Its effect is overridden by `min_new_tokens`, if also set.
min_new_tokens (`int`, *optional*):
The minimum numbers of tokens to generate, ignoring the number of tokens in the prompt.
early_stopping (`bool` or `str`, *optional*, defaults to `False`):
Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values:
`True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where a
heuristic is applied and the generation stops when it is very unlikely to find better candidates;
`"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical
beam search algorithm).
max_time (`float`, *optional*):
The maximum amount of time you allow the computation to run for, in seconds. Generation will still finish
the current pass after the allocated time has passed.
> Parameters that control the generation strategy used
do_sample (`bool`, *optional*, defaults to `False`):
Whether or not to use sampling; use greedy decoding otherwise.
num_beams (`int`, *optional*, defaults to 1):
Number of beams for beam search. 1 means no beam search.
num_beam_groups (`int`, *optional*, defaults to 1):
Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
penalty_alpha (`float`, *optional*):
The value balances the model confidence and the degeneration penalty in contrastive search decoding.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should use the past last key/values attentions (if applicable to the model) to
speed up decoding.
> Parameters for manipulation of the model output logits
temperature (`float`, *optional*, defaults to 1.0):
The value used to modulate the next token probabilities.
top_k (`int`, *optional*, defaults to 50):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`, *optional*, defaults to 1.0):
If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to
`top_p` or higher are kept for generation.
typical_p (`float`, *optional*, defaults to 1.0):
Local typicality measures how similar the conditional probability of predicting a target token next is to
the expected conditional probability of predicting a random token next, given the partial text already
generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that
add up to `typical_p` or higher are kept for generation. See [this
paper](https://arxiv.org/pdf/2202.00666.pdf) for more details.
epsilon_cutoff (`float`, *optional*, defaults to 0.0):
If set to float strictly between 0 and 1, only tokens with a conditional probability greater than
`epsilon_cutoff` will be sampled. In the paper, suggested values range from 3e-4 to 9e-4, depending on the
size of the model. See [Truncation Sampling as Language Model
Desmoothing](https://arxiv.org/abs/2210.15191) for more details.
eta_cutoff (`float`, *optional*, defaults to 0.0):
Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to float strictly between
0 and 1, a token is only considered if it is greater than either `eta_cutoff` or `sqrt(eta_cutoff) *
exp(-entropy(softmax(next_token_logits)))`. The latter term is intuitively the expected next token
probability, scaled by `sqrt(eta_cutoff)`. In the paper, suggested values range from 3e-4 to 2e-3,
depending on the size of the model. See [Truncation Sampling as Language Model
Desmoothing](https://arxiv.org/abs/2210.15191) for more details.
diversity_penalty (`float`, *optional*, defaults to 0.0):
This value is subtracted from a beam's score if it generates a token that is the same as a token generated by
any beam from another group at a particular time. Note that `diversity_penalty` is only effective if group
beam search is enabled.
repetition_penalty (`float`, *optional*, defaults to 1.0):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
encoder_repetition_penalty (`float`, *optional*, defaults to 1.0):
The parameter for encoder_repetition_penalty. An exponential penalty on sequences that are not in the
original input. 1.0 means no penalty.
length_penalty (`float`, *optional*, defaults to 1.0):
Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
`length_penalty` < 0.0 encourages shorter sequences.
no_repeat_ngram_size (`int`, *optional*, defaults to 0):
If set to int > 0, all ngrams of that size can only occur once.
bad_words_ids(`List[List[int]]`, *optional*):
List of token ids that are not allowed to be generated. In order to get the token ids of the words that
should not appear in the generated text, use `tokenizer(bad_words, add_prefix_space=True,
add_special_tokens=False).input_ids`.
force_words_ids(`List[List[int]]` or `List[List[List[int]]]`, *optional*):
List of token ids that must be generated. If given a `List[List[int]]`, this is treated as a simple list of
words that must be included, the opposite to `bad_words_ids`. If given `List[List[List[int]]]`, this
triggers a [disjunctive constraint](https://github.com/huggingface/transformers/issues/14081), where one
can allow different forms of each word.
renormalize_logits (`bool`, *optional*, defaults to `False`):
Whether to renormalize the logits after applying all the logits processors or warpers (including the custom
ones). It's highly recommended to set this flag to `True` as the search algorithms assume the score logits
are normalized, but some logit processors or warpers break the normalization.
constraints (`List[Constraint]`, *optional*):
Custom constraints that can be added to the generation to ensure that the output will contain the use of
certain tokens as defined by `Constraint` objects, in the most sensible way possible.
forced_bos_token_id (`int`, *optional*, defaults to `model.config.forced_bos_token_id`):
The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for
multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target
language token.
forced_eos_token_id (`Union[int, List[int]]`, *optional*, defaults to `model.config.forced_eos_token_id`):
The id of the token to force as the last generated token when `max_length` is reached. Optionally, use a
list to set multiple *end-of-sequence* tokens.
remove_invalid_values (`bool`, *optional*, defaults to `model.config.remove_invalid_values`):
Whether to remove possible *nan* and *inf* outputs of the model to prevent the generation method to crash.
Note that using `remove_invalid_values` can slow down generation.
exponential_decay_length_penalty (`tuple(int, float)`, *optional*):
This tuple adds an exponentially increasing length penalty after a certain number of tokens have been
generated. The tuple shall consist of `(start_index, decay_factor)`, where `start_index` indicates where
the penalty starts and `decay_factor` represents the factor of exponential decay.
suppress_tokens (`List[int]`, *optional*):
A list of tokens that will be suppressed at generation. The `SuppressTokens` logit processor will set their
log probs to `-inf` so that they are not sampled.
begin_suppress_tokens (`List[int]`, *optional*):
A list of tokens that will be suppressed at the beginning of the generation. The `SuppressBeginTokens` logit
processor will set their log probs to `-inf` so that they are not sampled.
forced_decoder_ids (`List[List[int]]`, *optional*):
A list of pairs of integers which indicates a mapping from generation indices to token indices that will be
forced before sampling. For example, `[[1, 123]]` means the second generated token will always be a token
of index 123.
> Parameters that define the output variables of `generate`
num_return_sequences(`int`, *optional*, defaults to 1):
The number of independently computed returned sequences for each element in the batch.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more details.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more details.
output_scores (`bool`, *optional*, defaults to `False`):
Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
> Special tokens that can be used at generation time
pad_token_id (`int`, *optional*):
The id of the *padding* token.
bos_token_id (`int`, *optional*):
The id of the *beginning-of-sequence* token.
eos_token_id (`Union[int, List[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
> Generation parameters exclusive to encoder-decoder models
encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0):
If set to int > 0, all ngrams of that size that occur in the `encoder_input_ids` cannot occur in the
`decoder_input_ids`.
decoder_start_token_id (`int`, *optional*):
If an encoder-decoder model starts decoding with a different token than *bos*, the id of that token.
> Wild card
generation_kwargs:
Additional generation kwargs will be forwarded to the `generate` function of the model. Kwargs that are not
present in `generate`'s signature will be used in the model forward pass.
"""
def __init__(self, **kwargs):
# Parameters that control the length of the output
self.max_length = kwargs.pop("max_length", 20)
self.max_new_tokens = kwargs.pop("max_new_tokens", None)
self.min_length = kwargs.pop("min_length", 0)
self.min_new_tokens = kwargs.pop("min_new_tokens", None)
self.early_stopping = kwargs.pop("early_stopping", False)
self.max_time = kwargs.pop("max_time", None)
# Parameters that control the generation strategy used
self.do_sample = kwargs.pop("do_sample", False)
self.num_beams = kwargs.pop("num_beams", 1)
self.num_beam_groups = kwargs.pop("num_beam_groups", 1)
self.penalty_alpha = kwargs.pop("penalty_alpha", None)
self.use_cache = kwargs.pop("use_cache", True)
# Parameters for manipulation of the model output logits
self.temperature = kwargs.pop("temperature", 1.0)
self.top_k = kwargs.pop("top_k", 50)
self.top_p = kwargs.pop("top_p", 1.0)
self.typical_p = kwargs.pop("typical_p", 1.0)
self.epsilon_cutoff = kwargs.pop("epsilon_cutoff", 0.0)
self.eta_cutoff = kwargs.pop("eta_cutoff", 0.0)
self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0)
self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
self.encoder_repetition_penalty = kwargs.pop("encoder_repetition_penalty", 1.0)
self.length_penalty = kwargs.pop("length_penalty", 1.0)
self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
self.bad_words_ids = kwargs.pop("bad_words_ids", None)
self.force_words_ids = kwargs.pop("force_words_ids", None)
self.renormalize_logits = kwargs.pop("renormalize_logits", False)
self.constraints = kwargs.pop("constraints", None)
self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None)
self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None)
self.remove_invalid_values = kwargs.pop("remove_invalid_values", False)
self.exponential_decay_length_penalty = kwargs.pop("exponential_decay_length_penalty", None)
self.suppress_tokens = kwargs.pop("suppress_tokens", None)
self.begin_suppress_tokens = kwargs.pop("begin_suppress_tokens", None)
self.forced_decoder_ids = kwargs.pop("forced_decoder_ids", None)
# Parameters that define the output variables of `generate`
self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
self.output_attentions = kwargs.pop("output_attentions", False)
self.output_hidden_states = kwargs.pop("output_hidden_states", False)
self.output_scores = kwargs.pop("output_scores", False)
self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False)
# Special tokens that can be used at generation time
self.pad_token_id = kwargs.pop("pad_token_id", None)
self.bos_token_id = kwargs.pop("bos_token_id", None)
self.eos_token_id = kwargs.pop("eos_token_id", None)
# Generation parameters exclusive to encoder-decoder models
self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0)
self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
# Wild card
self.generation_kwargs = kwargs.pop("generation_kwargs", {})
# The remaining attributes do not parametrize `.generate()`, but are informative and/or used by the hub
# interface.
self._from_model_config = kwargs.pop("_from_model_config", False)
self._commit_hash = kwargs.pop("_commit_hash", None)
self.transformers_version = kwargs.pop("transformers_version", __version__)
# Additional attributes without default values
if not self._from_model_config:
# we don't want to copy values from the model config if we're initializing a `GenerationConfig` from a model's default configuration file
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error(f"Can't set {key} with value {value} for {self}")
raise err
# Validate the values of the attributes
self.validate()
def __eq__(self, other):
if not isinstance(other, GenerationConfig):
return False
self_dict = self.__dict__.copy()
other_dict = other.__dict__.copy()
# ignore metadata
for metadata_field in ("_from_model_config", "_commit_hash", "transformers_version"):
self_dict.pop(metadata_field, None)
other_dict.pop(metadata_field, None)
return self_dict == other_dict
def __repr__(self):
return f"{self.__class__.__name__} {self.to_json_string()}"
def validate(self):
"""
Validates the values of the attributes of the GenerationConfig instance, and raises a `ValueError` if any of
the values are invalid.
"""
if self.early_stopping not in {True, False, "never"}:
raise ValueError(f"`early_stopping` must be a boolean or 'never', but is {self.early_stopping}.")
def save_pretrained(
self,
save_directory: Union[str, os.PathLike],
config_file_name: Optional[Union[str, os.PathLike]] = None,
push_to_hub: bool = False,
**kwargs,
):
r"""
Save a generation configuration object to the directory `save_directory`, so that it can be re-loaded using the
[`~GenerationConfig.from_pretrained`] class method.
Args:
save_directory (`str` or `os.PathLike`):
Directory where the configuration JSON file will be saved (will be created if it does not exist).
config_file_name (`str` or `os.PathLike`, *optional*, defaults to `"generation_config.json"`):
Name of the generation configuration JSON file to be saved in `save_directory`.
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
namespace).
kwargs:
Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
"""
config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME
if os.path.isfile(save_directory):
raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
os.makedirs(save_directory, exist_ok=True)
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
repo_id = self._create_repo(repo_id, **kwargs)
files_timestamps = self._get_files_timestamps(save_directory)
output_config_file = os.path.join(save_directory, config_file_name)
self.to_json_file(output_config_file, use_diff=True)
logger.info(f"Configuration saved in {output_config_file}")
if push_to_hub:
self._upload_modified_files(
save_directory,
repo_id,
files_timestamps,
commit_message=commit_message,
token=kwargs.get("use_auth_token"),
)
@classmethod
def from_pretrained(
cls,
pretrained_model_name: Union[str, os.PathLike],
config_file_name: Optional[Union[str, os.PathLike]] = None,
**kwargs,
) -> "GenerationConfig":
r"""
Instantiate a [`GenerationConfig`] from a generation configuration file.
Args:
pretrained_model_name (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained model configuration hosted inside a model repo on
huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or
namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`.
- a path to a *directory* containing a configuration file saved using the
[`~GenerationConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
config_file_name (`str` or `os.PathLike`, *optional*, defaults to `"generation_config.json"`):
Name of the generation configuration JSON file to be loaded from `pretrained_model_name`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force to (re-)download the configuration files and override the cached versions if
they exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to delete incompletely received files. Attempts to resume the download if such a file
exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
use_auth_token (`str` or `bool`, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
<Tip>
To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
</Tip>
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final configuration object.
If `True`, then this functions returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
part of `kwargs` which has not been used to update `config` and is otherwise ignored.
subfolder (`str`, *optional*, defaults to `""`):
In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
specify the folder name here.
kwargs (`Dict[str, Any]`, *optional*):
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
by the `return_unused_kwargs` keyword parameter.
Returns:
[`GenerationConfig`]: The configuration object instantiated from this pretrained model.
Examples:
```python
>>> from transformers import GenerationConfig
>>> # Download configuration from huggingface.co and cache.
>>> generation_config = GenerationConfig.from_pretrained("gpt2")
>>> # E.g. config was saved using *save_pretrained('./test/saved_model/')*
>>> generation_config.save_pretrained("./test/saved_model/")
>>> generation_config = GenerationConfig.from_pretrained("./test/saved_model/")
>>> # You can also specify configuration names to your generation configuration file
>>> generation_config.save_pretrained("./test/saved_model/", config_file_name="my_configuration.json")
>>> generation_config = GenerationConfig.from_pretrained("./test/saved_model/", "my_configuration.json")
>>> # If you'd like to try a minor variation to an existing configuration, you can also pass generation
>>> # arguments to `.from_pretrained()`. Be mindful that typos and unused arguments will be ignored
>>> generation_config, unused_kwargs = GenerationConfig.from_pretrained(
... "gpt2", top_k=1, foo=False, return_unused_kwargs=True
... )
>>> generation_config.top_k
1
>>> unused_kwargs
{'foo': False}
```"""
config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
use_auth_token = kwargs.pop("use_auth_token", None)
local_files_only = kwargs.pop("local_files_only", False)
revision = kwargs.pop("revision", None)
subfolder = kwargs.pop("subfolder", "")
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
commit_hash = kwargs.pop("_commit_hash", None)
user_agent = {"file_type": "config", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
config_path = os.path.join(pretrained_model_name, config_file_name)
config_path = str(config_path)
is_local = os.path.exists(config_path)
if os.path.isfile(os.path.join(subfolder, config_path)):
# Special case when config_path is a local file
resolved_config_file = config_path
is_local = True
elif is_remote_url(config_path):
configuration_file = config_path
resolved_config_file = download_url(config_path)
else:
configuration_file = config_file_name
try:
# Load from local folder or from cache or download from model Hub and cache
resolved_config_file = cached_file(
pretrained_model_name,
configuration_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
revision=revision,
subfolder=subfolder,
_commit_hash=commit_hash,
)
commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
except EnvironmentError:
# Raise any environment error raised by `cached_file`. It will have a helpful error message adapted to
# the original exception.
raise
except Exception:
# For any other exception, we throw a generic error.
raise EnvironmentError(
f"Can't load the configuration of '{pretrained_model_name}'. If you were trying to load it"
" from 'https://huggingface.co/models', make sure you don't have a local directory with the same"
f" name. Otherwise, make sure '{pretrained_model_name}' is the correct path to a directory"
f" containing a {configuration_file} file"
)
try:
# Load config dict
config_dict = cls._dict_from_json_file(resolved_config_file)
config_dict["_commit_hash"] = commit_hash
except (json.JSONDecodeError, UnicodeDecodeError):
raise EnvironmentError(
f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file."
)
if is_local:
logger.info(f"loading configuration file {resolved_config_file}")
else:
logger.info(f"loading configuration file {configuration_file} from cache at {resolved_config_file}")
return cls.from_dict(config_dict, **kwargs)
@classmethod
def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
@classmethod
def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "GenerationConfig":
"""
Instantiates a [`GenerationConfig`] from a Python dictionary of parameters.
Args:
config_dict (`Dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object.
kwargs (`Dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
[`GenerationConfig`]: The configuration object instantiated from those parameters.
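Example (a minimal sketch):
```python
>>> config = GenerationConfig.from_dict({"num_beams": 4})
>>> config.num_beams
4
```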
"""
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
# Those arguments may be passed along for our internal telemetry.
# We remove them so they don't appear in `return_unused_kwargs`.
kwargs.pop("_from_auto", None)
kwargs.pop("_from_pipeline", None)
# The commit hash might have been updated in the `config_dict`, we don't want the kwargs to erase that update.
if "_commit_hash" in kwargs and "_commit_hash" in config_dict:
kwargs["_commit_hash"] = config_dict["_commit_hash"]
# remove all the arguments that are in the config_dict
config = cls(**{**config_dict, **kwargs})
unused_kwargs = config.update(**kwargs)
logger.info(f"Generate config {config}")
if return_unused_kwargs:
return config, unused_kwargs
else:
return config
def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
"""
Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None,
converts torch.dtype to a string of just the type. For example, `torch.float32` gets converted into *"float32"*
string, which can then be stored in the json format.
"""
if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str):
d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
for value in d.values():
if isinstance(value, dict):
self.dict_torch_dtype_to_str(value)
def to_diff_dict(self) -> Dict[str, Any]:
"""
Removes all attributes from config which correspond to the default config attributes for better readability and
serializes to a Python dictionary.
Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
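Example (a minimal sketch; the exact `transformers_version` string will vary):
```python
>>> config = GenerationConfig(num_beams=4)
>>> config.to_diff_dict()  # doctest: +SKIP
{'num_beams': 4, 'transformers_version': '...'}
```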
"""
config_dict = self.to_dict()
# get the default config dict
default_config_dict = GenerationConfig().to_dict()
serializable_config_dict = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if key not in default_config_dict or key == "transformers_version" or value != default_config_dict[key]:
serializable_config_dict[key] = value
self.dict_torch_dtype_to_str(serializable_config_dict)
return serializable_config_dict
def to_dict(self) -> Dict[str, Any]:
"""
Serializes this instance to a Python dictionary.
Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
if "_commit_hash" in output:
del output["_commit_hash"]
# Transformers version when serializing this file
output["transformers_version"] = __version__
self.dict_torch_dtype_to_str(output)
return output
def to_json_string(self, use_diff: bool = True) -> str:
"""
Serializes this instance to a JSON string.
Args:
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `GenerationConfig()`
is serialized to JSON string.
Returns:
`str`: String containing all the attributes that make up this configuration instance in JSON format.
"""
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
"""
Save this instance to a JSON file.
Args:
json_file_path (`str` or `os.PathLike`):
Path to the JSON file in which this configuration instance's parameters will be saved.
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default `GenerationConfig()`
is serialized to JSON file.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string(use_diff=use_diff))
@classmethod
def from_model_config(cls, model_config: PretrainedConfig) -> "GenerationConfig":
"""
Instantiates a [`GenerationConfig`] from a [`PretrainedConfig`]. This function is useful to convert legacy
[`PretrainedConfig`] objects, which may contain generation parameters, into a stand-alone [`GenerationConfig`].
Args:
model_config (`PretrainedConfig`):
The model config that will be used to instantiate the generation config.
Returns:
[`GenerationConfig`]: The configuration object instantiated from those parameters.
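Example (a minimal sketch, downloading the `gpt2` model config from the Hub):
```python
>>> from transformers import AutoConfig, GenerationConfig
>>> model_config = AutoConfig.from_pretrained("gpt2")
>>> generation_config = GenerationConfig.from_model_config(model_config)
```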
"""
config_dict = model_config.to_dict()
config_dict.pop("_from_model_config", None)
config = cls.from_dict(config_dict, return_unused_kwargs=False, _from_model_config=True)
# Special case: some models have generation attributes set in the decoder. Use them if still unset in the
# generation config.
for decoder_name in ("decoder", "generator"):
if decoder_name in config_dict:
default_generation_config = GenerationConfig()
decoder_config = config_dict[decoder_name]
for attr in config.to_dict().keys():
if attr in decoder_config and getattr(config, attr) == getattr(default_generation_config, attr):
setattr(config, attr, decoder_config[attr])
return config
def update(self, **kwargs):
"""
Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes,
returning all the unused kwargs.
Args:
kwargs (`Dict[str, Any]`):
Dictionary of attributes to tentatively update this class.
Returns:
`Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance.
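Example (a minimal sketch):
```python
>>> config = GenerationConfig()
>>> unused = config.update(num_beams=4, foo="bar")
>>> config.num_beams
4
>>> unused  # `foo` is not a `GenerationConfig` attribute, so it is returned untouched
{'foo': 'bar'}
```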
"""
to_remove = []
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
to_remove.append(key)
# remove all the attributes that were updated, without modifying the input dict
unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove}
return unused_kwargs
|
27182812/ChatGLM-LLaMA-chinese-insturct | 8,095 | src/transformers/pipelines/audio_utils.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
"""
Helper function to read an audio file through ffmpeg.
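A minimal usage sketch (assumes `ffmpeg` is installed and on the PATH; the file name is illustrative):
```python
>>> with open("sample.flac", "rb") as f:
...     audio = ffmpeg_read(f.read(), sampling_rate=16000)
```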
"""
ar = f"{sampling_rate}"
ac = "1"
format_for_conversion = "f32le"
ffmpeg_command = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
output_stream = ffmpeg_process.communicate(bpayload)
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
out_bytes = output_stream[0]
audio = np.frombuffer(out_bytes, np.float32)
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile")
return audio
def ffmpeg_microphone(
sampling_rate: int,
chunk_length_s: float,
format_for_conversion: str = "f32le",
):
"""
Helper function to read raw microphone data.
"""
ar = f"{sampling_rate}"
ac = "1"
if format_for_conversion == "s16le":
size_of_sample = 2
elif format_for_conversion == "f32le":
size_of_sample = 4
else:
raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
system = platform.system()
if system == "Linux":
format_ = "alsa"
input_ = "default"
elif system == "Darwin":
format_ = "avfoundation"
input_ = ":0"
elif system == "Windows":
format_ = "dshow"
input_ = "default"
else:
raise ValueError(f"Unhandled operating system `{system}`. Only Linux, Darwin and Windows are supported.")
ffmpeg_command = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
for item in iterator:
yield item
def ffmpeg_microphone_live(
sampling_rate: int,
chunk_length_s: float,
stream_chunk_s: Optional[int] = None,
stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
format_for_conversion: str = "f32le",
):
"""
Helper function to read audio from the microphone through ffmpeg. This will output `partial` overlapping
chunks starting from `stream_chunk_s` (if it is defined) until `chunk_length_s` is reached. It will make use of
striding to avoid errors on the "sides" of the various chunks.
Arguments:
sampling_rate (`int`):
The sampling_rate to use when reading the data from the microphone. Try using the model's sampling_rate to
avoid resampling later.
chunk_length_s (`float` or `int`):
The length of the maximum chunk of audio to be returned. This includes the eventual striding.
stream_chunk_s (`float` or `int`, *optional*):
The length of the minimal temporary audio to be returned.
stride_length_s (`float` or `int` or `(float, float)`, *optional*, defaults to `None`):
The length of the striding to be used. Stride is used to provide context to a model on the (left, right) of
an audio sample but without using that part to actually make the prediction. Setting this does not change
the length of the chunk.
format_for_conversion (`str`, *optional*, defaults to `"f32le"`):
The name of the format of the audio samples to be returned by ffmpeg. The standard is `f32le`; `s16le`
could also be used.
Return:
A generator yielding dictionaries of the following form:
`{"sampling_rate": int, "raw": np.array(), "partial": bool}`, with optionally a `"stride": (int, int)` key if
`stride_length_s` is defined.
`stride` and `raw` are both expressed in `samples`, and `partial` is a boolean saying whether the current yield
item is a whole chunk or a partial temporary result to be later replaced by another larger chunk.
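Example (a minimal sketch; requires a working microphone and `ffmpeg` on the PATH):
```python
>>> for item in ffmpeg_microphone_live(sampling_rate=16000, chunk_length_s=5.0, stream_chunk_s=1.0):
...     print(item["partial"], item["raw"].shape)  # doctest: +SKIP
```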
"""
if stream_chunk_s is not None:
chunk_s = stream_chunk_s
else:
chunk_s = chunk_length_s
microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
if format_for_conversion == "s16le":
dtype = np.int16
size_of_sample = 2
elif format_for_conversion == "f32le":
dtype = np.float32
size_of_sample = 4
else:
raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
if stride_length_s is None:
stride_length_s = chunk_length_s / 6
chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
if isinstance(stride_length_s, (int, float)):
stride_length_s = [stride_length_s, stride_length_s]
stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
audio_time = datetime.datetime.now()
delta = datetime.timedelta(seconds=chunk_s)
for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
# Put everything back in numpy scale
item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
item["stride"] = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
item["sampling_rate"] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
"""
Reads raw bytes from an iterator and yields chunks of length `chunk_len`. Optionally adds `stride` to each chunk
to get overlaps. `stream` is used to return partial results even if a full `chunk_len` is not yet available.
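A worked example on raw bytes (the stride bytes are shared between adjacent chunks):
```python
>>> chunks = list(chunk_bytes_iter(iter([b"0123456789"]), chunk_len=6, stride=(1, 1)))
>>> [(c["raw"], c["stride"]) for c in chunks]
[(b'012345', (0, 1)), (b'456789', (1, 1)), (b'89', (1, 0))]
```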
"""
acc = b""
stride_left, stride_right = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
)
_stride_left = 0
for raw in iterator:
acc += raw
if stream and len(acc) < chunk_len:
stride = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(acc) >= chunk_len:
# We are flushing the accumulator
stride = (_stride_left, stride_right)
item = {"raw": acc[:chunk_len], "stride": stride}
if stream:
item["partial"] = False
yield item
_stride_left = stride_left
acc = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(acc) > stride_left:
item = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
item["partial"] = False
yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
"""
Internal function to create the generator of data through ffmpeg
"""
bufsize = 2**24  # 16MB
try:
with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
while True:
raw = ffmpeg_process.stdout.read(buflen)
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
|
27182812/ChatGLM-LLaMA-chinese-insturct | 22,879 | src/transformers/pipelines/token_classification.py | import types
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
from ..models.bert.tokenization_bert import BasicTokenizer
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, Dataset, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
class TokenClassificationArgumentHandler(ArgumentHandler):
"""
Handles arguments for token classification.
"""
def __call__(self, inputs: Union[str, List[str]], **kwargs):
if inputs is not None and isinstance(inputs, (list, tuple)) and len(inputs) > 0:
inputs = list(inputs)
batch_size = len(inputs)
elif isinstance(inputs, str):
inputs = [inputs]
batch_size = 1
elif (Dataset is not None and isinstance(inputs, Dataset)) or isinstance(inputs, types.GeneratorType):
return inputs, None
else:
raise ValueError("At least one input is required.")
offset_mapping = kwargs.get("offset_mapping")
if offset_mapping:
if isinstance(offset_mapping, list) and isinstance(offset_mapping[0], tuple):
offset_mapping = [offset_mapping]
if len(offset_mapping) != batch_size:
raise ValueError("offset_mapping should have the same batch size as the input")
return inputs, offset_mapping
class AggregationStrategy(ExplicitEnum):
"""All the valid aggregation strategies for TokenClassificationPipeline"""
NONE = "none"
SIMPLE = "simple"
FIRST = "first"
AVERAGE = "average"
MAX = "max"
@add_end_docstrings(
PIPELINE_INIT_ARGS,
r"""
ignore_labels (`List[str]`, defaults to `["O"]`):
A list of labels to ignore.
grouped_entities (`bool`, *optional*, defaults to `False`):
DEPRECATED, use `aggregation_strategy` instead. Whether or not to group the tokens corresponding to the
same entity together in the predictions or not.
aggregation_strategy (`str`, *optional*, defaults to `"none"`):
The strategy to fuse (or not) tokens based on the model prediction.
- "none" : Will simply not do any aggregation and simply return raw results from the model
- "simple" : Will attempt to group entities following the default schema. (A, B-TAG), (B, I-TAG), (C,
I-TAG), (D, B-TAG2) (E, B-TAG2) will end up being [{"word": ABC, "entity": "TAG"}, {"word": "D",
"entity": "TAG2"}, {"word": "E", "entity": "TAG2"}] Notice that two consecutive B tags will end up as
different entities. On word based languages, we might end up splitting words undesirably : Imagine
Microsoft being tagged as [{"word": "Micro", "entity": "ENTERPRISE"}, {"word": "soft", "entity":
"NAME"}]. Look for FIRST, MAX, AVERAGE for ways to mitigate that and disambiguate words (on languages
that support that meaning, which is basically tokens separated by a space). These mitigations will
only work on real words, "New york" might still be tagged with two different entities.
- "first" : (works only on word based models) Will use the `SIMPLE` strategy except that words, cannot
end up with different tags. Words will simply use the tag of the first token of the word when there
is ambiguity.
- "average" : (works only on word based models) Will use the `SIMPLE` strategy except that words,
cannot end up with different tags. scores will be averaged first across tokens, and then the maximum
label is applied.
- "max" : (works only on word based models) Will use the `SIMPLE` strategy except that words, cannot
end up with different tags. Word entity will simply be the token with the maximum score.
""",
)
class TokenClassificationPipeline(Pipeline):
"""
Named Entity Recognition pipeline using any `ModelForTokenClassification`. See the [named entity recognition
examples](../task_summary#named-entity-recognition) for more information.
Example:
```python
>>> from transformers import pipeline
>>> token_classifier = pipeline(model="Jean-Baptiste/camembert-ner", aggregation_strategy="simple")
>>> sentence = "Je m'appelle jean-baptiste et je vis à montréal"
>>> tokens = token_classifier(sentence)
>>> tokens
[{'entity_group': 'PER', 'score': 0.9931, 'word': 'jean-baptiste', 'start': 12, 'end': 26}, {'entity_group': 'LOC', 'score': 0.998, 'word': 'montréal', 'start': 38, 'end': 47}]
>>> token = tokens[0]
>>> # Start and end provide an easy way to highlight words in the original text.
>>> sentence[token["start"] : token["end"]]
' jean-baptiste'
>>> # Some models use the same idea to do part of speech.
>>> syntaxer = pipeline(model="vblagoje/bert-english-uncased-finetuned-pos", aggregation_strategy="simple")
>>> syntaxer("My name is Sarah and I live in London")
[{'entity_group': 'PRON', 'score': 0.999, 'word': 'my', 'start': 0, 'end': 2}, {'entity_group': 'NOUN', 'score': 0.997, 'word': 'name', 'start': 3, 'end': 7}, {'entity_group': 'AUX', 'score': 0.994, 'word': 'is', 'start': 8, 'end': 10}, {'entity_group': 'PROPN', 'score': 0.999, 'word': 'sarah', 'start': 11, 'end': 16}, {'entity_group': 'CCONJ', 'score': 0.999, 'word': 'and', 'start': 17, 'end': 20}, {'entity_group': 'PRON', 'score': 0.999, 'word': 'i', 'start': 21, 'end': 22}, {'entity_group': 'VERB', 'score': 0.998, 'word': 'live', 'start': 23, 'end': 27}, {'entity_group': 'ADP', 'score': 0.999, 'word': 'in', 'start': 28, 'end': 30}, {'entity_group': 'PROPN', 'score': 0.999, 'word': 'london', 'start': 31, 'end': 37}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This token recognition pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"ner"` (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous).
The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the
up-to-date list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=token-classification).
"""
default_input_names = "sequences"
def __init__(self, args_parser=TokenClassificationArgumentHandler(), *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_model_type(
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
)
self._basic_tokenizer = BasicTokenizer(do_lower_case=False)
self._args_parser = args_parser
def _sanitize_parameters(
self,
ignore_labels=None,
grouped_entities: Optional[bool] = None,
ignore_subwords: Optional[bool] = None,
aggregation_strategy: Optional[AggregationStrategy] = None,
offset_mapping: Optional[List[Tuple[int, int]]] = None,
):
preprocess_params = {}
if offset_mapping is not None:
preprocess_params["offset_mapping"] = offset_mapping
postprocess_params = {}
if grouped_entities is not None or ignore_subwords is not None:
if grouped_entities and ignore_subwords:
aggregation_strategy = AggregationStrategy.FIRST
elif grouped_entities and not ignore_subwords:
aggregation_strategy = AggregationStrategy.SIMPLE
else:
aggregation_strategy = AggregationStrategy.NONE
if grouped_entities is not None:
warnings.warn(
"`grouped_entities` is deprecated and will be removed in version v5.0.0, defaulted to"
f' `aggregation_strategy="{aggregation_strategy}"` instead.'
)
if ignore_subwords is not None:
warnings.warn(
"`ignore_subwords` is deprecated and will be removed in version v5.0.0, defaulted to"
f' `aggregation_strategy="{aggregation_strategy}"` instead.'
)
if aggregation_strategy is not None:
if isinstance(aggregation_strategy, str):
aggregation_strategy = AggregationStrategy[aggregation_strategy.upper()]
if (
aggregation_strategy
in {AggregationStrategy.FIRST, AggregationStrategy.MAX, AggregationStrategy.AVERAGE}
and not self.tokenizer.is_fast
):
raise ValueError(
"Slow tokenizers cannot handle subwords. Please set the `aggregation_strategy` option"
'to `"simple"` or use a fast tokenizer.'
)
postprocess_params["aggregation_strategy"] = aggregation_strategy
if ignore_labels is not None:
postprocess_params["ignore_labels"] = ignore_labels
return preprocess_params, {}, postprocess_params
def __call__(self, inputs: Union[str, List[str]], **kwargs):
"""
Classify each token of the text(s) given as inputs.
Args:
inputs (`str` or `List[str]`):
One or several texts (or one list of texts) for token classification.
Return:
A list or a list of list of `dict`: Each result comes as a list of dictionaries (one for each token in the
corresponding input, or each entity if this pipeline was instantiated with an aggregation_strategy) with
the following keys:
- **word** (`str`) -- The token/word classified. This is obtained by decoding the selected tokens. If you
want to have the exact string in the original sentence, use `start` and `end`.
- **score** (`float`) -- The corresponding probability for `entity`.
- **entity** (`str`) -- The entity predicted for that token/word (it is named *entity_group* when
*aggregation_strategy* is not `"none"`).
- **index** (`int`, only present when `aggregation_strategy="none"`) -- The index of the corresponding
token in the sentence.
- **start** (`int`, *optional*) -- The index of the start of the corresponding entity in the sentence. Only
exists if the offsets are available within the tokenizer
- **end** (`int`, *optional*) -- The index of the end of the corresponding entity in the sentence. Only
exists if the offsets are available within the tokenizer
"""
_inputs, offset_mapping = self._args_parser(inputs, **kwargs)
if offset_mapping:
kwargs["offset_mapping"] = offset_mapping
return super().__call__(inputs, **kwargs)
def preprocess(self, sentence, offset_mapping=None):
truncation = True if self.tokenizer.model_max_length and self.tokenizer.model_max_length > 0 else False
model_inputs = self.tokenizer(
sentence,
return_tensors=self.framework,
truncation=truncation,
return_special_tokens_mask=True,
return_offsets_mapping=self.tokenizer.is_fast,
)
if offset_mapping:
model_inputs["offset_mapping"] = offset_mapping
model_inputs["sentence"] = sentence
return model_inputs
def _forward(self, model_inputs):
# Forward
special_tokens_mask = model_inputs.pop("special_tokens_mask")
offset_mapping = model_inputs.pop("offset_mapping", None)
sentence = model_inputs.pop("sentence")
if self.framework == "tf":
logits = self.model(model_inputs.data)[0]
else:
output = self.model(**model_inputs)
logits = output["logits"] if isinstance(output, dict) else output[0]
return {
"logits": logits,
"special_tokens_mask": special_tokens_mask,
"offset_mapping": offset_mapping,
"sentence": sentence,
**model_inputs,
}
def postprocess(self, model_outputs, aggregation_strategy=AggregationStrategy.NONE, ignore_labels=None):
if ignore_labels is None:
ignore_labels = ["O"]
logits = model_outputs["logits"][0].numpy()
sentence = model_outputs["sentence"]
input_ids = model_outputs["input_ids"][0]
offset_mapping = model_outputs["offset_mapping"][0] if model_outputs["offset_mapping"] is not None else None
special_tokens_mask = model_outputs["special_tokens_mask"][0].numpy()
maxes = np.max(logits, axis=-1, keepdims=True)
shifted_exp = np.exp(logits - maxes)
scores = shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
if self.framework == "tf":
input_ids = input_ids.numpy()
offset_mapping = offset_mapping.numpy() if offset_mapping is not None else None
pre_entities = self.gather_pre_entities(
sentence, input_ids, scores, offset_mapping, special_tokens_mask, aggregation_strategy
)
grouped_entities = self.aggregate(pre_entities, aggregation_strategy)
# Filter anything that is in self.ignore_labels
entities = [
entity
for entity in grouped_entities
if entity.get("entity", None) not in ignore_labels
and entity.get("entity_group", None) not in ignore_labels
]
return entities
def gather_pre_entities(
self,
sentence: str,
input_ids: np.ndarray,
scores: np.ndarray,
offset_mapping: Optional[List[Tuple[int, int]]],
special_tokens_mask: np.ndarray,
aggregation_strategy: AggregationStrategy,
) -> List[dict]:
"""Fuse various numpy arrays into dicts with all the information needed for aggregation"""
pre_entities = []
for idx, token_scores in enumerate(scores):
# Filter special_tokens, they should only occur
# at the sentence boundaries since we're not encoding pairs of
# sentences so we don't have to keep track of those.
if special_tokens_mask[idx]:
continue
word = self.tokenizer.convert_ids_to_tokens(int(input_ids[idx]))
if offset_mapping is not None:
start_ind, end_ind = offset_mapping[idx]
if not isinstance(start_ind, int):
if self.framework == "pt":
start_ind = start_ind.item()
end_ind = end_ind.item()
word_ref = sentence[start_ind:end_ind]
if getattr(self.tokenizer._tokenizer.model, "continuing_subword_prefix", None):
# This is a BPE, word aware tokenizer, there is a correct way
# to fuse tokens
is_subword = len(word) != len(word_ref)
else:
# This is a fallback heuristic. It will most likely fail on any kind of text + punctuation mixture that gets considered a "word". Non word-aware models cannot do better than this, unfortunately.
if aggregation_strategy in {
AggregationStrategy.FIRST,
AggregationStrategy.AVERAGE,
AggregationStrategy.MAX,
}:
warnings.warn("Tokenizer does not support real words, using fallback heuristic", UserWarning)
is_subword = start_ind > 0 and " " not in sentence[start_ind - 1 : start_ind + 1]
if int(input_ids[idx]) == self.tokenizer.unk_token_id:
word = word_ref
is_subword = False
else:
start_ind = None
end_ind = None
is_subword = False
pre_entity = {
"word": word,
"scores": token_scores,
"start": start_ind,
"end": end_ind,
"index": idx,
"is_subword": is_subword,
}
pre_entities.append(pre_entity)
return pre_entities
def aggregate(self, pre_entities: List[dict], aggregation_strategy: AggregationStrategy) -> List[dict]:
if aggregation_strategy in {AggregationStrategy.NONE, AggregationStrategy.SIMPLE}:
entities = []
for pre_entity in pre_entities:
entity_idx = pre_entity["scores"].argmax()
score = pre_entity["scores"][entity_idx]
entity = {
"entity": self.model.config.id2label[entity_idx],
"score": score,
"index": pre_entity["index"],
"word": pre_entity["word"],
"start": pre_entity["start"],
"end": pre_entity["end"],
}
entities.append(entity)
else:
entities = self.aggregate_words(pre_entities, aggregation_strategy)
if aggregation_strategy == AggregationStrategy.NONE:
return entities
return self.group_entities(entities)
def aggregate_word(self, entities: List[dict], aggregation_strategy: AggregationStrategy) -> dict:
word = self.tokenizer.convert_tokens_to_string([entity["word"] for entity in entities])
if aggregation_strategy == AggregationStrategy.FIRST:
scores = entities[0]["scores"]
idx = scores.argmax()
score = scores[idx]
entity = self.model.config.id2label[idx]
elif aggregation_strategy == AggregationStrategy.MAX:
max_entity = max(entities, key=lambda entity: entity["scores"].max())
scores = max_entity["scores"]
idx = scores.argmax()
score = scores[idx]
entity = self.model.config.id2label[idx]
elif aggregation_strategy == AggregationStrategy.AVERAGE:
scores = np.stack([entity["scores"] for entity in entities])
average_scores = np.nanmean(scores, axis=0)
entity_idx = average_scores.argmax()
entity = self.model.config.id2label[entity_idx]
score = average_scores[entity_idx]
else:
raise ValueError("Invalid aggregation_strategy")
new_entity = {
"entity": entity,
"score": score,
"word": word,
"start": entities[0]["start"],
"end": entities[-1]["end"],
}
return new_entity
def aggregate_words(self, entities: List[dict], aggregation_strategy: AggregationStrategy) -> List[dict]:
"""
Override tokens from a given word that disagree to force agreement on word boundaries.
Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft|
company| B-ENT I-ENT
"""
if aggregation_strategy in {
AggregationStrategy.NONE,
AggregationStrategy.SIMPLE,
}:
raise ValueError("NONE and SIMPLE strategies are invalid for word aggregation")
word_entities = []
word_group = None
for entity in entities:
if word_group is None:
word_group = [entity]
elif entity["is_subword"]:
word_group.append(entity)
else:
word_entities.append(self.aggregate_word(word_group, aggregation_strategy))
word_group = [entity]
# Last item
word_entities.append(self.aggregate_word(word_group, aggregation_strategy))
return word_entities
def group_sub_entities(self, entities: List[dict]) -> dict:
"""
Group together the adjacent tokens with the same entity predicted.
Args:
entities (`dict`): The entities predicted by the pipeline.
"""
# Get the first entity in the entity group
entity = entities[0]["entity"].split("-")[-1]
scores = np.nanmean([entity["score"] for entity in entities])
tokens = [entity["word"] for entity in entities]
entity_group = {
"entity_group": entity,
"score": np.mean(scores),
"word": self.tokenizer.convert_tokens_to_string(tokens),
"start": entities[0]["start"],
"end": entities[-1]["end"],
}
return entity_group
def get_tag(self, entity_name: str) -> Tuple[str, str]:
if entity_name.startswith("B-"):
bi = "B"
tag = entity_name[2:]
elif entity_name.startswith("I-"):
bi = "I"
tag = entity_name[2:]
else:
# It's not in B-, I- format
# Default to I- for continuation.
bi = "I"
tag = entity_name
return bi, tag
def group_entities(self, entities: List[dict]) -> List[dict]:
"""
Find and group together the adjacent tokens with the same entity predicted.
Args:
entities (`dict`): The entities predicted by the pipeline.
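Example (a minimal sketch, assuming `token_classifier` is an instantiated pipeline; the scores and the
grouped `word` are illustrative and depend on the model and tokenizer):
```python
>>> entities = [
...     {"entity": "B-PER", "score": 0.99, "word": "jean", "start": 0, "end": 4},
...     {"entity": "I-PER", "score": 0.97, "word": "##ne", "start": 4, "end": 6},
... ]
>>> token_classifier.group_entities(entities)  # doctest: +SKIP
[{'entity_group': 'PER', 'score': 0.98, 'word': 'jeanne', 'start': 0, 'end': 6}]
```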
"""
entity_groups = []
entity_group_disagg = []
for entity in entities:
if not entity_group_disagg:
entity_group_disagg.append(entity)
continue
# If the current entity is similar and adjacent to the previous entity,
# append it to the disaggregated entity group
# The split is meant to account for the "B" and "I" prefixes
# Shouldn't merge if both entities are B-type
bi, tag = self.get_tag(entity["entity"])
last_bi, last_tag = self.get_tag(entity_group_disagg[-1]["entity"])
if tag == last_tag and bi != "B":
# Modify subword type to be previous_type
entity_group_disagg.append(entity)
else:
# If the current entity is different from the previous entity
# aggregate the disaggregated entity group
entity_groups.append(self.group_sub_entities(entity_group_disagg))
entity_group_disagg = [entity]
if entity_group_disagg:
# it's the last entity, add it to the entity groups
entity_groups.append(self.group_sub_entities(entity_group_disagg))
return entity_groups
NerPipeline = TokenClassificationPipeline
|
27182812/ChatGLM-LLaMA-chinese-insturct | 45,567 | src/transformers/pipelines/__init__.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import json
import os
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from huggingface_hub import model_info
from ..configuration_utils import PretrainedConfig
from ..dynamic_module_utils import get_class_from_dynamic_module
from ..feature_extraction_utils import PreTrainedFeatureExtractor
from ..image_processing_utils import BaseImageProcessor
from ..models.auto.configuration_auto import AutoConfig
from ..models.auto.feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING, AutoFeatureExtractor
from ..models.auto.image_processing_auto import IMAGE_PROCESSOR_MAPPING, AutoImageProcessor
from ..models.auto.modeling_auto import AutoModelForDepthEstimation
from ..models.auto.tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer
from ..tokenization_utils import PreTrainedTokenizer
from ..tokenization_utils_fast import PreTrainedTokenizerFast
from ..utils import (
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
is_kenlm_available,
is_offline_mode,
is_pyctcdecode_available,
is_tf_available,
is_torch_available,
logging,
)
from .audio_classification import AudioClassificationPipeline
from .automatic_speech_recognition import AutomaticSpeechRecognitionPipeline
from .base import (
ArgumentHandler,
CsvPipelineDataFormat,
JsonPipelineDataFormat,
PipedPipelineDataFormat,
Pipeline,
PipelineDataFormat,
PipelineException,
PipelineRegistry,
get_default_model_and_revision,
infer_framework_load_model,
)
from .conversational import Conversation, ConversationalPipeline
from .depth_estimation import DepthEstimationPipeline
from .document_question_answering import DocumentQuestionAnsweringPipeline
from .feature_extraction import FeatureExtractionPipeline
from .fill_mask import FillMaskPipeline
from .image_classification import ImageClassificationPipeline
from .image_segmentation import ImageSegmentationPipeline
from .image_to_text import ImageToTextPipeline
from .object_detection import ObjectDetectionPipeline
from .question_answering import QuestionAnsweringArgumentHandler, QuestionAnsweringPipeline
from .table_question_answering import TableQuestionAnsweringArgumentHandler, TableQuestionAnsweringPipeline
from .text2text_generation import SummarizationPipeline, Text2TextGenerationPipeline, TranslationPipeline
from .text_classification import TextClassificationPipeline
from .text_generation import TextGenerationPipeline
from .token_classification import (
AggregationStrategy,
NerPipeline,
TokenClassificationArgumentHandler,
TokenClassificationPipeline,
)
from .video_classification import VideoClassificationPipeline
from .visual_question_answering import VisualQuestionAnsweringPipeline
from .zero_shot_audio_classification import ZeroShotAudioClassificationPipeline
from .zero_shot_classification import ZeroShotClassificationArgumentHandler, ZeroShotClassificationPipeline
from .zero_shot_image_classification import ZeroShotImageClassificationPipeline
from .zero_shot_object_detection import ZeroShotObjectDetectionPipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import (
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_WITH_LM_HEAD_MAPPING,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForImageClassification,
TFAutoModelForMaskedLM,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelForVision2Seq,
)
if is_torch_available():
import torch
from ..models.auto.modeling_auto import (
MODEL_FOR_MASKED_LM_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING,
AutoModel,
AutoModelForAudioClassification,
AutoModelForCausalLM,
AutoModelForCTC,
AutoModelForDocumentQuestionAnswering,
AutoModelForImageClassification,
AutoModelForImageSegmentation,
AutoModelForMaskedLM,
AutoModelForObjectDetection,
AutoModelForQuestionAnswering,
AutoModelForSemanticSegmentation,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForSpeechSeq2Seq,
AutoModelForTableQuestionAnswering,
AutoModelForTokenClassification,
AutoModelForVideoClassification,
AutoModelForVision2Seq,
AutoModelForVisualQuestionAnswering,
AutoModelForZeroShotObjectDetection,
)
if TYPE_CHECKING:
from ..modeling_tf_utils import TFPreTrainedModel
from ..modeling_utils import PreTrainedModel
logger = logging.get_logger(__name__)
# Register all the supported tasks here
TASK_ALIASES = {
"sentiment-analysis": "text-classification",
"ner": "token-classification",
"vqa": "visual-question-answering",
}
SUPPORTED_TASKS = {
"audio-classification": {
"impl": AudioClassificationPipeline,
"tf": (),
"pt": (AutoModelForAudioClassification,) if is_torch_available() else (),
"default": {"model": {"pt": ("superb/wav2vec2-base-superb-ks", "372e048")}},
"type": "audio",
},
"automatic-speech-recognition": {
"impl": AutomaticSpeechRecognitionPipeline,
"tf": (),
"pt": (AutoModelForCTC, AutoModelForSpeechSeq2Seq) if is_torch_available() else (),
"default": {"model": {"pt": ("facebook/wav2vec2-base-960h", "55bb623")}},
"type": "multimodal",
},
"feature-extraction": {
"impl": FeatureExtractionPipeline,
"tf": (TFAutoModel,) if is_tf_available() else (),
"pt": (AutoModel,) if is_torch_available() else (),
"default": {"model": {"pt": ("distilbert-base-cased", "935ac13"), "tf": ("distilbert-base-cased", "935ac13")}},
"type": "multimodal",
},
"text-classification": {
"impl": TextClassificationPipeline,
"tf": (TFAutoModelForSequenceClassification,) if is_tf_available() else (),
"pt": (AutoModelForSequenceClassification,) if is_torch_available() else (),
"default": {
"model": {
"pt": ("distilbert-base-uncased-finetuned-sst-2-english", "af0f99b"),
"tf": ("distilbert-base-uncased-finetuned-sst-2-english", "af0f99b"),
},
},
"type": "text",
},
"token-classification": {
"impl": TokenClassificationPipeline,
"tf": (TFAutoModelForTokenClassification,) if is_tf_available() else (),
"pt": (AutoModelForTokenClassification,) if is_torch_available() else (),
"default": {
"model": {
"pt": ("dbmdz/bert-large-cased-finetuned-conll03-english", "f2482bf"),
"tf": ("dbmdz/bert-large-cased-finetuned-conll03-english", "f2482bf"),
},
},
"type": "text",
},
"question-answering": {
"impl": QuestionAnsweringPipeline,
"tf": (TFAutoModelForQuestionAnswering,) if is_tf_available() else (),
"pt": (AutoModelForQuestionAnswering,) if is_torch_available() else (),
"default": {
"model": {
"pt": ("distilbert-base-cased-distilled-squad", "626af31"),
"tf": ("distilbert-base-cased-distilled-squad", "626af31"),
},
},
"type": "text",
},
"table-question-answering": {
"impl": TableQuestionAnsweringPipeline,
"pt": (AutoModelForTableQuestionAnswering,) if is_torch_available() else (),
"tf": (TFAutoModelForTableQuestionAnswering,) if is_tf_available() else (),
"default": {
"model": {
"pt": ("google/tapas-base-finetuned-wtq", "69ceee2"),
"tf": ("google/tapas-base-finetuned-wtq", "69ceee2"),
},
},
"type": "text",
},
"visual-question-answering": {
"impl": VisualQuestionAnsweringPipeline,
"pt": (AutoModelForVisualQuestionAnswering,) if is_torch_available() else (),
"tf": (),
"default": {
"model": {"pt": ("dandelin/vilt-b32-finetuned-vqa", "4355f59")},
},
"type": "multimodal",
},
"document-question-answering": {
"impl": DocumentQuestionAnsweringPipeline,
"pt": (AutoModelForDocumentQuestionAnswering,) if is_torch_available() else (),
"tf": (),
"default": {
"model": {"pt": ("impira/layoutlm-document-qa", "52e01b3")},
},
"type": "multimodal",
},
"fill-mask": {
"impl": FillMaskPipeline,
"tf": (TFAutoModelForMaskedLM,) if is_tf_available() else (),
"pt": (AutoModelForMaskedLM,) if is_torch_available() else (),
"default": {"model": {"pt": ("distilroberta-base", "ec58a5b"), "tf": ("distilroberta-base", "ec58a5b")}},
"type": "text",
},
"summarization": {
"impl": SummarizationPipeline,
"tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (),
"pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (),
"default": {"model": {"pt": ("sshleifer/distilbart-cnn-12-6", "a4f8f3e"), "tf": ("t5-small", "d769bba")}},
"type": "text",
},
# This task is a special case as it's parametrized by SRC, TGT languages.
"translation": {
"impl": TranslationPipeline,
"tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (),
"pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (),
"default": {
("en", "fr"): {"model": {"pt": ("t5-base", "686f1db"), "tf": ("t5-base", "686f1db")}},
("en", "de"): {"model": {"pt": ("t5-base", "686f1db"), "tf": ("t5-base", "686f1db")}},
("en", "ro"): {"model": {"pt": ("t5-base", "686f1db"), "tf": ("t5-base", "686f1db")}},
},
"type": "text",
},
"text2text-generation": {
"impl": Text2TextGenerationPipeline,
"tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (),
"pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (),
"default": {"model": {"pt": ("t5-base", "686f1db"), "tf": ("t5-base", "686f1db")}},
"type": "text",
},
"text-generation": {
"impl": TextGenerationPipeline,
"tf": (TFAutoModelForCausalLM,) if is_tf_available() else (),
"pt": (AutoModelForCausalLM,) if is_torch_available() else (),
"default": {"model": {"pt": ("gpt2", "6c0e608"), "tf": ("gpt2", "6c0e608")}},
"type": "text",
},
"zero-shot-classification": {
"impl": ZeroShotClassificationPipeline,
"tf": (TFAutoModelForSequenceClassification,) if is_tf_available() else (),
"pt": (AutoModelForSequenceClassification,) if is_torch_available() else (),
"default": {
"model": {"pt": ("facebook/bart-large-mnli", "c626438"), "tf": ("roberta-large-mnli", "130fb28")},
"config": {"pt": ("facebook/bart-large-mnli", "c626438"), "tf": ("roberta-large-mnli", "130fb28")},
},
"type": "text",
},
"zero-shot-image-classification": {
"impl": ZeroShotImageClassificationPipeline,
"tf": (TFAutoModel,) if is_tf_available() else (),
"pt": (AutoModel,) if is_torch_available() else (),
"default": {
"model": {
"pt": ("openai/clip-vit-base-patch32", "f4881ba"),
"tf": ("openai/clip-vit-base-patch32", "f4881ba"),
}
},
"type": "multimodal",
},
"zero-shot-audio-classification": {
"impl": ZeroShotAudioClassificationPipeline,
"tf": (),
"pt": (AutoModel,) if is_torch_available() else (),
"default": {
"model": {
"pt": ("laion/clap-htsat-fused", "973b6e5"),
}
},
"type": "multimodal",
},
"conversational": {
"impl": ConversationalPipeline,
"tf": (TFAutoModelForSeq2SeqLM, TFAutoModelForCausalLM) if is_tf_available() else (),
"pt": (AutoModelForSeq2SeqLM, AutoModelForCausalLM) if is_torch_available() else (),
"default": {
"model": {"pt": ("microsoft/DialoGPT-medium", "8bada3b"), "tf": ("microsoft/DialoGPT-medium", "8bada3b")}
},
"type": "text",
},
"image-classification": {
"impl": ImageClassificationPipeline,
"tf": (TFAutoModelForImageClassification,) if is_tf_available() else (),
"pt": (AutoModelForImageClassification,) if is_torch_available() else (),
"default": {
"model": {
"pt": ("google/vit-base-patch16-224", "5dca96d"),
"tf": ("google/vit-base-patch16-224", "5dca96d"),
}
},
"type": "image",
},
"image-segmentation": {
"impl": ImageSegmentationPipeline,
"tf": (),
"pt": (AutoModelForImageSegmentation, AutoModelForSemanticSegmentation) if is_torch_available() else (),
"default": {"model": {"pt": ("facebook/detr-resnet-50-panoptic", "fc15262")}},
"type": "multimodal",
},
"image-to-text": {
"impl": ImageToTextPipeline,
"tf": (TFAutoModelForVision2Seq,) if is_tf_available() else (),
"pt": (AutoModelForVision2Seq,) if is_torch_available() else (),
"default": {
"model": {
"pt": ("ydshieh/vit-gpt2-coco-en", "65636df"),
"tf": ("ydshieh/vit-gpt2-coco-en", "65636df"),
}
},
"type": "multimodal",
},
"object-detection": {
"impl": ObjectDetectionPipeline,
"tf": (),
"pt": (AutoModelForObjectDetection,) if is_torch_available() else (),
"default": {"model": {"pt": ("facebook/detr-resnet-50", "2729413")}},
"type": "multimodal",
},
"zero-shot-object-detection": {
"impl": ZeroShotObjectDetectionPipeline,
"tf": (),
"pt": (AutoModelForZeroShotObjectDetection,) if is_torch_available() else (),
"default": {"model": {"pt": ("google/owlvit-base-patch32", "17740e1")}},
"type": "multimodal",
},
"depth-estimation": {
"impl": DepthEstimationPipeline,
"tf": (),
"pt": (AutoModelForDepthEstimation,) if is_torch_available() else (),
"default": {"model": {"pt": ("Intel/dpt-large", "e93beec")}},
"type": "image",
},
"video-classification": {
"impl": VideoClassificationPipeline,
"tf": (),
"pt": (AutoModelForVideoClassification,) if is_torch_available() else (),
"default": {"model": {"pt": ("MCG-NJU/videomae-base-finetuned-kinetics", "4800870")}},
"type": "video",
},
}
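# The registry above maps each task name to its pipeline class, its
# per-framework auto-model classes, and a pinned default checkpoint. A minimal
# lookup sketch (guarded so it only runs when this file is executed directly):
if __name__ == "__main__":
    _spec = SUPPORTED_TASKS["text-classification"]
    print(_spec["impl"].__name__)  # TextClassificationPipeline
    print(_spec["default"]["model"]["pt"])  # ("distilbert-base-uncased-finetuned-sst-2-english", "af0f99b")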
NO_FEATURE_EXTRACTOR_TASKS = set()
NO_IMAGE_PROCESSOR_TASKS = set()
NO_TOKENIZER_TASKS = set()
# Those model configs are special, they are generic over their task, meaning
# any tokenizer/feature_extractor might be used for a given model, so we cannot
# use the statically defined TOKENIZER_MAPPING and FEATURE_EXTRACTOR_MAPPING to
# see if the model defines such objects or not.
MULTI_MODEL_CONFIGS = {"SpeechEncoderDecoderConfig", "VisionEncoderDecoderConfig", "VisionTextDualEncoderConfig"}
for task, values in SUPPORTED_TASKS.items():
if values["type"] == "text":
NO_FEATURE_EXTRACTOR_TASKS.add(task)
NO_IMAGE_PROCESSOR_TASKS.add(task)
elif values["type"] in {"image", "video"}:
NO_TOKENIZER_TASKS.add(task)
elif values["type"] in {"audio"}:
NO_TOKENIZER_TASKS.add(task)
NO_IMAGE_PROCESSOR_TASKS.add(task)
elif values["type"] != "multimodal":
raise ValueError(f"SUPPORTED_TASK {task} contains invalid type {values['type']}")
PIPELINE_REGISTRY = PipelineRegistry(supported_tasks=SUPPORTED_TASKS, task_aliases=TASK_ALIASES)
def get_supported_tasks() -> List[str]:
"""
Returns a list of supported task strings.
"""
return PIPELINE_REGISTRY.get_supported_tasks()
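# A small sketch of querying the registry (importable as
# `from transformers.pipelines import get_supported_tasks`); aliases like
# "ner" are expected to appear in the list alongside canonical task names:
if __name__ == "__main__":
    print(TASK_ALIASES["ner"])  # "token-classification"
    print("ner" in get_supported_tasks())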
def get_task(model: str, use_auth_token: Optional[str] = None) -> str:
if is_offline_mode():
raise RuntimeError("You cannot infer task automatically within `pipeline` when using offline mode")
try:
info = model_info(model, token=use_auth_token)
except Exception as e:
raise RuntimeError(f"Instantiating a pipeline without a task set raised an error: {e}")
if not info.pipeline_tag:
raise RuntimeError(
f"The model {model} does not seem to have a correct `pipeline_tag` set to infer the task automatically"
)
if getattr(info, "library_name", "transformers") != "transformers":
raise RuntimeError(f"This model is meant to be used with {info.library_name} not with transformers")
task = info.pipeline_tag
return task
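# `get_task` resolves a bare model id by reading its `pipeline_tag` on the Hub,
# so it needs network access. A hedged sketch (model id illustrative):
if __name__ == "__main__":
    try:
        print(get_task("gpt2"))  # expected: "text-generation"
    except RuntimeError as err:
        print(f"Task inference failed (offline?): {err}")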
def check_task(task: str) -> Tuple[str, Dict, Any]:
"""
Checks an incoming task string, to validate it's correct and return the default Pipeline and Model classes, and
default models if they exist.
Args:
task (`str`):
The task defining which pipeline will be returned. Currently accepted tasks are:
- `"audio-classification"`
- `"automatic-speech-recognition"`
- `"conversational"`
- `"depth-estimation"`
- `"document-question-answering"`
- `"feature-extraction"`
- `"fill-mask"`
- `"image-classification"`
- `"image-segmentation"`
- `"image-to-text"`
- `"object-detection"`
- `"question-answering"`
- `"summarization"`
- `"table-question-answering"`
- `"text2text-generation"`
- `"text-classification"` (alias `"sentiment-analysis"` available)
- `"text-generation"`
- `"token-classification"` (alias `"ner"` available)
- `"translation"`
- `"translation_xx_to_yy"`
- `"video-classification"`
- `"visual-question-answering"`
- `"zero-shot-classification"`
- `"zero-shot-image-classification"`
- `"zero-shot-object-detection"`
Returns:
        (normalized_task: `str`, task_defaults: `dict`, task_options: (`tuple`, None)) The normalized task name
        (with aliases and options removed), the actual dictionary required to initialize the pipeline, and some extra
        task options for parametrized tasks like "translation_XX_to_YY"
"""
return PIPELINE_REGISTRY.check_task(task)
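# `check_task` normalizes aliases and parses parametrized task names. A sketch:
if __name__ == "__main__":
    normalized, _, options = check_task("sentiment-analysis")
    print(normalized, options)  # expected: text-classification None
    normalized, _, options = check_task("translation_en_to_fr")
    print(normalized, options)  # expected: translation ('en', 'fr')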
def clean_custom_task(task_info):
import transformers
if "impl" not in task_info:
raise RuntimeError("This model introduces a custom pipeline without specifying its implementation.")
pt_class_names = task_info.get("pt", ())
if isinstance(pt_class_names, str):
pt_class_names = [pt_class_names]
task_info["pt"] = tuple(getattr(transformers, c) for c in pt_class_names)
tf_class_names = task_info.get("tf", ())
if isinstance(tf_class_names, str):
tf_class_names = [tf_class_names]
task_info["tf"] = tuple(getattr(transformers, c) for c in tf_class_names)
return task_info, None
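# A sketch of what `clean_custom_task` does with a Hub repo's `custom_pipelines`
# entry: string class names are resolved to real classes on the `transformers`
# module (the "impl" path below is illustrative, not a real repo file):
if __name__ == "__main__":
    info, _ = clean_custom_task({"impl": "my_module.MyPipeline", "pt": ["AutoModel"], "tf": ()})
    print(info["pt"])  # expected: a one-element tuple holding transformers.AutoModel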
def pipeline(
task: str = None,
model: Optional = None,
config: Optional[Union[str, PretrainedConfig]] = None,
tokenizer: Optional[Union[str, PreTrainedTokenizer, PreTrainedTokenizerFast]] = None,
feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None,
image_processor: Optional[Union[str, BaseImageProcessor]] = None,
framework: Optional[str] = None,
revision: Optional[str] = None,
use_fast: bool = True,
use_auth_token: Optional[Union[str, bool]] = None,
device: Optional[Union[int, str, "torch.device"]] = None,
device_map=None,
torch_dtype=None,
trust_remote_code: Optional[bool] = None,
model_kwargs: Dict[str, Any] = None,
pipeline_class: Optional[Any] = None,
**kwargs,
) -> Pipeline:
"""
Utility factory method to build a [`Pipeline`].
Pipelines are made of:
    - A [tokenizer](tokenizer) in charge of mapping raw textual input to tokens.
- A [model](model) to make predictions from the inputs.
- Some (optional) post processing for enhancing model's output.
Args:
task (`str`):
The task defining which pipeline will be returned. Currently accepted tasks are:
- `"audio-classification"`: will return a [`AudioClassificationPipeline`].
- `"automatic-speech-recognition"`: will return a [`AutomaticSpeechRecognitionPipeline`].
- `"conversational"`: will return a [`ConversationalPipeline`].
- `"depth-estimation"`: will return a [`DepthEstimationPipeline`].
- `"document-question-answering"`: will return a [`DocumentQuestionAnsweringPipeline`].
- `"feature-extraction"`: will return a [`FeatureExtractionPipeline`].
- `"fill-mask"`: will return a [`FillMaskPipeline`]:.
- `"image-classification"`: will return a [`ImageClassificationPipeline`].
- `"image-segmentation"`: will return a [`ImageSegmentationPipeline`].
- `"image-to-text"`: will return a [`ImageToTextPipeline`].
- `"object-detection"`: will return a [`ObjectDetectionPipeline`].
- `"question-answering"`: will return a [`QuestionAnsweringPipeline`].
- `"summarization"`: will return a [`SummarizationPipeline`].
- `"table-question-answering"`: will return a [`TableQuestionAnsweringPipeline`].
- `"text2text-generation"`: will return a [`Text2TextGenerationPipeline`].
- `"text-classification"` (alias `"sentiment-analysis"` available): will return a
[`TextClassificationPipeline`].
- `"text-generation"`: will return a [`TextGenerationPipeline`]:.
- `"token-classification"` (alias `"ner"` available): will return a [`TokenClassificationPipeline`].
- `"translation"`: will return a [`TranslationPipeline`].
- `"translation_xx_to_yy"`: will return a [`TranslationPipeline`].
- `"video-classification"`: will return a [`VideoClassificationPipeline`].
- `"visual-question-answering"`: will return a [`VisualQuestionAnsweringPipeline`].
- `"zero-shot-classification"`: will return a [`ZeroShotClassificationPipeline`].
- `"zero-shot-image-classification"`: will return a [`ZeroShotImageClassificationPipeline`].
- `"zero-shot-audio-classification"`: will return a [`ZeroShotAudioClassificationPipeline`].
- `"zero-shot-object-detection"`: will return a [`ZeroShotObjectDetectionPipeline`].
model (`str` or [`PreTrainedModel`] or [`TFPreTrainedModel`], *optional*):
The model that will be used by the pipeline to make predictions. This can be a model identifier or an
actual instance of a pretrained model inheriting from [`PreTrainedModel`] (for PyTorch) or
[`TFPreTrainedModel`] (for TensorFlow).
If not provided, the default for the `task` will be loaded.
config (`str` or [`PretrainedConfig`], *optional*):
The configuration that will be used by the pipeline to instantiate the model. This can be a model
identifier or an actual pretrained model configuration inheriting from [`PretrainedConfig`].
If not provided, the default configuration file for the requested model will be used. That means that if
`model` is given, its default configuration will be used. However, if `model` is not supplied, this
`task`'s default model's config is used instead.
tokenizer (`str` or [`PreTrainedTokenizer`], *optional*):
The tokenizer that will be used by the pipeline to encode data for the model. This can be a model
identifier or an actual pretrained tokenizer inheriting from [`PreTrainedTokenizer`].
If not provided, the default tokenizer for the given `model` will be loaded (if it is a string). If `model`
is not specified or not a string, then the default tokenizer for `config` is loaded (if it is a string).
However, if `config` is also not given or not a string, then the default tokenizer for the given `task`
will be loaded.
feature_extractor (`str` or [`PreTrainedFeatureExtractor`], *optional*):
The feature extractor that will be used by the pipeline to encode data for the model. This can be a model
identifier or an actual pretrained feature extractor inheriting from [`PreTrainedFeatureExtractor`].
Feature extractors are used for non-NLP models, such as Speech or Vision models as well as multi-modal
models. Multi-modal models will also require a tokenizer to be passed.
If not provided, the default feature extractor for the given `model` will be loaded (if it is a string). If
`model` is not specified or not a string, then the default feature extractor for `config` is loaded (if it
is a string). However, if `config` is also not given or not a string, then the default feature extractor
for the given `task` will be loaded.
framework (`str`, *optional*):
The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified and
both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is
provided.
revision (`str`, *optional*, defaults to `"main"`):
When passing a task name or a string model identifier: The specific model version to use. It can be a
branch name, a tag name, or a commit id, since we use a git-based system for storing models and other
artifacts on huggingface.co, so `revision` can be any identifier allowed by git.
use_fast (`bool`, *optional*, defaults to `True`):
Whether or not to use a Fast tokenizer if possible (a [`PreTrainedTokenizerFast`]).
use_auth_token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `huggingface-cli login` (stored in `~/.huggingface`).
device (`int` or `str` or `torch.device`):
Defines the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which this
pipeline will be allocated.
device_map (`str` or `Dict[str, Union[int, str, torch.device]`, *optional*):
            Sent directly as `model_kwargs` (just a simpler shortcut). When the `accelerate` library is present, set
`device_map="auto"` to compute the most optimized `device_map` automatically (see
[here](https://huggingface.co/docs/accelerate/main/en/package_reference/big_modeling#accelerate.cpu_offload)
for more information).
<Tip warning={true}>
Do not use `device_map` AND `device` at the same time as they will conflict
</Tip>
torch_dtype (`str` or `torch.dtype`, *optional*):
Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model
(`torch.float16`, `torch.bfloat16`, ... or `"auto"`).
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom code defined on the Hub in their own modeling, configuration,
tokenization or even pipeline files. This option should only be set to `True` for repositories you trust
and in which you have read the code, as it will execute code present on the Hub on your local machine.
model_kwargs:
Additional dictionary of keyword arguments passed along to the model's `from_pretrained(...,
**model_kwargs)` function.
kwargs:
Additional keyword arguments passed along to the specific pipeline init (see the documentation for the
corresponding pipeline class for possible values).
Returns:
[`Pipeline`]: A suitable pipeline for the task.
Examples:
```python
>>> from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer
>>> # Sentiment analysis pipeline
>>> analyzer = pipeline("sentiment-analysis")
>>> # Question answering pipeline, specifying the checkpoint identifier
>>> oracle = pipeline(
... "question-answering", model="distilbert-base-cased-distilled-squad", tokenizer="bert-base-cased"
... )
>>> # Named entity recognition pipeline, passing in a specific model and tokenizer
>>> model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> recognizer = pipeline("ner", model=model, tokenizer=tokenizer)
```"""
if model_kwargs is None:
model_kwargs = {}
    # Make sure we only pass use_auth_token once as a kwarg (it used to be possible to pass it in model_kwargs;
    # this is kept for backward compatibility).
use_auth_token = model_kwargs.pop("use_auth_token", use_auth_token)
hub_kwargs = {
"revision": revision,
"use_auth_token": use_auth_token,
"trust_remote_code": trust_remote_code,
"_commit_hash": None,
}
if task is None and model is None:
raise RuntimeError(
"Impossible to instantiate a pipeline without either a task or a model "
"being specified. "
"Please provide a task class or a model"
)
if model is None and tokenizer is not None:
raise RuntimeError(
"Impossible to instantiate a pipeline with tokenizer specified but not the model as the provided tokenizer"
" may not be compatible with the default model. Please provide a PreTrainedModel class or a"
" path/identifier to a pretrained model when providing tokenizer."
)
if model is None and feature_extractor is not None:
raise RuntimeError(
"Impossible to instantiate a pipeline with feature_extractor specified but not the model as the provided"
" feature_extractor may not be compatible with the default model. Please provide a PreTrainedModel class"
" or a path/identifier to a pretrained model when providing feature_extractor."
)
if isinstance(model, Path):
model = str(model)
# Config is the primordial information item.
# Instantiate config if needed
if isinstance(config, str):
config = AutoConfig.from_pretrained(config, _from_pipeline=task, **hub_kwargs, **model_kwargs)
hub_kwargs["_commit_hash"] = config._commit_hash
elif config is None and isinstance(model, str):
config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs)
hub_kwargs["_commit_hash"] = config._commit_hash
custom_tasks = {}
if config is not None and len(getattr(config, "custom_pipelines", {})) > 0:
custom_tasks = config.custom_pipelines
if task is None and trust_remote_code is not False:
if len(custom_tasks) == 1:
task = list(custom_tasks.keys())[0]
else:
raise RuntimeError(
"We can't infer the task automatically for this model as there are multiple tasks available. Pick "
f"one in {', '.join(custom_tasks.keys())}"
)
if task is None and model is not None:
if not isinstance(model, str):
raise RuntimeError(
"Inferring the task automatically requires to check the hub with a model_id defined as a `str`."
f"{model} is not a valid model_id."
)
task = get_task(model, use_auth_token)
# Retrieve the task
if task in custom_tasks:
normalized_task = task
targeted_task, task_options = clean_custom_task(custom_tasks[task])
if pipeline_class is None:
if not trust_remote_code:
raise ValueError(
"Loading this pipeline requires you to execute the code in the pipeline file in that"
" repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
" set the option `trust_remote_code=True` to remove this error."
)
class_ref = targeted_task["impl"]
module_file, class_name = class_ref.split(".")
pipeline_class = get_class_from_dynamic_module(
model, module_file + ".py", class_name, revision=revision, use_auth_token=use_auth_token
)
else:
normalized_task, targeted_task, task_options = check_task(task)
if pipeline_class is None:
pipeline_class = targeted_task["impl"]
# Use default model/config/tokenizer for the task if no model is provided
if model is None:
# At that point framework might still be undetermined
model, default_revision = get_default_model_and_revision(targeted_task, framework, task_options)
revision = revision if revision is not None else default_revision
logger.warning(
f"No model was supplied, defaulted to {model} and revision"
f" {revision} ({HUGGINGFACE_CO_RESOLVE_ENDPOINT}/{model}).\n"
"Using a pipeline without specifying a model name and revision in production is not recommended."
)
if config is None and isinstance(model, str):
config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs)
hub_kwargs["_commit_hash"] = config._commit_hash
if device_map is not None:
if "device_map" in model_kwargs:
raise ValueError(
'You cannot use both `pipeline(... device_map=..., model_kwargs={"device_map":...})` as those'
" arguments might conflict, use only one.)"
)
if device is not None:
logger.warning(
"Both `device` and `device_map` are specified. `device` will override `device_map`. You"
" will most likely encounter unexpected behavior. Please remove `device` and keep `device_map`."
)
model_kwargs["device_map"] = device_map
if torch_dtype is not None:
if "torch_dtype" in model_kwargs:
raise ValueError(
'You cannot use both `pipeline(... torch_dtype=..., model_kwargs={"torch_dtype":...})` as those'
" arguments might conflict, use only one.)"
)
model_kwargs["torch_dtype"] = torch_dtype
model_name = model if isinstance(model, str) else None
# Infer the framework from the model
# Forced if framework already defined, inferred if it's None
# Will load the correct model if possible
model_classes = {"tf": targeted_task["tf"], "pt": targeted_task["pt"]}
framework, model = infer_framework_load_model(
model,
model_classes=model_classes,
config=config,
framework=framework,
task=task,
**hub_kwargs,
**model_kwargs,
)
model_config = model.config
hub_kwargs["_commit_hash"] = model.config._commit_hash
load_tokenizer = type(model_config) in TOKENIZER_MAPPING or model_config.tokenizer_class is not None
load_feature_extractor = type(model_config) in FEATURE_EXTRACTOR_MAPPING or feature_extractor is not None
load_image_processor = type(model_config) in IMAGE_PROCESSOR_MAPPING or image_processor is not None
# If `model` (instance of `PretrainedModel` instead of `str`) is passed (and/or same for config), while
# `image_processor` or `feature_extractor` is `None`, the loading will fail. This happens particularly for some
# vision tasks when calling `pipeline()` with `model` and only one of the `image_processor` and `feature_extractor`.
# TODO: we need to make `NO_IMAGE_PROCESSOR_TASKS` and `NO_FEATURE_EXTRACTOR_TASKS` more robust to avoid such issue.
    # This block is only temporary, to make CI green.
if load_image_processor and load_feature_extractor:
load_feature_extractor = False
if (
tokenizer is None
and not load_tokenizer
and normalized_task not in NO_TOKENIZER_TASKS
# Using class name to avoid importing the real class.
and model_config.__class__.__name__ in MULTI_MODEL_CONFIGS
):
        # This is a special category of models that are fusions of multiple models,
        # so the model_config might not define a tokenizer, but it seems to be
        # necessary for the task, so we force-try to load it.
load_tokenizer = True
if (
image_processor is None
and not load_image_processor
and normalized_task not in NO_IMAGE_PROCESSOR_TASKS
# Using class name to avoid importing the real class.
and model_config.__class__.__name__ in MULTI_MODEL_CONFIGS
and normalized_task != "automatic-speech-recognition"
):
        # This is a special category of models that are fusions of multiple models,
        # so the model_config might not define an image processor, but it seems to be
        # necessary for the task, so we force-try to load it.
load_image_processor = True
if (
feature_extractor is None
and not load_feature_extractor
and normalized_task not in NO_FEATURE_EXTRACTOR_TASKS
# Using class name to avoid importing the real class.
and model_config.__class__.__name__ in MULTI_MODEL_CONFIGS
):
        # This is a special category of models that are fusions of multiple models,
        # so the model_config might not define a feature extractor, but it seems to be
        # necessary for the task, so we force-try to load it.
load_feature_extractor = True
if task in NO_TOKENIZER_TASKS:
# These will never require a tokenizer.
        # The model, on the other hand, might have a tokenizer, but
        # the files could be missing from the hub; instead of failing
        # on such repos, we simply force it not to be loaded.
load_tokenizer = False
if task in NO_FEATURE_EXTRACTOR_TASKS:
load_feature_extractor = False
if task in NO_IMAGE_PROCESSOR_TASKS:
load_image_processor = False
if load_tokenizer:
# Try to infer tokenizer from model or config name (if provided as str)
if tokenizer is None:
if isinstance(model_name, str):
tokenizer = model_name
elif isinstance(config, str):
tokenizer = config
else:
# Impossible to guess what is the right tokenizer here
raise Exception(
"Impossible to guess which tokenizer to use. "
"Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer."
)
# Instantiate tokenizer if needed
if isinstance(tokenizer, (str, tuple)):
if isinstance(tokenizer, tuple):
# For tuple we have (tokenizer name, {kwargs})
use_fast = tokenizer[1].pop("use_fast", use_fast)
tokenizer_identifier = tokenizer[0]
tokenizer_kwargs = tokenizer[1]
else:
tokenizer_identifier = tokenizer
tokenizer_kwargs = model_kwargs
tokenizer = AutoTokenizer.from_pretrained(
tokenizer_identifier, use_fast=use_fast, _from_pipeline=task, **hub_kwargs, **tokenizer_kwargs
)
if load_image_processor:
# Try to infer image processor from model or config name (if provided as str)
if image_processor is None:
if isinstance(model_name, str):
image_processor = model_name
elif isinstance(config, str):
image_processor = config
# Backward compatibility, as `feature_extractor` used to be the name
# for `ImageProcessor`.
elif feature_extractor is not None and isinstance(feature_extractor, BaseImageProcessor):
image_processor = feature_extractor
else:
# Impossible to guess what is the right image_processor here
raise Exception(
"Impossible to guess which image processor to use. "
"Please provide a PreTrainedImageProcessor class or a path/identifier "
"to a pretrained image processor."
)
# Instantiate image_processor if needed
if isinstance(image_processor, (str, tuple)):
image_processor = AutoImageProcessor.from_pretrained(
image_processor, _from_pipeline=task, **hub_kwargs, **model_kwargs
)
if load_feature_extractor:
# Try to infer feature extractor from model or config name (if provided as str)
if feature_extractor is None:
if isinstance(model_name, str):
feature_extractor = model_name
elif isinstance(config, str):
feature_extractor = config
else:
# Impossible to guess what is the right feature_extractor here
raise Exception(
"Impossible to guess which feature extractor to use. "
"Please provide a PreTrainedFeatureExtractor class or a path/identifier "
"to a pretrained feature extractor."
)
# Instantiate feature_extractor if needed
if isinstance(feature_extractor, (str, tuple)):
feature_extractor = AutoFeatureExtractor.from_pretrained(
feature_extractor, _from_pipeline=task, **hub_kwargs, **model_kwargs
)
if (
feature_extractor._processor_class
and feature_extractor._processor_class.endswith("WithLM")
and isinstance(model_name, str)
):
try:
import kenlm # to trigger `ImportError` if not installed
from pyctcdecode import BeamSearchDecoderCTC
if os.path.isdir(model_name) or os.path.isfile(model_name):
decoder = BeamSearchDecoderCTC.load_from_dir(model_name)
else:
language_model_glob = os.path.join(
BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, "*"
)
alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME
allow_patterns = [language_model_glob, alphabet_filename]
decoder = BeamSearchDecoderCTC.load_from_hf_hub(model_name, allow_patterns=allow_patterns)
kwargs["decoder"] = decoder
except ImportError as e:
logger.warning(f"Could not load the `decoder` for {model_name}. Defaulting to raw CTC. Error: {e}")
if not is_kenlm_available():
logger.warning("Try to install `kenlm`: `pip install kenlm")
if not is_pyctcdecode_available():
logger.warning("Try to install `pyctcdecode`: `pip install pyctcdecode")
if task == "translation" and model.config.task_specific_params:
for key in model.config.task_specific_params:
if key.startswith("translation"):
task = key
warnings.warn(
f'"translation" task was used, instead of "translation_XX_to_YY", defaulting to "{task}"',
UserWarning,
)
break
if tokenizer is not None:
kwargs["tokenizer"] = tokenizer
if feature_extractor is not None:
kwargs["feature_extractor"] = feature_extractor
if torch_dtype is not None:
kwargs["torch_dtype"] = torch_dtype
if image_processor is not None:
kwargs["image_processor"] = image_processor
if device is not None:
kwargs["device"] = device
return pipeline_class(model=model, framework=framework, task=task, **kwargs)
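# A hedged usage sketch for the factory above; instantiating a pipeline
# downloads the pinned default checkpoint for the task on first use:
if __name__ == "__main__":
    classifier = pipeline("sentiment-analysis")  # alias of "text-classification"
    print(classifier("This library keeps getting better."))
    # An explicit model, dtype and device can be passed as well, e.g.:
    # pipeline("text-generation", model="gpt2", torch_dtype="auto", device=0)  # device=0 needs CUDA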
|
27182812/ChatGLM-LLaMA-chinese-insturct | 14,469 | src/transformers/pipelines/text_generation.py | import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
TENSORS = 0
NEW_TEXT = 1
FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
"""
Language generation pipeline using any `ModelWithLMHead`. This pipeline predicts the words that will follow a
specified text prompt.
Example:
```python
>>> from transformers import pipeline
>>> generator = pipeline(model="gpt2")
>>> generator("I can't believe you did such a ", do_sample=False)
[{'generated_text': "I can't believe you did such a icky thing to me. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I"}]
    >>> # These parameters will return four suggestions, and only the newly created text, making it easier to use for prompting.
>>> outputs = generator("My tart needs some", num_return_sequences=4, return_full_text=False)
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This language generation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"text-generation"`.
The models that this pipeline can use are models that have been trained with an autoregressive language modeling
objective, which includes the uni-directional models in the library (e.g. gpt2). See the list of available models
on [huggingface.co/models](https://huggingface.co/models?filter=text-generation).
"""
# Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
XL_PREFIX = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
)
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
prefix = None
if self.model.config.prefix is not None:
prefix = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
prefix = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
self._preprocess_params = {**self._preprocess_params, **preprocess_params}
self._forward_params = {**self._forward_params, **forward_params}
def _sanitize_parameters(
self,
return_full_text=None,
return_tensors=None,
return_text=None,
return_type=None,
clean_up_tokenization_spaces=None,
prefix=None,
handle_long_generation=None,
stop_sequence=None,
**generate_kwargs,
):
preprocess_params = {}
if prefix is not None:
preprocess_params["prefix"] = prefix
if prefix:
prefix_inputs = self.tokenizer(
prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
)
prefix_length = prefix_inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
pass
elif "max_length" in generate_kwargs:
generate_kwargs["max_length"] += prefix_length
else:
generate_kwargs["max_length"] = self.model.config.max_length + prefix_length
if "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
" [None, 'hole']"
)
preprocess_params["handle_long_generation"] = handle_long_generation
preprocess_params.update(generate_kwargs)
forward_params = generate_kwargs
postprocess_params = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
return_type = ReturnType.TENSORS
if return_type is not None:
postprocess_params["return_type"] = return_type
if clean_up_tokenization_spaces is not None:
postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
if stop_sequence is not None:
stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
if len(stop_sequence_ids) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim."
)
generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
# overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments
def _parse_and_tokenize(self, *args, **kwargs):
"""
Parse arguments and tokenize
"""
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True})
return super()._parse_and_tokenize(*args, **kwargs)
def __call__(self, text_inputs, **kwargs):
"""
Complete the prompt(s) given as inputs.
Args:
args (`str` or `List[str]`):
One or several prompts (or one list of prompts) to complete.
return_tensors (`bool`, *optional*, defaults to `False`):
Whether or not to return the tensors of predictions (as token indices) in the outputs. If set to
`True`, the decoded text is not returned.
return_text (`bool`, *optional*, defaults to `True`):
Whether or not to return the decoded texts in the outputs.
return_full_text (`bool`, *optional*, defaults to `True`):
                If set to `False`, only the added text is returned, otherwise the full text is returned. Only meaningful if
*return_text* is set to True.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to clean up the potential extra spaces in the text output.
prefix (`str`, *optional*):
Prefix added to prompt.
handle_long_generation (`str`, *optional*):
                By default, this pipeline does not handle long generation (sequences that exceed, in one form or
                another, the model's maximum length). There is no perfect way to address this (more info:
                https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227). This provides common
                strategies to work around the problem, depending on your use case.
                - `None` : default strategy where nothing in particular happens
                - `"hole"`: Truncates left of input, and leaves a gap wide enough to let generation happen (might
                  truncate a lot of the prompt, and is not suitable when the generation exceeds the model capacity)
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate method
corresponding to your framework [here](./model#generative-models)).
Return:
A list or a list of list of `dict`: Returns one of the following dictionaries (cannot return a combination
of both `generated_text` and `generated_token_ids`):
- **generated_text** (`str`, present when `return_text=True`) -- The generated text.
- **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
ids of the generated text.
"""
return super().__call__(text_inputs, **kwargs)
def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
inputs = self.tokenizer(
prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
)
inputs["prompt_text"] = prompt_text
if handle_long_generation == "hole":
cur_len = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
new_tokens = generate_kwargs["max_new_tokens"]
else:
new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected")
if cur_len + new_tokens > self.tokenizer.model_max_length:
keep_length = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length"
)
inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]
return inputs
def _forward(self, model_inputs, **generate_kwargs):
input_ids = model_inputs["input_ids"]
attention_mask = model_inputs.get("attention_mask", None)
# Allow empty prompts
if input_ids.shape[1] == 0:
input_ids = None
attention_mask = None
in_b = 1
else:
in_b = input_ids.shape[0]
prompt_text = model_inputs.pop("prompt_text")
# BS x SL
generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
out_b = generated_sequence.shape[0]
if self.framework == "pt":
generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
elif self.framework == "tf":
generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
generated_sequence = model_outputs["generated_sequence"][0]
input_ids = model_outputs["input_ids"]
prompt_text = model_outputs["prompt_text"]
generated_sequence = generated_sequence.numpy().tolist()
records = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
record = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
text = self.tokenizer.decode(
sequence,
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
                # Measure the decoded prompt so it can be stripped from the output (covers the PADDING prefix added for XLNet and Transfo-XL models)
if input_ids is None:
prompt_length = 0
else:
prompt_length = len(
self.tokenizer.decode(
input_ids[0],
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
)
if return_type == ReturnType.FULL_TEXT:
all_text = prompt_text + text[prompt_length:]
else:
all_text = text[prompt_length:]
record = {"generated_text": all_text}
records.append(record)
return records
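# A hedged usage sketch for the pipeline above (downloads a checkpoint on first
# use; the generated text varies with the model and sampling settings):
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="gpt2")
    # Return only the continuation, capped at 20 new tokens:
    print(generator("Hello, I'm a language model,", return_full_text=False, max_new_tokens=20))
    # For prompts longer than the model's maximum length, `handle_long_generation="hole"`
    # left-truncates the input so the requested new tokens still fit.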
|
27182812/ChatGLM-LLaMA-chinese-insturct | 8,737 | src/transformers/pipelines/image_segmentation.py | from typing import Any, Dict, List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import (
MODEL_FOR_IMAGE_SEGMENTATION_MAPPING,
MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING,
MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING,
MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING,
)
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageSegmentationPipeline(Pipeline):
"""
Image segmentation pipeline using any `AutoModelForXXXSegmentation`. This pipeline predicts masks of objects and
their classes.
Example:
```python
>>> from transformers import pipeline
>>> segmenter = pipeline(model="facebook/detr-resnet-50-panoptic")
>>> segments = segmenter("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
>>> len(segments)
2
>>> segments[0]["label"]
'bird'
>>> segments[1]["label"]
'bird'
>>> type(segments[0]["mask"]) # This is a black and white mask showing where is the bird on the original image.
<class 'PIL.Image.Image'>
>>> segments[0]["mask"].size
(768, 512)
```
This image segmentation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"image-segmentation"`.
See the list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=image-segmentation).
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch.")
requires_backends(self, "vision")
self.check_model_type(
dict(
MODEL_FOR_IMAGE_SEGMENTATION_MAPPING.items()
+ MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING.items()
+ MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING.items()
+ MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING.items()
)
)
def _sanitize_parameters(self, **kwargs):
preprocessor_kwargs = {}
postprocess_kwargs = {}
if "subtask" in kwargs:
postprocess_kwargs["subtask"] = kwargs["subtask"]
preprocessor_kwargs["subtask"] = kwargs["subtask"]
if "threshold" in kwargs:
postprocess_kwargs["threshold"] = kwargs["threshold"]
if "mask_threshold" in kwargs:
postprocess_kwargs["mask_threshold"] = kwargs["mask_threshold"]
if "overlap_mask_area_threshold" in kwargs:
postprocess_kwargs["overlap_mask_area_threshold"] = kwargs["overlap_mask_area_threshold"]
return preprocessor_kwargs, {}, postprocess_kwargs
def __call__(self, images, **kwargs) -> Union[Predictions, List[Prediction]]:
"""
Perform segmentation (detect masks & classes) in the image(s) passed as inputs.
Args:
images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
The pipeline handles three types of images:
- A string containing an HTTP(S) link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the
same format: all as HTTP(S) links, all as local paths, or all as PIL images.
subtask (`str`, *optional*):
                Segmentation task to be performed, chosen from [`semantic`, `instance` and `panoptic`] depending on
                model capabilities. If not set, the pipeline will attempt to resolve the subtask in the following
                order: `panoptic`, `instance`, `semantic`.
threshold (`float`, *optional*, defaults to 0.9):
Probability threshold to filter out predicted masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.5):
Mask overlap threshold to eliminate small, disconnected segments.
Return:
            A dictionary or a list of dictionaries containing the result. If the input is a single image, a list of
            dictionaries is returned; if the input is a list of several images, a list of lists of dictionaries is
            returned, one list per image.
            The dictionaries contain the mask, label and score (where applicable) of each detected object and have
            the following keys:
- **label** (`str`) -- The class label identified by the model.
            - **mask** (`PIL.Image`) -- A binary mask of the detected object as a PIL Image of shape (width, height) of
              the original image. Returns a mask filled with zeros if no object is found.
            - **score** (*optional* `float`) -- Optionally, the confidence of the "object" described by the label and
              the mask, when the model is capable of estimating it.
"""
return super().__call__(images, **kwargs)
def preprocess(self, image, subtask=None):
image = load_image(image)
target_size = [(image.height, image.width)]
if self.model.config.__class__.__name__ == "OneFormerConfig":
if subtask is None:
kwargs = {}
else:
kwargs = {"task_inputs": [subtask]}
inputs = self.image_processor(images=[image], return_tensors="pt", **kwargs)
inputs["task_inputs"] = self.tokenizer(
inputs["task_inputs"],
padding="max_length",
max_length=self.model.config.task_seq_len,
return_tensors=self.framework,
)["input_ids"]
else:
inputs = self.image_processor(images=[image], return_tensors="pt")
inputs["target_size"] = target_size
return inputs
def _forward(self, model_inputs):
target_size = model_inputs.pop("target_size")
model_outputs = self.model(**model_inputs)
model_outputs["target_size"] = target_size
return model_outputs
def postprocess(
self, model_outputs, subtask=None, threshold=0.9, mask_threshold=0.5, overlap_mask_area_threshold=0.5
):
fn = None
if subtask in {"panoptic", None} and hasattr(self.image_processor, "post_process_panoptic_segmentation"):
fn = self.image_processor.post_process_panoptic_segmentation
elif subtask in {"instance", None} and hasattr(self.image_processor, "post_process_instance_segmentation"):
fn = self.image_processor.post_process_instance_segmentation
if fn is not None:
outputs = fn(
model_outputs,
threshold=threshold,
mask_threshold=mask_threshold,
overlap_mask_area_threshold=overlap_mask_area_threshold,
target_sizes=model_outputs["target_size"],
)[0]
annotation = []
segmentation = outputs["segmentation"]
for segment in outputs["segments_info"]:
mask = (segmentation == segment["id"]) * 255
mask = Image.fromarray(mask.numpy().astype(np.uint8), mode="L")
label = self.model.config.id2label[segment["label_id"]]
score = segment["score"]
annotation.append({"score": score, "label": label, "mask": mask})
elif subtask in {"semantic", None} and hasattr(self.image_processor, "post_process_semantic_segmentation"):
outputs = self.image_processor.post_process_semantic_segmentation(
model_outputs, target_sizes=model_outputs["target_size"]
)[0]
annotation = []
segmentation = outputs.numpy()
labels = np.unique(segmentation)
for label in labels:
mask = (segmentation == label) * 255
mask = Image.fromarray(mask.astype(np.uint8), mode="L")
label = self.model.config.id2label[label]
annotation.append({"score": None, "label": label, "mask": mask})
else:
raise ValueError(f"Subtask {subtask} is not supported for model {type(self.model)}")
return annotation
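# A hedged usage sketch (needs `torch`, `Pillow` and a checkpoint download; the
# image URL is the one from the class docstring above):
if __name__ == "__main__":
    from transformers import pipeline

    segmenter = pipeline("image-segmentation", model="facebook/detr-resnet-50-panoptic")
    segments = segmenter(
        "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
        subtask="panoptic",
        threshold=0.9,
    )
    for segment in segments:
        print(segment["label"], segment["score"], segment["mask"].size)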
|
27182812/ChatGLM-LLaMA-chinese-insturct | 29,474 | src/transformers/pipelines/question_answering.py | import types
import warnings
from collections.abc import Iterable
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
import numpy as np
from ..data import SquadExample, SquadFeatures, squad_convert_examples_to_features
from ..modelcard import ModelCard
from ..tokenization_utils import PreTrainedTokenizer
from ..utils import (
PaddingStrategy,
add_end_docstrings,
is_tf_available,
is_tokenizers_available,
is_torch_available,
logging,
)
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
if TYPE_CHECKING:
from ..modeling_tf_utils import TFPreTrainedModel
from ..modeling_utils import PreTrainedModel
if is_tokenizers_available():
import tokenizers
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING
Dataset = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
from ..models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
def decode_spans(
start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int, undesired_tokens: np.ndarray
) -> Tuple:
"""
    Takes the output of any `ModelForQuestionAnswering` and generates probabilities for each span to be the actual
    answer.
    In addition, it filters out some unwanted/impossible cases, like an answer length greater than max_answer_len or
    an answer end position before the starting position. The method supports outputting the k-best answers through
    the topk argument.
Args:
start (`np.ndarray`): Individual start probabilities for each token.
end (`np.ndarray`): Individual end probabilities for each token.
topk (`int`): Indicates how many possible answer span(s) to extract from the model output.
max_answer_len (`int`): Maximum size of the answer to extract from the model's output.
undesired_tokens (`np.ndarray`): Mask determining tokens that can be part of the answer
"""
# Ensure we have batch axis
if start.ndim == 1:
start = start[None]
if end.ndim == 1:
end = end[None]
# Compute the score of each tuple(start, end) to be the real answer
outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))
# Remove candidate with end < start and end - start > max_answer_len
candidates = np.tril(np.triu(outer), max_answer_len - 1)
# Inspired by Chen & al. (https://github.com/facebookresearch/DrQA)
scores_flat = candidates.flatten()
if topk == 1:
idx_sort = [np.argmax(scores_flat)]
elif len(scores_flat) < topk:
idx_sort = np.argsort(-scores_flat)
else:
idx = np.argpartition(-scores_flat, topk)[0:topk]
idx_sort = idx[np.argsort(-scores_flat[idx])]
starts, ends = np.unravel_index(idx_sort, candidates.shape)[1:]
desired_spans = np.isin(starts, undesired_tokens.nonzero()) & np.isin(ends, undesired_tokens.nonzero())
starts = starts[desired_spans]
ends = ends[desired_spans]
scores = candidates[0, starts, ends]
return starts, ends, scores
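# A self-contained toy run of `decode_spans`: three tokens, all allowed in the
# answer, so the best span should pair the highest start and end probabilities:
if __name__ == "__main__":
    toy_start = np.array([[0.1, 0.7, 0.2]])  # start probabilities per token
    toy_end = np.array([[0.1, 0.2, 0.7]])  # end probabilities per token
    allowed = np.array([1, 1, 1])  # 1 = token may belong to the answer
    starts, ends, scores = decode_spans(toy_start, toy_end, topk=1, max_answer_len=3, undesired_tokens=allowed)
    print(starts, ends, scores)  # expected: [1] [2] [~0.49]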
def select_starts_ends(
start,
end,
p_mask,
attention_mask,
min_null_score=1000000,
top_k=1,
handle_impossible_answer=False,
max_answer_len=15,
):
"""
Takes the raw output of any `ModelForQuestionAnswering` and first normalizes its outputs and then uses
`decode_spans()` to generate probabilities for each span to be the actual answer.
Args:
start (`np.ndarray`): Individual start logits for each token.
end (`np.ndarray`): Individual end logits for each token.
p_mask (`np.ndarray`): A mask with 1 for values that cannot be in the answer
attention_mask (`np.ndarray`): The attention mask generated by the tokenizer
min_null_score(`float`): The minimum null (empty) answer score seen so far.
topk (`int`): Indicates how many possible answer span(s) to extract from the model output.
handle_impossible_answer(`bool`): Whether to allow null (empty) answers
max_answer_len (`int`): Maximum size of the answer to extract from the model's output.
"""
# Ensure padded tokens & question tokens cannot belong to the set of candidate answers.
undesired_tokens = np.abs(np.array(p_mask) - 1)
if attention_mask is not None:
undesired_tokens = undesired_tokens & attention_mask
# Generate mask
undesired_tokens_mask = undesired_tokens == 0.0
# Make sure non-context indexes in the tensor cannot contribute to the softmax
start = np.where(undesired_tokens_mask, -10000.0, start)
end = np.where(undesired_tokens_mask, -10000.0, end)
# Normalize logits and spans to retrieve the answer
start = np.exp(start - start.max(axis=-1, keepdims=True))
start = start / start.sum()
end = np.exp(end - end.max(axis=-1, keepdims=True))
end = end / end.sum()
if handle_impossible_answer:
min_null_score = min(min_null_score, (start[0, 0] * end[0, 0]).item())
# Mask CLS
start[0, 0] = end[0, 0] = 0.0
starts, ends, scores = decode_spans(start, end, top_k, max_answer_len, undesired_tokens)
return starts, ends, scores, min_null_score
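# Note (added for clarity): dividing by start.sum() normalizes over the whole
# array rather than per row; this matches a per-row softmax here because the
# pipeline feeds this function one span (a batch of 1) at a time.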
class QuestionAnsweringArgumentHandler(ArgumentHandler):
"""
QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped to
internal [`SquadExample`].
QuestionAnsweringArgumentHandler manages all the possible ways to create a [`SquadExample`] from the
supplied arguments.
"""
def normalize(self, item):
if isinstance(item, SquadExample):
return item
elif isinstance(item, dict):
for k in ["question", "context"]:
if k not in item:
raise KeyError("You need to provide a dictionary with keys {question:..., context:...}")
elif item[k] is None:
raise ValueError(f"`{k}` cannot be None")
elif isinstance(item[k], str) and len(item[k]) == 0:
raise ValueError(f"`{k}` cannot be empty")
return QuestionAnsweringPipeline.create_sample(**item)
raise ValueError(f"{item} argument needs to be of type (SquadExample, dict)")
def __call__(self, *args, **kwargs):
# Detect where the actual inputs are
if args is not None and len(args) > 0:
if len(args) == 1:
inputs = args[0]
elif len(args) == 2 and {type(el) for el in args} == {str}:
inputs = [{"question": args[0], "context": args[1]}]
else:
inputs = list(args)
# Generic compatibility with sklearn and Keras
# Batched data
elif "X" in kwargs:
inputs = kwargs["X"]
elif "data" in kwargs:
inputs = kwargs["data"]
elif "question" in kwargs and "context" in kwargs:
if isinstance(kwargs["question"], list) and isinstance(kwargs["context"], str):
inputs = [{"question": Q, "context": kwargs["context"]} for Q in kwargs["question"]]
elif isinstance(kwargs["question"], list) and isinstance(kwargs["context"], list):
if len(kwargs["question"]) != len(kwargs["context"]):
raise ValueError("Questions and contexts don't have the same lengths")
inputs = [{"question": Q, "context": C} for Q, C in zip(kwargs["question"], kwargs["context"])]
elif isinstance(kwargs["question"], str) and isinstance(kwargs["context"], str):
inputs = [{"question": kwargs["question"], "context": kwargs["context"]}]
else:
raise ValueError("Arguments can't be understood")
else:
raise ValueError(f"Unknown arguments {kwargs}")
# When user is sending a generator we need to trust it's a valid example
generator_types = (types.GeneratorType, Dataset) if Dataset is not None else (types.GeneratorType,)
if isinstance(inputs, generator_types):
return inputs
# Normalize inputs
if isinstance(inputs, dict):
inputs = [inputs]
elif isinstance(inputs, Iterable):
# Copy to avoid overriding arguments
inputs = list(inputs)
else:
raise ValueError(f"Invalid arguments {kwargs}")
for i, item in enumerate(inputs):
inputs[i] = self.normalize(item)
return inputs
@add_end_docstrings(PIPELINE_INIT_ARGS)
class QuestionAnsweringPipeline(ChunkPipeline):
"""
Question Answering pipeline using any `ModelForQuestionAnswering`. See the [question answering
examples](../task_summary#question-answering) for more information.
Example:
```python
>>> from transformers import pipeline
>>> oracle = pipeline(model="deepset/roberta-base-squad2")
>>> oracle(question="Where do I live?", context="My name is Wolfgang and I live in Berlin")
{'score': 0.9191, 'start': 34, 'end': 40, 'answer': 'Berlin'}
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This question answering pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"question-answering"`.
The models that this pipeline can use are models that have been fine-tuned on a question answering task. See the
up-to-date list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=question-answering).
"""
default_input_names = "question,context"
handle_impossible_answer = False
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
task: str = "",
**kwargs,
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
task=task,
**kwargs,
)
self._args_parser = QuestionAnsweringArgumentHandler()
self.check_model_type(
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING if self.framework == "tf" else MODEL_FOR_QUESTION_ANSWERING_MAPPING
)
@staticmethod
def create_sample(
question: Union[str, List[str]], context: Union[str, List[str]]
) -> Union[SquadExample, List[SquadExample]]:
"""
QuestionAnsweringPipeline leverages the [`SquadExample`] internally. This helper method encapsulates all the
logic for converting question(s) and context(s) to [`SquadExample`].
We currently support extractive question answering.
Arguments:
question (`str` or `List[str]`): The question(s) asked.
context (`str` or `List[str]`): The context(s) in which we will look for the answer.
Returns:
One or a list of [`SquadExample`]: The corresponding [`SquadExample`] grouping question and context.
"""
if isinstance(question, list):
return [SquadExample(None, q, c, None, None, None) for q, c in zip(question, context)]
else:
return SquadExample(None, question, context, None, None, None)
def _sanitize_parameters(
self,
padding=None,
topk=None,
top_k=None,
doc_stride=None,
max_answer_len=None,
max_seq_len=None,
max_question_len=None,
handle_impossible_answer=None,
align_to_words=None,
**kwargs,
):
# Set default values
preprocess_params = {}
if padding is not None:
preprocess_params["padding"] = padding
if doc_stride is not None:
preprocess_params["doc_stride"] = doc_stride
if max_question_len is not None:
preprocess_params["max_question_len"] = max_question_len
if max_seq_len is not None:
preprocess_params["max_seq_len"] = max_seq_len
postprocess_params = {}
if topk is not None and top_k is None:
warnings.warn("topk parameter is deprecated, use top_k instead", UserWarning)
top_k = topk
if top_k is not None:
if top_k < 1:
raise ValueError(f"top_k parameter should be >= 1 (got {top_k})")
postprocess_params["top_k"] = top_k
if max_answer_len is not None:
if max_answer_len < 1:
raise ValueError(f"max_answer_len parameter should be >= 1 (got {max_answer_len})")
if max_answer_len is not None:
postprocess_params["max_answer_len"] = max_answer_len
if handle_impossible_answer is not None:
postprocess_params["handle_impossible_answer"] = handle_impossible_answer
if align_to_words is not None:
postprocess_params["align_to_words"] = align_to_words
return preprocess_params, {}, postprocess_params
def __call__(self, *args, **kwargs):
"""
Answer the question(s) given as inputs by using the context(s).
Args:
args ([`SquadExample`] or a list of [`SquadExample`]):
One or several [`SquadExample`] containing the question and context.
X ([`SquadExample`] or a list of [`SquadExample`], *optional*):
One or several [`SquadExample`] containing the question and context (will be treated the same way as if
passed as the first positional argument).
data ([`SquadExample`] or a list of [`SquadExample`], *optional*):
One or several [`SquadExample`] containing the question and context (will be treated the same way as if
passed as the first positional argument).
question (`str` or `List[str]`):
One or several question(s) (must be used in conjunction with the `context` argument).
context (`str` or `List[str]`):
One or several context(s) associated with the question(s) (must be used in conjunction with the
`question` argument).
topk (`int`, *optional*, defaults to 1):
The number of answers to return (will be chosen by order of likelihood). Note that we return fewer than
topk answers if there are not enough options available within the context.
doc_stride (`int`, *optional*, defaults to 128):
If the context is too long to fit with the question for the model, it will be split in several chunks
with some overlap. This argument controls the size of that overlap.
max_answer_len (`int`, *optional*, defaults to 15):
The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
max_seq_len (`int`, *optional*, defaults to 384):
The maximum length of the total sentence (context + question) in tokens of each chunk passed to the
model. The context will be split in several chunks (using `doc_stride` as overlap) if needed.
max_question_len (`int`, *optional*, defaults to 64):
The maximum length of the question after tokenization. It will be truncated if needed.
handle_impossible_answer (`bool`, *optional*, defaults to `False`):
Whether or not we accept impossible as an answer.
align_to_words (`bool`, *optional*, defaults to `True`):
Attempts to align the answer to real words. Improves quality on space-separated languages. Might hurt on
non-space-separated languages (like Japanese or Chinese).
Return:
A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys:
- **score** (`float`) -- The probability associated to the answer.
- **start** (`int`) -- The character start index of the answer (in the tokenized version of the input).
- **end** (`int`) -- The character end index of the answer (in the tokenized version of the input).
- **answer** (`str`) -- The answer to the question.
"""
# Convert inputs to features
examples = self._args_parser(*args, **kwargs)
if isinstance(examples, (list, tuple)) and len(examples) == 1:
return super().__call__(examples[0], **kwargs)
return super().__call__(examples, **kwargs)
def preprocess(self, example, padding="do_not_pad", doc_stride=None, max_question_len=64, max_seq_len=None):
# XXX: This is special: args_parser will not handle anything generator- or dataset-like
# For those we expect user to send a simple valid example either directly as a SquadExample or simple dict.
# So we still need a little sanitation here.
if isinstance(example, dict):
example = SquadExample(None, example["question"], example["context"], None, None, None)
if max_seq_len is None:
max_seq_len = min(self.tokenizer.model_max_length, 384)
if doc_stride is None:
doc_stride = min(max_seq_len // 2, 128)
if doc_stride > max_seq_len:
raise ValueError(f"`doc_stride` ({doc_stride}) is larger than `max_seq_len` ({max_seq_len})")
if not self.tokenizer.is_fast:
features = squad_convert_examples_to_features(
examples=[example],
tokenizer=self.tokenizer,
max_seq_length=max_seq_len,
doc_stride=doc_stride,
max_query_length=max_question_len,
padding_strategy=PaddingStrategy.MAX_LENGTH,
is_training=False,
tqdm_enabled=False,
)
else:
# Define the side we want to truncate / pad and the text/pair sorting
question_first = self.tokenizer.padding_side == "right"
encoded_inputs = self.tokenizer(
text=example.question_text if question_first else example.context_text,
text_pair=example.context_text if question_first else example.question_text,
padding=padding,
truncation="only_second" if question_first else "only_first",
max_length=max_seq_len,
stride=doc_stride,
return_token_type_ids=True,
return_overflowing_tokens=True,
return_offsets_mapping=True,
return_special_tokens_mask=True,
)
# When the input is too long, it's converted into a batch of inputs with overflowing tokens
# and a stride of overlap between the inputs. If a batch of inputs is given, a special output
# "overflow_to_sample_mapping" indicates which member of the encoded batch belongs to which original sample.
# Here we tokenize examples one-by-one so we don't need to use "overflow_to_sample_mapping".
# "num_spans" is the number of output samples generated from the overflowing tokens.
num_spans = len(encoded_inputs["input_ids"])
# p_mask: mask with 1 for tokens that cannot be in the answer (0 for tokens that can be in an answer)
# We put 0 on the tokens from the context and 1 everywhere else (question and special tokens)
p_mask = [
[tok != 1 if question_first else 0 for tok in encoded_inputs.sequence_ids(span_id)]
for span_id in range(num_spans)
]
features = []
for span_idx in range(num_spans):
input_ids_span_idx = encoded_inputs["input_ids"][span_idx]
attention_mask_span_idx = (
encoded_inputs["attention_mask"][span_idx] if "attention_mask" in encoded_inputs else None
)
token_type_ids_span_idx = (
encoded_inputs["token_type_ids"][span_idx] if "token_type_ids" in encoded_inputs else None
)
# keep the cls_token unmasked (some models use it to indicate unanswerable questions)
if self.tokenizer.cls_token_id is not None:
cls_indices = np.nonzero(np.array(input_ids_span_idx) == self.tokenizer.cls_token_id)[0]
for cls_index in cls_indices:
p_mask[span_idx][cls_index] = 0
submask = p_mask[span_idx]
features.append(
SquadFeatures(
input_ids=input_ids_span_idx,
attention_mask=attention_mask_span_idx,
token_type_ids=token_type_ids_span_idx,
p_mask=submask,
encoding=encoded_inputs[span_idx],
# We don't use the rest of the values - and actually
# for Fast tokenizer we could totally avoid using SquadFeatures and SquadExample
cls_index=None,
token_to_orig_map={},
example_index=0,
unique_id=0,
paragraph_len=0,
token_is_max_context=0,
tokens=[],
start_position=0,
end_position=0,
is_impossible=False,
qas_id=None,
)
)
for i, feature in enumerate(features):
fw_args = {}
others = {}
model_input_names = self.tokenizer.model_input_names + ["p_mask", "token_type_ids"]
for k, v in feature.__dict__.items():
if k in model_input_names:
if self.framework == "tf":
tensor = tf.constant(v)
if tensor.dtype == tf.int64:
tensor = tf.cast(tensor, tf.int32)
fw_args[k] = tf.expand_dims(tensor, 0)
elif self.framework == "pt":
tensor = torch.tensor(v)
if tensor.dtype == torch.int32:
tensor = tensor.long()
fw_args[k] = tensor.unsqueeze(0)
else:
others[k] = v
is_last = i == len(features) - 1
yield {"example": example, "is_last": is_last, **fw_args, **others}
def _forward(self, inputs):
example = inputs["example"]
model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
output = self.model(**model_inputs)
if isinstance(output, dict):
return {"start": output["start_logits"], "end": output["end_logits"], "example": example, **inputs}
else:
start, end = output[:2]
return {"start": start, "end": end, "example": example, **inputs}
def postprocess(
self,
model_outputs,
top_k=1,
handle_impossible_answer=False,
max_answer_len=15,
align_to_words=True,
):
min_null_score = 1000000 # large and positive
answers = []
for output in model_outputs:
start_ = output["start"]
end_ = output["end"]
example = output["example"]
p_mask = output["p_mask"]
attention_mask = (
output["attention_mask"].numpy() if output.get("attention_mask", None) is not None else None
)
starts, ends, scores, min_null_score = select_starts_ends(
start_, end_, p_mask, attention_mask, min_null_score, top_k, handle_impossible_answer, max_answer_len
)
if not self.tokenizer.is_fast:
char_to_word = np.array(example.char_to_word_offset)
# Convert the answer (tokens) back to the original text
# Score: score from the model
# Start: Index of the first character of the answer in the context string
# End: Index of the character following the last character of the answer in the context string
# Answer: Plain text of the answer
for s, e, score in zip(starts, ends, scores):
token_to_orig_map = output["token_to_orig_map"]
answers.append(
{
"score": score.item(),
"start": np.where(char_to_word == token_to_orig_map[s])[0][0].item(),
"end": np.where(char_to_word == token_to_orig_map[e])[0][-1].item(),
"answer": " ".join(example.doc_tokens[token_to_orig_map[s] : token_to_orig_map[e] + 1]),
}
)
else:
# Convert the answer (tokens) back to the original text
# Score: score from the model
# Start: Index of the first character of the answer in the context string
# End: Index of the character following the last character of the answer in the context string
# Answer: Plain text of the answer
question_first = bool(self.tokenizer.padding_side == "right")
enc = output["encoding"]
# The encoding was *not* padded, but input_ids *might* be.
# It doesn't make a difference unless we're padding on
# the left-hand side, since then we have different offsets
# everywhere.
if self.tokenizer.padding_side == "left":
offset = (output["input_ids"] == self.tokenizer.pad_token_id).numpy().sum()
else:
offset = 0
# Sometimes the max probability token is in the middle of a word so:
# - we start by finding the right word containing the token with `token_to_word`
# - then we convert this word in a character span with `word_to_chars`
sequence_index = 1 if question_first else 0
for s, e, score in zip(starts, ends, scores):
s = s - offset
e = e - offset
start_index, end_index = self.get_indices(enc, s, e, sequence_index, align_to_words)
answers.append(
{
"score": score.item(),
"start": start_index,
"end": end_index,
"answer": example.context_text[start_index:end_index],
}
)
if handle_impossible_answer:
answers.append({"score": min_null_score, "start": 0, "end": 0, "answer": ""})
answers = sorted(answers, key=lambda x: x["score"], reverse=True)[:top_k]
if len(answers) == 1:
return answers[0]
return answers
def get_indices(
self, enc: "tokenizers.Encoding", s: int, e: int, sequence_index: int, align_to_words: bool
) -> Tuple[int, int]:
if align_to_words:
try:
start_word = enc.token_to_word(s)
end_word = enc.token_to_word(e)
start_index = enc.word_to_chars(start_word, sequence_index=sequence_index)[0]
end_index = enc.word_to_chars(end_word, sequence_index=sequence_index)[1]
except Exception:
# Some tokenizers don't really handle words. Keep to offsets then.
start_index = enc.offsets[s][0]
end_index = enc.offsets[e][1]
else:
start_index = enc.offsets[s][0]
end_index = enc.offsets[e][1]
return start_index, end_index
def span_to_answer(self, text: str, start: int, end: int) -> Dict[str, Union[str, int]]:
"""
When decoding from token probabilities, this method maps token indexes to the actual words in the initial context.
Args:
text (`str`): The actual context to extract the answer from.
start (`int`): The answer starting token index.
end (`int`): The answer end token index.
Returns:
Dictionary like `{'answer': str, 'start': int, 'end': int}`
"""
words = []
token_idx = char_start_idx = char_end_idx = chars_idx = 0
for i, word in enumerate(text.split(" ")):
token = self.tokenizer.tokenize(word)
# Append words if they are in the span
if start <= token_idx <= end:
if token_idx == start:
char_start_idx = chars_idx
if token_idx == end:
char_end_idx = chars_idx + len(word)
words += [word]
# Stop if we went over the end of the answer
if token_idx > end:
break
# Append the subtokenization length to the running index
token_idx += len(token)
chars_idx += len(word) + 1
# Join text with spaces
return {
"answer": " ".join(words),
"start": max(0, char_start_idx),
"end": min(len(text), char_end_idx),
}
|
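A short usage sketch for the pipeline above (hedged: the checkpoint is the one already cited in the class docstring, and exact scores will vary by version): requesting several candidates and allowing the empty answer exercises the `top_k` and `handle_impossible_answer` parameters handled by `_sanitize_parameters`.

```python
from transformers import pipeline

qa = pipeline("question-answering", model="deepset/roberta-base-squad2")
results = qa(
    question="Where do I live?",
    context="My name is Wolfgang and I live in Berlin",
    top_k=2,  # return the two most likely spans
    handle_impossible_answer=True,  # also score the empty answer
)
print(results)  # list of {'score', 'start', 'end', 'answer'} dicts
```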
27182812/ChatGLM-LLaMA-chinese-insturct | 16,760 | src/transformers/pipelines/text2text_generation.py | import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
TENSORS = 0
TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
"""
Pipeline for text to text generation using seq2seq models.
Example:
```python
>>> from transformers import pipeline
>>> generator = pipeline(model="mrm8488/t5-base-finetuned-question-generation-ap")
>>> generator(
... "answer: Manuel context: Manuel has created RuPERTa-base with the support of HF-Transformers and Google"
... )
[{'generated_text': 'question: Who created the RuPERTa-base?'}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This Text2TextGenerationPipeline pipeline can currently be loaded from [`pipeline`] using the following task
identifier: `"text2text-generation"`.
The models that this pipeline can use are models that have been fine-tuned on a sequence-to-sequence task. See the
up-to-date list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=text2text-generation). For a list of available
parameters, see the [following
documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate)
Usage:
```python
text2text_generator = pipeline("text2text-generation")
text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything")
```"""
# Used in the return key of the pipeline.
return_name = "generated"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
)
def _sanitize_parameters(
self,
return_tensors=None,
return_text=None,
return_type=None,
clean_up_tokenization_spaces=None,
truncation=None,
stop_sequence=None,
**generate_kwargs,
):
preprocess_params = {}
if truncation is not None:
preprocess_params["truncation"] = truncation
forward_params = generate_kwargs
postprocess_params = {}
if return_tensors is not None and return_type is None:
return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
postprocess_params["return_type"] = return_type
if clean_up_tokenization_spaces is not None:
postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
if stop_sequence is not None:
stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
if len(stop_sequence_ids) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim."
)
generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def check_inputs(self, input_length: int, min_length: int, max_length: int):
"""
Checks whether there might be something wrong with the given input with regard to the model.
"""
return True
def _parse_and_tokenize(self, *args, truncation):
prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0], list):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
args = ([prefix + arg for arg in args[0]],)
padding = True
elif isinstance(args[0], str):
args = (prefix + args[0],)
padding = False
else:
raise ValueError(
f" `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`"
)
inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__(self, *args, **kwargs):
r"""
Generate the output text(s) using text(s) given as inputs.
Args:
args (`str` or `List[str]`):
Input text for the encoder.
return_tensors (`bool`, *optional*, defaults to `False`):
Whether or not to include the tensors of predictions (as token indices) in the outputs.
return_text (`bool`, *optional*, defaults to `True`):
Whether or not to include the decoded texts in the outputs.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to clean up the potential extra spaces in the text output.
truncation (`TruncationStrategy`, *optional*, defaults to `TruncationStrategy.DO_NOT_TRUNCATE`):
The truncation strategy for the tokenization within the pipeline. `TruncationStrategy.DO_NOT_TRUNCATE`
(default) will never truncate, but it is sometimes desirable to truncate the input to fit the model's
max_length instead of throwing an error down the line.
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate method
corresponding to your framework [here](./model#generative-models)).
Return:
A list or a list of list of `dict`: Each result comes as a dictionary with the following keys:
- **generated_text** (`str`, present when `return_text=True`) -- The generated text.
- **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
ids of the generated text.
"""
result = super().__call__(*args, **kwargs)
if (
isinstance(args[0], list)
and all(isinstance(el, str) for el in args[0])
and all(len(res) == 1 for res in result)
):
return [res[0] for res in result]
return result
def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
return inputs
def _forward(self, model_inputs, **generate_kwargs):
if self.framework == "pt":
in_b, input_length = model_inputs["input_ids"].shape
elif self.framework == "tf":
in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()
generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
output_ids = self.model.generate(**model_inputs, **generate_kwargs)
out_b = output_ids.shape[0]
if self.framework == "pt":
output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
elif self.framework == "tf":
output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
return {"output_ids": output_ids}
def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
records = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
record = {f"{self.return_name}_token_ids": output_ids}
elif return_type == ReturnType.TEXT:
record = {
f"{self.return_name}_text": self.tokenizer.decode(
output_ids,
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
}
records.append(record)
return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
"""
Summarize news articles and other documents.
This summarizing pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"summarization"`.
The models that this pipeline can use are models that have been fine-tuned on a summarization task, currently
including '*bart-large-cnn*', '*t5-small*', '*t5-base*', '*t5-large*', '*t5-3b*', '*t5-11b*'. See the up-to-date
list of available models on [huggingface.co/models](https://huggingface.co/models?filter=summarization). For a list
of available parameters, see the [following
documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate)
Usage:
```python
# use bart in pytorch
summarizer = pipeline("summarization")
summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)
# use t5 in tf
summarizer = pipeline("summarization", model="t5-base", tokenizer="t5-base", framework="tf")
summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)
```"""
# Used in the return key of the pipeline.
return_name = "summary"
def __call__(self, *args, **kwargs):
r"""
Summarize the text(s) given as inputs.
Args:
documents (*str* or `List[str]`):
One or several articles (or one list of articles) to summarize.
return_text (`bool`, *optional*, defaults to `True`):
Whether or not to include the decoded texts in the outputs
return_tensors (`bool`, *optional*, defaults to `False`):
Whether or not to include the tensors of predictions (as token indices) in the outputs.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to clean up the potential extra spaces in the text output.
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate method
corresponding to your framework [here](./model#generative-models)).
Return:
A list or a list of list of `dict`: Each result comes as a dictionary with the following keys:
- **summary_text** (`str`, present when `return_text=True`) -- The summary of the corresponding input.
- **summary_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
ids of the summary.
"""
return super().__call__(*args, **kwargs)
def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
"""
Checks whether there might be something wrong with the given input with regard to the model.
"""
if max_length < min_length:
logger.warning(f"Your min_length={min_length} must be smaller than your max_length={max_length}.")
if input_length < max_length:
logger.warning(
f"Your max_length is set to {max_length}, but your input_length is only {input_length}. You might "
f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
"""
Translates from one language to another.
This translation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"translation_xx_to_yy"`.
The models that this pipeline can use are models that have been fine-tuned on a translation task. See the
up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=translation).
For a list of available parameters, see the [following
documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate)
Usage:
```python
en_fr_translator = pipeline("translation_en_to_fr")
en_fr_translator("How old are you?")
```"""
# Used in the return key of the pipeline.
return_name = "translation"
def check_inputs(self, input_length: int, min_length: int, max_length: int):
if input_length > 0.9 * max_length:
logger.warning(
f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
"increasing your max_length manually, e.g. translator('...', max_length=400)"
)
return True
def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
if getattr(self.tokenizer, "_build_translation_inputs", None):
return self.tokenizer._build_translation_inputs(
*args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
)
else:
return super()._parse_and_tokenize(*args, truncation=truncation)
def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
if src_lang is not None:
preprocess_params["src_lang"] = src_lang
if tgt_lang is not None:
preprocess_params["tgt_lang"] = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
task = kwargs.get("task", self.task)
items = task.split("_")
if task and len(items) == 4:
# translation, XX, to YY
preprocess_params["src_lang"] = items[1]
preprocess_params["tgt_lang"] = items[3]
return preprocess_params, forward_params, postprocess_params
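# Worked example of the backward-compatibility branch above (comment added for
# clarity): "translation_en_to_fr".split("_") -> ["translation", "en", "to", "fr"],
# so src_lang becomes items[1] == "en" and tgt_lang becomes items[3] == "fr".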
def __call__(self, *args, **kwargs):
r"""
Translate the text(s) given as inputs.
Args:
args (`str` or `List[str]`):
Texts to be translated.
return_tensors (`bool`, *optional*, defaults to `False`):
Whether or not to include the tensors of predictions (as token indices) in the outputs.
return_text (`bool`, *optional*, defaults to `True`):
Whether or not to include the decoded texts in the outputs.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to clean up the potential extra spaces in the text output.
src_lang (`str`, *optional*):
The language of the input. Might be required for multilingual models. Will not have any effect for
single pair translation models
tgt_lang (`str`, *optional*):
The language of the desired output. Might be required for multilingual models. Will not have any effect
for single pair translation models
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate method
corresponding to your framework [here](./model#generative-models)).
Return:
A list or a list of list of `dict`: Each result comes as a dictionary with the following keys:
- **translation_text** (`str`, present when `return_text=True`) -- The translation.
- **translation_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The
token ids of the translation.
"""
return super().__call__(*args, **kwargs)
|
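A hedged sketch of the reshaping done in `_forward` above (`t5-small` is an assumed, publicly available checkpoint): `generate` returns `in_b * num_return_sequences` sequences, which the pipeline folds back so that every input gets its own list of result dicts.

```python
from transformers import pipeline

generator = pipeline("text2text-generation", model="t5-small")
results = generator(
    ["translate English to German: How old are you?"],
    num_beams=2,
    num_return_sequences=2,  # out_b == in_b * 2 before the reshape
)
print(results)  # [[{'generated_text': ...}, {'generated_text': ...}]]
```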
27182812/ChatGLM-LLaMA-chinese-insturct | 5,968 | src/transformers/pipelines/visual_question_answering.py | from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
"""
Visual Question Answering pipeline using an `AutoModelForVisualQuestionAnswering`. This pipeline is currently only
available in PyTorch.
Example:
```python
>>> from transformers import pipeline
>>> oracle = pipeline(model="dandelin/vilt-b32-finetuned-vqa")
>>> image_url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png"
>>> oracle(question="What is she wearing ?", image=image_url)
[{'score': 0.948, 'answer': 'hat'}, {'score': 0.009, 'answer': 'fedora'}, {'score': 0.003, 'answer': 'clothes'}, {'score': 0.003, 'answer': 'sun hat'}, {'score': 0.002, 'answer': 'nothing'}]
>>> oracle(question="What is she wearing ?", image=image_url, top_k=1)
[{'score': 0.948, 'answer': 'hat'}]
>>> oracle(question="Is this a person ?", image=image_url, top_k=1)
[{'score': 0.993, 'answer': 'yes'}]
>>> oracle(question="Is this a man ?", image=image_url, top_k=1)
[{'score': 0.996, 'answer': 'no'}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This visual question answering pipeline can currently be loaded from [`pipeline`] using the following task
identifiers: `"visual-question-answering", "vqa"`.
The models that this pipeline can use are models that have been fine-tuned on a visual question answering task. See
the up-to-date list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=visual-question-answering).
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)
def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
preprocess_params, postprocess_params = {}, {}
if padding is not None:
preprocess_params["padding"] = padding
if truncation is not None:
preprocess_params["truncation"] = truncation
if top_k is not None:
postprocess_params["top_k"] = top_k
return preprocess_params, {}, postprocess_params
def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
r"""
Answers open-ended questions about images. The pipeline accepts several types of inputs which are detailed
below:
- `pipeline(image=image, question=question)`
- `pipeline({"image": image, "question": question})`
- `pipeline([{"image": image, "question": question}])`
- `pipeline([{"image": image, "question": question}, {"image": image, "question": question}])`
Args:
image (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
The pipeline handles three types of images:
- A string containing a http link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
The pipeline accepts either a single image or a batch of images. If given a single image, it can be
broadcasted to multiple questions.
question (`str`, `List[str]`):
The question(s) asked. If given a single question, it can be broadcasted to multiple images.
top_k (`int`, *optional*, defaults to 5):
The number of top labels that will be returned by the pipeline. If the provided number is higher than
the number of labels available in the model configuration, it will default to the number of labels.
Return:
A dictionary or a list of dictionaries containing the result. The dictionaries contain the following keys:
- **answer** (`str`) -- The answer identified by the model.
- **score** (`float`) -- The score attributed by the model to that answer.
"""
if isinstance(image, (Image.Image, str)) and isinstance(question, str):
inputs = {"image": image, "question": question}
else:
"""
Supports the following format
- {"image": image, "question": question}
- [{"image": image, "question": question}]
- Generator and datasets
"""
inputs = image
results = super().__call__(inputs, **kwargs)
return results
def preprocess(self, inputs, padding=False, truncation=False):
image = load_image(inputs["image"])
model_inputs = self.tokenizer(
inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
)
image_features = self.image_processor(images=image, return_tensors=self.framework)
model_inputs.update(image_features)
return model_inputs
def _forward(self, model_inputs):
model_outputs = self.model(**model_inputs)
return model_outputs
def postprocess(self, model_outputs, top_k=5):
if top_k > self.model.config.num_labels:
top_k = self.model.config.num_labels
if self.framework == "pt":
probs = model_outputs.logits.sigmoid()[0]
scores, ids = probs.topk(top_k)
else:
raise ValueError(f"Unsupported framework: {self.framework}")
scores = scores.tolist()
ids = ids.tolist()
return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
|
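A minimal sketch of the sigmoid-plus-top-k step from `postprocess` above, run on toy logits (the label mapping here is hypothetical, not the real model's `id2label`):

```python
import torch

logits = torch.tensor([[2.0, -1.0, 0.5]])  # stand-in for model_outputs.logits
probs = logits.sigmoid()[0]
scores, ids = probs.topk(2)
id2label = {0: "yes", 1: "no", 2: "maybe"}  # assumed mapping
print([{"score": s, "answer": id2label[i]} for s, i in zip(scores.tolist(), ids.tolist())])
# [{'score': 0.88..., 'answer': 'yes'}, {'score': 0.62..., 'answer': 'maybe'}]
```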
27182812/ChatGLM-LLaMA-chinese-insturct | 9,029 | src/transformers/pipelines/zero_shot_object_detection.py | from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
"""
Zero shot object detection pipeline using `OwlViTForObjectDetection`. This pipeline predicts bounding boxes of
objects when you provide an image and a set of `candidate_labels`.
Example:
```python
>>> from transformers import pipeline
>>> detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection")
>>> detector(
... "http://images.cocodataset.org/val2017/000000039769.jpg",
... candidate_labels=["cat", "couch"],
... )
[{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}]
>>> detector(
... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
... candidate_labels=["head", "bird"],
... )
[{'score': 0.119, 'label': 'bird', 'box': {'xmin': 71, 'ymin': 170, 'xmax': 410, 'ymax': 508}}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This object detection pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"zero-shot-object-detection"`.
See the list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=zero-shot-object-detection).
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch.")
requires_backends(self, "vision")
self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)
def __call__(
self,
image: Union[str, "Image.Image", List[Dict[str, Any]]],
candidate_labels: Union[str, List[str]] = None,
**kwargs,
):
"""
Detect objects (bounding boxes & classes) in the image(s) passed as inputs.
Args:
image (`str`, `PIL.Image` or `List[Dict[str, Any]]`):
The pipeline handles three types of images:
- A string containing an http url pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
You can use this parameter to directly send a list of images, a dataset, or a generator, like so:
```python
>>> from transformers import pipeline
>>> detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection")
>>> detector(
... [
... {
... "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
... "candidate_labels": ["cat", "couch"],
... },
... {
... "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
... "candidate_labels": ["cat", "couch"],
... },
... ]
... )
[[{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.25, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}], [{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}]]
```
candidate_labels (`str` or `List[str]` or `List[List[str]]`):
What the model should recognize in the image.
threshold (`float`, *optional*, defaults to 0.1):
The probability necessary to make a prediction.
top_k (`int`, *optional*, defaults to None):
The number of top predictions that will be returned by the pipeline. If the provided number is `None`
or higher than the number of predictions available, it will default to the number of predictions.
Return:
A list of lists containing prediction results, one list per input image. Each list contains dictionaries
with the following keys:
- **label** (`str`) -- Text query corresponding to the found object.
- **score** (`float`) -- Score corresponding to the object (between 0 and 1).
- **box** (`Dict[str,int]`) -- Bounding box of the detected object in the image's original size. It is a
dictionary with `xmin`, `ymin`, `xmax`, `ymax` keys.
"""
if "text_queries" in kwargs:
candidate_labels = kwargs.pop("text_queries")
if isinstance(image, (str, Image.Image)):
inputs = {"image": image, "candidate_labels": candidate_labels}
else:
inputs = image
results = super().__call__(inputs, **kwargs)
return results
def _sanitize_parameters(self, **kwargs):
postprocess_params = {}
if "threshold" in kwargs:
postprocess_params["threshold"] = kwargs["threshold"]
if "top_k" in kwargs:
postprocess_params["top_k"] = kwargs["top_k"]
return {}, {}, postprocess_params
def preprocess(self, inputs):
image = load_image(inputs["image"])
candidate_labels = inputs["candidate_labels"]
if isinstance(candidate_labels, str):
candidate_labels = candidate_labels.split(",")
target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
for i, candidate_label in enumerate(candidate_labels):
text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
image_features = self.image_processor(image, return_tensors=self.framework)
yield {
"is_last": i == len(candidate_labels) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
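# Note (added for clarity): as a ChunkPipeline, this yields one model pass per
# candidate label for the same image; "is_last" tells postprocess when all the
# labels of a given image have been consumed.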
def _forward(self, model_inputs):
target_size = model_inputs.pop("target_size")
candidate_label = model_inputs.pop("candidate_label")
is_last = model_inputs.pop("is_last")
outputs = self.model(**model_inputs)
model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def postprocess(self, model_outputs, threshold=0.1, top_k=None):
results = []
for model_output in model_outputs:
label = model_output["candidate_label"]
model_output = BaseModelOutput(model_output)
outputs = self.image_processor.post_process_object_detection(
outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
)[0]
for index in outputs["scores"].nonzero():
score = outputs["scores"][index].item()
box = self._get_bounding_box(outputs["boxes"][index][0])
result = {"score": score, "label": label, "box": box}
results.append(result)
results = sorted(results, key=lambda x: x["score"], reverse=True)
if top_k:
results = results[:top_k]
return results
def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
"""
Turns list [xmin, ymin, xmax, ymax] into dict { "xmin": xmin, ... }
Args:
box (`torch.Tensor`): Tensor containing the coordinates in corners format.
Returns:
bbox (`Dict[str, int]`): Dict containing the coordinates in corners format.
"""
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
xmin, ymin, xmax, ymax = box.int().tolist()
bbox = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
|
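A quick, self-contained check of the corner-format conversion performed by `_get_bounding_box` above (toy tensor; `.int()` truncates toward zero):

```python
import torch

box = torch.tensor([10.3, 20.7, 110.9, 220.1])  # xmin, ymin, xmax, ymax
xmin, ymin, xmax, ymax = box.int().tolist()
print({"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax})
# {'xmin': 10, 'ymin': 20, 'xmax': 110, 'ymax': 220}
```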
233zzh/TitanDataOperationSystem | 4,374 | 代码/web代码/titanApp/src/main/java/edu/neu/titan/titanApp/service/IUserAnalyseService.java | package edu.neu.titan.titanApp.service;
import edu.neu.titan.titanApp.common.beans.Condition;
import edu.neu.titan.titanApp.common.beans.Distribution;
import edu.neu.titan.titanApp.common.sql.SQLResultBuilder;
import edu.neu.titan.titanApp.common.utils.SetConditionUtils;
import edu.neu.titan.titanApp.dao.sql.UserAnalyseTemplate;
import java.util.List;
import java.util.Map;
/**
 * Created by IntelliJ IDEA.
 *
 * @Author: Zhao Lei
 * @Email: 1176066749@qq.com
 * @Date: 2020/6/17
 * @Time: 16:21
 * @Version: 1.0
 * @Description: Defines the methods that provide the data needed for user analysis. Only high-level method definitions live here; which data is actually prepared is decided in the implementing class.
 */
public interface IUserAnalyseService {
/**
 * Returns the daily, weekly and monthly data needed for the growth trend in new-user analysis.
 * @param condition the query condition passed in
 * @return keys may be day, week, month or "x轴" (x-axis); values may be x-axis data or y-axis data
 */
Map<String, List> getInstallationData(Condition condition);
/**
 * Returns the daily, weekly and monthly data needed for the active trend, active composition and active stickiness charts in active-user analysis, plus the data for the weekly and monthly active rates.
 * @param condition the query condition passed in
 * @return this map carries quite a lot of data; keys include x_天/x_周/x_月 (day/week/month x-axes), daily/weekly/monthly active users, daily/weekly/monthly active composition, daily/weekly/monthly active stickiness, the weekly active rate and the monthly active rate;
 * values are the three shared x-axis series and the y-axis data required by each key
 */
Map<String, List> getActiveData(Condition condition);
/**
 * Returns the daily, weekly and monthly data needed for launch-count analysis.
 * @param condition the query condition passed in
 * @return keys are x_天, x_周, x_月; values are the x-axis and y-axis data corresponding to each key
 */
Map<String, List> getLaunchData(Condition condition);
/**
 * Returns the data needed for the version distribution.
 * @param condition the query condition passed in
 * @return keys are the labels plus "x轴" (x-axis); values are the x-axis data and the y-axis data of the corresponding label
 */
Map<String, Map<String, List>> getVersionData(Condition condition);
/**
 * Gets new-user data at a daily interval.
 * @param condition the query condition passed in
 * @return data for the given time range
 */
List<Integer> getInstallationDataDay(Condition condition);
/**
 * Gets new-user data at a weekly interval.
 * @param condition the query condition passed in
 * @return data for the given time range
 */
List<Integer> getInstallationDataWeek(Condition condition);
/**
 * Gets new-user data at a monthly interval.
 * @param condition the query condition passed in
 * @return data for the given time range
 */
List<Integer> getInstallationDataMonth(Condition condition);
/**
 * Gets active-trend data at a daily interval.
 * @param condition the query condition passed in
 * @return data for the given time range
 */
List<Integer> getActiveTrendDataDay(Condition condition);
/**
 * Gets active-trend data at a weekly interval.
 * @param condition the query condition passed in
 * @return data for the given time range
 */
List<Integer> getActiveTrendDataWeek(Condition condition);
/**
 * Gets active-trend data at a monthly interval.
 * @param condition the query condition passed in
 * @return data for the given time range
 */
List<Integer> getActiveTrendDataMonth(Condition condition);
/**
 * Gets active-user composition data at a daily interval: first query the new users and the active users, then divide the two.
 * @param condition the query condition passed in
 * @return data for the given time range
 */
List<Double> getActiveFormDataDay(Condition condition);
/**
 * Gets active-user composition data at a weekly interval.
 * @param condition the query condition passed in
 * @return data for the given time range
 */
List<Double> getActiveFormDataWeek(Condition condition);
/**
 * Gets active-user composition data at a monthly interval.
 * @param condition the query condition passed in
 * @return data for the given time range
 */
List<Double> getActiveFormDataMonth(Condition condition);
/**
 * Gets active-stickiness data over the most recent `days` days before each date: DAU / active users in the last `days` days, e.g. DAU / active users in the last 30 days.
 * @param condition the query condition passed in
 * @param days the number of preceding days
 * @return data for the given time range
 */
List<Double> getActiveViscosityData(int days, Condition condition);
/**
 * Gets the weekly active rate.
 * @param condition the query condition passed in
 * @return data for the given time range
 */
List<Double> getActiveRateWeekData(Condition condition);
/**
 * Gets the monthly active rate.
 * @param condition the query condition passed in
 * @return data for the given time range
 */
List<Double> getActiveRateMonthData(Condition condition);
/**
 * Gets launch-count data at a daily interval.
 * @param condition the query condition passed in
 * @return data for the given time range
 */
List<Integer> getLaunchDataDay(Condition condition);
/**
 * Gets launch-count data at a weekly interval.
 * @param condition the query condition passed in
 * @return data for the given time range
 */
List<Integer> getLaunchDataWeek(Condition condition);
/**
 * Gets launch-count data at a monthly interval.
 * @param condition the query condition passed in
 * @return data for the given time range
 */
List<Integer> getLaunchDataMonth(Condition condition);
/**
 * Gets version-distribution data for new users.
 * @param condition the query condition passed in
 * @return data for the given time range
 */
Map<String, List> getTopVersionDataInstallation(Condition condition);
/**
 * Gets version-distribution data for active users.
 * @param condition the query condition passed in
 * @return data for the given time range
 */
Map<String, List> getTopVersionDataActive(Condition condition);
/**
 * Gets version-distribution data for launch counts.
 * @param condition the query condition passed in
 * @return data for the given time range
 */
Map<String, List> getTopVersionDataLaunch(Condition condition);
}
|
233zzh/TitanDataOperationSystem | 831 | 代码/web代码/titanApp/src/main/java/edu/neu/titan/titanApp/service/IDateService.java | package edu.neu.titan.titanApp.service;
import edu.neu.titan.titanApp.common.beans.Condition;
import edu.neu.titan.titanApp.common.utils.DateUtils;
/**
 * Created by IntelliJ IDEA.
 *
 * @Author: Wang Kuo
 * @Email: 2383536228@qq.com
 * @Date: 2020/6/19
 * @Time: 13:53
 * @Version: 1.0
 * @Description: Interface providing date-conversion services
 */
public interface IDateService {
/**
 * Converts the condition's date range into an array of date strings.
 * @param condition the Condition instance that wraps the query condition
 * @return the date strings
 */
public String[] getDays(Condition condition);
/**
 * Converts the date range into an array of week-range strings.
 * @param condition the Condition instance that wraps the query condition
 * @return the date strings
 */
public String[] getWeeks(Condition condition);
/**
 * Converts the date range into an array of month-range strings.
 * @param condition the Condition instance that wraps the query condition
 * @return the date strings
 */
public String[] getMonths(Condition condition);
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 11,983 | src/transformers/pipelines/zero_shot_classification.py | from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
"""
Handles arguments for zero-shot text classification by turning each possible label into an NLI
premise/hypothesis pair.
"""
def _parse_labels(self, labels):
if isinstance(labels, str):
labels = [label.strip() for label in labels.split(",") if label.strip()]
return labels
def __call__(self, sequences, labels, hypothesis_template):
if len(labels) == 0 or len(sequences) == 0:
raise ValueError("You must include at least one label and at least one sequence.")
if hypothesis_template.format(labels[0]) == hypothesis_template:
raise ValueError(
(
'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
"Make sure the passed template includes formatting syntax such as {{}} where the label should go."
).format(hypothesis_template)
)
if isinstance(sequences, str):
sequences = [sequences]
sequence_pairs = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
return sequence_pairs, sequences
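# Worked example (comment added for clarity): with sequences="I love this film",
# labels=["positive", "negative"] and the default template "This example is {}.",
# the handler returns
#   sequence_pairs == [["I love this film", "This example is positive."],
#                      ["I love this film", "This example is negative."]]
# along with the original list of sequences.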
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
"""
NLI-based zero-shot classification pipeline using a `ModelForSequenceClassification` trained on NLI (natural
language inference) tasks. Equivalent of `text-classification` pipelines, but these models don't require a
hardcoded number of potential classes, they can be chosen at runtime. It usually means it's slower but it is
**much** more flexible.
Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis
pair and passed to the pretrained model. Then, the logit for *entailment* is taken as the logit for the candidate
label being valid. Any NLI model can be used, but the id of the *entailment* label must be included in the model
config's [`~transformers.PretrainedConfig.label2id`].
Example:
```python
>>> from transformers import pipeline
>>> oracle = pipeline(model="facebook/bart-large-mnli")
>>> oracle(
... "I have a problem with my iphone that needs to be resolved asap!!",
... candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"],
... )
{'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]}
>>> oracle(
... "I have a problem with my iphone that needs to be resolved asap!!",
... candidate_labels=["english", "german"],
... )
{'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['english', 'german'], 'scores': [0.814, 0.186]}
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This NLI pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"zero-shot-classification"`.
The models that this pipeline can use are models that have been fine-tuned on an NLI task. See the up-to-date list
of available models on [huggingface.co/models](https://huggingface.co/models?search=nli).
"""
def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
self._args_parser = args_parser
super().__init__(*args, **kwargs)
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
)
@property
def entailment_id(self):
for label, ind in self.model.config.label2id.items():
if label.lower().startswith("entail"):
return ind
return -1
def _parse_and_tokenize(
self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
):
"""
Parse arguments and tokenize with only_first truncation, so that the hypothesis (label) is not truncated
"""
return_tensors = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"Tokenizer does not support padding, which is necessary for zero-shot classification; attempting to use"
" `pad_token=eos_token`"
)
self.tokenizer.pad_token = self.tokenizer.eos_token
try:
inputs = self.tokenizer(
sequence_pairs,
add_special_tokens=add_special_tokens,
return_tensors=return_tensors,
padding=padding,
truncation=truncation,
)
except Exception as e:
if "too short" in str(e):
# Tokenizers might complain that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# There doesn't seem to be a better way to catch that
# exception.
inputs = self.tokenizer(
sequence_pairs,
add_special_tokens=add_special_tokens,
return_tensors=return_tensors,
padding=padding,
truncation=TruncationStrategy.DO_NOT_TRUNCATE,
)
else:
raise e
return inputs
def _sanitize_parameters(self, **kwargs):
if kwargs.get("multi_class", None) is not None:
kwargs["multi_label"] = kwargs["multi_class"]
logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers."
)
preprocess_params = {}
if "candidate_labels" in kwargs:
preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
if "hypothesis_template" in kwargs:
preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
postprocess_params = {}
if "multi_label" in kwargs:
postprocess_params["multi_label"] = kwargs["multi_label"]
return preprocess_params, {}, postprocess_params
def __call__(
self,
sequences: Union[str, List[str]],
*args,
**kwargs,
):
"""
Classify the sequence(s) given as inputs. See the [`ZeroShotClassificationPipeline`] documentation for more
information.
Args:
sequences (`str` or `List[str]`):
The sequence(s) to classify, will be truncated if the model input is too large.
candidate_labels (`str` or `List[str]`):
The set of possible class labels to classify each sequence into. Can be a single label, a string of
comma-separated labels, or a list of labels.
hypothesis_template (`str`, *optional*, defaults to `"This example is {}."`):
The template used to turn each label into an NLI-style hypothesis. This template must include a {} or
similar syntax for the candidate label to be inserted into the template. For example, the default
template is `"This example is {}."` With the candidate label `"sports"`, this would be fed into the
model like `"<cls> sequence to classify <sep> This example is sports . <sep>"`. The default template
works well in many cases, but it may be worthwhile to experiment with different templates depending on
the task setting.
multi_label (`bool`, *optional*, defaults to `False`):
Whether or not multiple candidate labels can be true. If `False`, the scores are normalized such that
the sum of the label likelihoods for each sequence is 1. If `True`, the labels are considered
independent and probabilities are normalized for each candidate by doing a softmax of the entailment
score vs. the contradiction score.
Return:
A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys:
- **sequence** (`str`) -- The sequence for which this is the output.
- **labels** (`List[str]`) -- The labels sorted by order of likelihood.
- **scores** (`List[float]`) -- The probabilities for each of the labels.
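For example, with `multi_label=True` each label is scored independently against its contradiction
logit, so the scores no longer need to sum to 1 (a sketch; the exact values depend on the model):
```python
>>> oracle(
...     "I have a problem with my iphone that needs to be resolved asap!!",
...     candidate_labels=["urgent", "phone"],
...     multi_label=True,
... )  # doctest: +SKIP
```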
"""
if len(args) == 0:
pass
elif len(args) == 1 and "candidate_labels" not in kwargs:
kwargs["candidate_labels"] = args[0]
else:
raise ValueError(f"Unable to understand extra arguments {args}")
return super().__call__(sequences, **kwargs)
def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
model_input = self._parse_and_tokenize([sequence_pair])
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(candidate_labels) - 1,
**model_input,
}
def _forward(self, inputs):
candidate_label = inputs["candidate_label"]
sequence = inputs["sequence"]
model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
outputs = self.model(**model_inputs)
model_outputs = {
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def postprocess(self, model_outputs, multi_label=False):
candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
sequences = [outputs["sequence"] for outputs in model_outputs]
logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
N = logits.shape[0]
n = len(candidate_labels)
num_sequences = N // n
reshaped_outputs = logits.reshape((num_sequences, n, -1))
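# Logits arrive as one row per (sequence, label) pair, so `reshaped_outputs` has shape
# (num_sequences, num_labels, num_nli_classes).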
if multi_label or len(candidate_labels) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
entailment_id = self.entailment_id
contradiction_id = -1 if entailment_id == 0 else 0
entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
scores = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
entail_logits = reshaped_outputs[..., self.entailment_id]
scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
top_inds = list(reversed(scores[0].argsort()))
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 10,042 | src/transformers/pipelines/text_classification.py | import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
# Element-wise logistic function, used when labels are scored independently.
return 1.0 / (1.0 + np.exp(-_outputs))
def softmax(_outputs):
# Numerically stable softmax: subtracting the row-wise max before exponentiating
# avoids overflow without changing the result.
maxes = np.max(_outputs, axis=-1, keepdims=True)
shifted_exp = np.exp(_outputs - maxes)
return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
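# Example: softmax(np.array([[2.0, 1.0]])) ≈ array([[0.731, 0.269]]); each row sums to 1.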
class ClassificationFunction(ExplicitEnum):
SIGMOID = "sigmoid"
SOFTMAX = "softmax"
NONE = "none"
@add_end_docstrings(
PIPELINE_INIT_ARGS,
r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or only that of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
""",
)
class TextClassificationPipeline(Pipeline):
"""
Text classification pipeline using any `ModelForSequenceClassification`. See the [sequence classification
examples](../task_summary#sequence-classification) for more information.
Example:
```python
>>> from transformers import pipeline
>>> classifier = pipeline(model="distilbert-base-uncased-finetuned-sst-2-english")
>>> classifier("This movie is disgustingly good !")
[{'label': 'POSITIVE', 'score': 1.0}]
>>> classifier("Director tried too much.")
[{'label': 'NEGATIVE', 'score': 0.996}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This text classification pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"sentiment-analysis"` (for classifying sequences according to positive or negative sentiments).
If multiple classification labels are available (`model.config.num_labels >= 2`), the pipeline will run a softmax
over the results. If there is a single label, the pipeline will run a sigmoid over the result.
The models that this pipeline can use are models that have been fine-tuned on a sequence classification task. See
the up-to-date list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=text-classification).
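For example, to retrieve the scores for all labels rather than only the predicted one (a sketch; the
exact scores depend on the checkpoint):
```python
>>> classifier("This movie is disgustingly good !", top_k=None)  # doctest: +SKIP
```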
"""
return_all_scores = False
function_to_apply = ClassificationFunction.NONE
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
)
def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
preprocess_params = tokenizer_kwargs
postprocess_params = {}
if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
return_all_scores = self.model.config.return_all_scores
if isinstance(top_k, int) or top_k is None:
postprocess_params["top_k"] = top_k
postprocess_params["_legacy"] = False
elif return_all_scores is not None:
warnings.warn(
"`return_all_scores` is now deprecated, if want a similar funcionality use `top_k=None` instead of"
" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
UserWarning,
)
if return_all_scores:
postprocess_params["top_k"] = None
else:
postprocess_params["top_k"] = 1
if isinstance(function_to_apply, str):
function_to_apply = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
postprocess_params["function_to_apply"] = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__(self, *args, **kwargs):
"""
Classify the text(s) given as inputs.
Args:
args (`str`, `List[str]`, `Dict[str, str]`, or `List[Dict[str, str]]`):
One or several texts to classify. In order to use text pairs for your classification, you can send a
dictionary containing the `{"text", "text_pair"}` keys, or a list of such dictionaries.
top_k (`int`, *optional*, defaults to `1`):
How many results to return.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different
values:
- `"default"`: if the model has a single label, applies the sigmoid function on the output; if the
model has several labels, applies the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
Return:
A list of `dict`, or a list of lists of `dict`: Each result comes as a list of dictionaries with the following keys:
- **label** (`str`) -- The label predicted.
- **score** (`float`) -- The corresponding probability.
If `top_k` is used, one such dictionary is returned per label.
"""
result = super().__call__(*args, **kwargs)
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
_legacy = "top_k" not in kwargs
if isinstance(args[0], str) and _legacy:
# This pipeline is odd, and returns a list even when a single item is run
return [result]
else:
return result
def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
return_tensors = self.framework
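# Text pairs arrive as a dict, e.g. {"text": "premise", "text_pair": "hypothesis"}, and are
# tokenized as a single paired input.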
if isinstance(inputs, dict):
return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
)
elif isinstance(inputs, list):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
)
return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
def _forward(self, model_inputs):
return self.model(**model_inputs)
def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
# `_legacy` is used to determine whether we're running the naked pipeline in backward-
# compatibility mode, or whether the pipeline was called with `pipeline(..., top_k=1)`,
# in which case we return the more natural result: a list of dicts.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
function_to_apply = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
function_to_apply = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
function_to_apply = self.model.config.function_to_apply
else:
function_to_apply = ClassificationFunction.NONE
outputs = model_outputs["logits"][0]
outputs = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
scores = sigmoid(outputs)
elif function_to_apply == ClassificationFunction.SOFTMAX:
scores = softmax(outputs)
elif function_to_apply == ClassificationFunction.NONE:
scores = outputs
else:
raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")
if top_k == 1 and _legacy:
return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
dict_scores = [
{"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
]
if not _legacy:
dict_scores.sort(key=lambda x: x["score"], reverse=True)
if top_k is not None:
dict_scores = dict_scores[:top_k]
return dict_scores
|
27182812/ChatGLM-LLaMA-chinese-insturct | 13,596 | src/transformers/pipelines/conversational.py | import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
"""
Utility class containing a conversation and its history. This class is meant to be used as an input to the
[`ConversationalPipeline`]. The conversation contains a number of utility functions to manage the addition of new
user input and generated model responses. A conversation needs to contain an unprocessed user input before being
passed to the [`ConversationalPipeline`]. This user input is either created when the class is instantiated, or by
calling `conversation.add_user_input("input")` after a conversation turn.
Arguments:
text (`str`, *optional*):
The initial user input to start the conversation. If not provided, a user input needs to be provided
manually using the [`~Conversation.add_user_input`] method before the conversation can begin.
conversation_id (`uuid.UUID`, *optional*):
Unique identifier for the conversation. If not provided, a random UUID4 id will be assigned to the
conversation.
past_user_inputs (`List[str]`, *optional*):
Past user inputs, if any. You don't need to pass them manually if you use the pipeline
interactively, but if you want to recreate a history you need to set both `past_user_inputs` and
`generated_responses` with equal-length lists of strings.
generated_responses (`List[str]`, *optional*):
Past model responses, if any. You don't need to pass them manually if you use the pipeline
interactively, but if you want to recreate a history you need to set both `past_user_inputs` and
`generated_responses` with equal-length lists of strings.
Usage:
```python
conversation = Conversation("Going to the movies tonight - any suggestions?")
# Steps usually performed by the model when generating a response:
# 1. Mark the user input as processed (moved to the history)
conversation.mark_processed()
# 2. Append a model response
conversation.append_response("The Big lebowski.")
conversation.add_user_input("Is it good?")
```"""
def __init__(
self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
):
if not conversation_id:
conversation_id = uuid.uuid4()
if past_user_inputs is None:
past_user_inputs = []
if generated_responses is None:
generated_responses = []
self.uuid: uuid.UUID = conversation_id
self.past_user_inputs: List[str] = past_user_inputs
self.generated_responses: List[str] = generated_responses
self.new_user_input: Optional[str] = text
def __eq__(self, other):
if not isinstance(other, Conversation):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def add_user_input(self, text: str, overwrite: bool = False):
"""
Add a user input to the conversation for the next round. This populates the internal `new_user_input` field.
Args:
text (`str`): The user input for the next conversation round.
overwrite (`bool`, *optional*, defaults to `False`):
Whether or not existing and unprocessed user input should be overwritten when this function is called.
"""
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input was present: "{self.new_user_input}" was overwritten '
f'with: "{text}".'
)
self.new_user_input = text
else:
logger.warning(
f'User input added while unprocessed input was present: new input "{text}" was '
f'ignored. Set `overwrite=True` to overwrite the unprocessed user input.'
)
else:
self.new_user_input = text
def mark_processed(self):
"""
Mark the conversation as processed (moves the content of `new_user_input` to `past_user_inputs`) and empties
the `new_user_input` field.
"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
self.new_user_input = None
def append_response(self, response: str):
"""
Append a response to the list of generated responses.
Args:
response (`str`): The model generated response.
"""
self.generated_responses.append(response)
def iter_texts(self):
"""
Iterates over all blobs of the conversation.
Returns: Iterator of (is_user, text_chunk) in chronological order of the conversation. `is_user` is a `bool`,
`text_chunk` is a `str`.
"""
for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
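# For example, after one completed turn plus a pending user input, iteration yields:
# (True, "Going to the movies tonight - any suggestions?"), (False, "The Big Lebowski"),
# (True, "Is it good?")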
def __repr__(self):
"""
Generates a string representation of the conversation.
Return:
`str`:
Example: Conversation id: 7d15686b-dc94-49f2-9c4b-c9eac6a1f114 user >> Going to the movies tonight - any
suggestions? bot >> The Big Lebowski
"""
output = f"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
name = "user" if is_user else "bot"
output += f"{name} >> {text} \n"
return output
@add_end_docstrings(
PIPELINE_INIT_ARGS,
r"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""",
)
class ConversationalPipeline(Pipeline):
"""
Multi-turn conversational pipeline.
Example:
```python
>>> from transformers import pipeline, Conversation
>>> chatbot = pipeline(model="microsoft/DialoGPT-medium")
>>> conversation = Conversation("Going to the movies tonight - any suggestions?")
>>> conversation = chatbot(conversation)
>>> conversation.generated_responses[-1]
'The Big Lebowski'
>>> conversation.add_user_input("Is it an action movie?")
>>> conversation = chatbot(conversation)
>>> conversation.generated_responses[-1]
"It's a comedy."
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This conversational pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"conversational"`.
The models that this pipeline can use are models that have been fine-tuned on a multi-turn conversational task,
currently: *'microsoft/DialoGPT-small'*, *'microsoft/DialoGPT-medium'*, *'microsoft/DialoGPT-large'*. See the
up-to-date list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=conversational).
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.tokenizer.pad_token_id is None:
self.tokenizer.pad_token = self.tokenizer.eos_token
def _sanitize_parameters(
self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
):
preprocess_params = {}
forward_params = {}
postprocess_params = {}
if min_length_for_response is not None:
preprocess_params["min_length_for_response"] = min_length_for_response
if minimum_tokens is not None:
forward_params["minimum_tokens"] = minimum_tokens
if "max_length" in generate_kwargs:
forward_params["max_length"] = generate_kwargs["max_length"]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(generate_kwargs)
return preprocess_params, forward_params, postprocess_params
def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
r"""
Generate responses for the conversation(s) given as inputs.
Args:
conversations (a [`Conversation`] or a list of [`Conversation`]):
Conversations to generate responses for.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
Whether or not to clean up the potential extra spaces in the text output.
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate method
corresponding to your framework [here](./model#generative-models)).
Returns:
[`Conversation`] or a list of [`Conversation`]: Conversation(s) with updated generated responses for those
containing a new user input.
"""
# XXX: num_workers==0 is required to be backward compatible
# Otherwise the threads will require a Conversation copy.
# This will definitely hinder performance on GPU, but has to be opted
# in because of this BC change.
outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
if isinstance(outputs, list) and len(outputs) == 1:
return outputs[0]
return outputs
def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
if not isinstance(conversation, Conversation):
raise ValueError("ConversationalPipeline, expects Conversation as inputs")
if conversation.new_user_input is None:
raise ValueError(
f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
"Add user inputs with the conversation's `add_user_input` method"
)
if hasattr(self.tokenizer, "_build_conversation_input_ids"):
input_ids = self.tokenizer._build_conversation_input_ids(conversation)
else:
# If the tokenizer cannot handle conversations, we default to only the old version
input_ids = self._legacy_parse_and_tokenize(conversation)
if self.framework == "pt":
input_ids = torch.LongTensor([input_ids])
elif self.framework == "tf":
input_ids = tf.constant([input_ids])
return {"input_ids": input_ids, "conversation": conversation}
def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
max_length = generate_kwargs.get("max_length", self.model.config.max_length)
n = model_inputs["input_ids"].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})")
trim = max_length - minimum_tokens
model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
if "attention_mask" in model_inputs:
model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
conversation = model_inputs.pop("conversation")
generate_kwargs["max_length"] = max_length
output_ids = self.model.generate(**model_inputs, **generate_kwargs)
if self.model.config.is_encoder_decoder:
start_position = 1
else:
start_position = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
output_ids = model_outputs["output_ids"]
answer = self.tokenizer.decode(
output_ids[0],
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
conversation = model_outputs["conversation"]
conversation.mark_processed()
conversation.append_response(answer)
return conversation
def _legacy_parse_and_tokenize(self, conversation: Conversation) -> Dict:
eos_token_id = self.tokenizer.eos_token_id
input_ids = []
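# Build one flat token sequence in the DialoGPT style: every turn is appended in order,
# each followed by the EOS token when the tokenizer defines one.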
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
else:
input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
if len(input_ids) > self.tokenizer.model_max_length:
input_ids = input_ids[-self.tokenizer.model_max_length :]
return input_ids
|
233zzh/TitanDataOperationSystem | 1,737 | 代码/web代码/titanApp/src/main/java/edu/neu/titan/titanApp/service/IRetentionService.java | package edu.neu.titan.titanApp.service;
import edu.neu.titan.titanApp.common.beans.Condition;
import org.springframework.data.util.Pair;
import java.util.List;
/**
* Created by IntelliJ IDEA.
*
* @Author: Wang Kuo
* @Email: 2383536228@qq.com
* @Date: 2020/6/17
* @Time: 12:00
* @Version: 1.0
* @Description: Service interface for handling retention-analysis business logic
*/
public interface IRetentionService {
/**
* Get the data needed for daily retention counts of new users
* @param condition instance encapsulating the query conditions
* @return result array
*/
Integer[][] getRetentionDataIncreaseDay(Condition condition);
/**
* Get the data needed for weekly retention counts of new users
* @param condition instance encapsulating the query conditions
* @return result array
*/
Integer[][] getRetentionDataIncreaseWeek(Condition condition);
/**
* Get the data needed for monthly retention counts of new users
* @param condition instance encapsulating the query conditions
* @return result array
*/
Integer[][] getRetentionDataIncreaseMonth(Condition condition);
/**
* Get the data needed for daily retention counts of active users
* @param condition instance encapsulating the query conditions
* @return result array
*/
Integer[][] getRetentionDataActiveDay(Condition condition);
/**
* Get the data needed for weekly retention counts of active users
* @param condition instance encapsulating the query conditions
* @return result array
*/
Integer[][] getRetentionDataActiveWeek(Condition condition);
/**
* Get the data needed for monthly retention counts of active users
* @param condition instance encapsulating the query conditions
* @return result array
*/
Integer[][] getRetentionDataActiveMonth(Condition condition);
/**
* Get user freshness data for a recent period of time
* @return result array
*/
Pair<String[], Integer[][]> getRetentionDataFreshness();
/**
* Get user activity data for a recent period of time
* @return result array
*/
Pair<String[], Integer[][]> getRetentionDataActivity();
/**
* Get retention-rate data for newly added users over a period of time
* @param condition query conditions
* @return result array
*/
Double[] getOneDayAfterRetentionRate(Condition condition);
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 4,940 | src/transformers/pipelines/image_classification.py | from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
"""
Image classification pipeline using any `AutoModelForImageClassification`. This pipeline predicts the class of an
image.
Example:
```python
>>> from transformers import pipeline
>>> classifier = pipeline(model="microsoft/beit-base-patch16-224-pt22k-ft22k")
>>> classifier("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
[{'score': 0.442, 'label': 'macaw'}, {'score': 0.088, 'label': 'popinjay'}, {'score': 0.075, 'label': 'parrot'}, {'score': 0.073, 'label': 'parodist, lampooner'}, {'score': 0.046, 'label': 'poll, poll_parrot'}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This image classification pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"image-classification"`.
See the list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=image-classification).
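For example, to keep only the three highest-scoring labels (a sketch; the scores depend on the
checkpoint):
```python
>>> classifier("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", top_k=3)  # doctest: +SKIP
```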
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
requires_backends(self, "vision")
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
)
def _sanitize_parameters(self, top_k=None):
postprocess_params = {}
if top_k is not None:
postprocess_params["top_k"] = top_k
return {}, {}, postprocess_params
def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
"""
Assign labels to the image(s) passed as inputs.
Args:
images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
The pipeline handles three types of images:
- A string containing an HTTP link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the
same format: all as HTTP links, all as local paths, or all as PIL images.
top_k (`int`, *optional*, defaults to 5):
The number of top labels that will be returned by the pipeline. If the provided number is higher than
the number of labels available in the model configuration, it will default to the number of labels.
Return:
A dictionary or a list of dictionaries containing the result. If the input is a single image, the call
returns a dictionary; if the input is a list of several images, it returns a list of dictionaries
corresponding to the images.
The dictionaries contain the following keys:
- **label** (`str`) -- The label identified by the model.
- **score** (`float`) -- The score attributed by the model to that label.
"""
return super().__call__(images, **kwargs)
def preprocess(self, image):
image = load_image(image)
model_inputs = self.image_processor(images=image, return_tensors=self.framework)
return model_inputs
def _forward(self, model_inputs):
model_outputs = self.model(**model_inputs)
return model_outputs
def postprocess(self, model_outputs, top_k=5):
if top_k > self.model.config.num_labels:
top_k = self.model.config.num_labels
if self.framework == "pt":
probs = model_outputs.logits.softmax(-1)[0]
scores, ids = probs.topk(top_k)
elif self.framework == "tf":
probs = stable_softmax(model_outputs.logits, axis=-1)[0]
topk = tf.math.top_k(probs, k=top_k)
scores, ids = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"Unsupported framework: {self.framework}")
scores = scores.tolist()
ids = ids.tolist()
return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
|