| instruction (string, 100 distinct values) | code (string, lengths 78–193k) | response (string, lengths 259–170k) | file (string, lengths 59–203) |
|---|---|---|---|
Write reusable docstrings | # -*- coding:utf-8 -*-
from splinter.driver.webdriver.chrome import Options, Chrome
from splinter.browser import Browser
from contextlib import closing
import requests, json, time, re, os, sys, time
from bs4 import BeautifulSoup
class DouYin(object):
def __init__(self, width = 500, height = 300):
# 无头浏览器
chrome_o... | --- +++ @@ -7,12 +7,24 @@
class DouYin(object):
def __init__(self, width = 500, height = 300):
+ """
+ 抖音App视频下载
+ """
# 无头浏览器
chrome_options = Options()
chrome_options.add_argument('user-agent="Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36... | https://raw.githubusercontent.com/Jack-Cherish/python-spider/HEAD/douyin_pro.py |
Add docstrings for utility scripts | # Copyright 2025 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in ... | --- +++ @@ -12,6 +12,16 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+r""""Converts music files to NoteSequence protos and writes TFRecord file.
+
+Currently supports MIDI (.mid, .midi) and MusicXML (.xml, .mxl) files.
+
+Example usage:
+ $ python magenta... | https://raw.githubusercontent.com/magenta/magenta/HEAD/magenta/scripts/convert_dir_to_note_sequences.py |
Write clean docstrings for readability | # Copyright 2025 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in ... | --- +++ @@ -12,6 +12,14 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""Compare a directory of abc and midi files.
+
+Assumes a directory of abc files converted with something like:
+# First, remove 'hornpipe' rhythm marker because abc2midi changes note d... | https://raw.githubusercontent.com/magenta/magenta/HEAD/magenta/scripts/abc_compare.py |
Write proper docstrings for these functions | # Copyright 2025 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in ... | --- +++ @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""Defines SVG decoder loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -34,6 +35,7 @@
def _get_mdn_loss(log... | https://raw.githubusercontent.com/magenta/magenta/HEAD/magenta/models/svg_vae/svg_decoder_loss.py |
Write docstrings for data processing functions | # -*- coding:utf-8 -*-
from contextlib import closing
import requests, json, re, os, sys
import urllib
class DouYin(object):
def __init__(self, width = 500, height = 300):
self.headers = {
'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/5... | --- +++ @@ -5,6 +5,9 @@
class DouYin(object):
def __init__(self, width = 500, height = 300):
+ """
+ 抖音App视频下载
+ """
self.headers = {
'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
'sec-fetch-mode': 'cors',
@@ -18,6 +... | https://raw.githubusercontent.com/Jack-Cherish/python-spider/HEAD/douyin/douyin.py |
Add docstrings following best practices | # -*-coding:utf-8 -*-
# Website: http://cuijiahua.com
# Author: Jack Cui
# Date: 2018.6.9
import requests, json, re, sys, os, urllib, argparse, time
from urllib.request import urlretrieve
from contextlib import closing
from urllib import parse
import xml2ass
class BiliBili:
def __init__(self, dirname, keyword):
se... | --- +++ @@ -37,6 +37,14 @@ self.dir = dirname
def video_downloader(self, video_url, video_name):
+ """
+ 视频下载
+ Parameters:
+ video_url: 带水印的视频地址
+ video_name: 视频名
+ Returns:
+ 无
+ """
size = 0
with closing(self.sess.get(video_url, headers=self.dn_headers, stream=True, verify=False)) as response:... | https://raw.githubusercontent.com/Jack-Cherish/python-spider/HEAD/bilibili/bilibili.py |
Document all endpoints with docstrings | # Copyright 2025 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in ... | --- +++ @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""convert all files in a folder to jpg."""
from __future__ import print_function
import argparse
@@ -60,6 +61,23 @@
def convert2jpg(path_in, path_out, args):
+ """Convert all fi... | https://raw.githubusercontent.com/magenta/magenta/HEAD/magenta/video/tools/convert2jpg.py |
Document this module using docstrings | # -*- coding:utf-8 -*-
from bs4 import BeautifulSoup
from contextlib import closing
import requests, json, time, re, os, sys, time
class DouYin(object):
def __init__(self):
#SSL认证
pass
def get_video_urls(self, user_id):
video_names = []
video_urls = []
unique_id = ''
while unique_id != user_id:
searc... | --- +++ @@ -5,10 +5,22 @@
class DouYin(object):
def __init__(self):
+ """
+ 抖音App视频下载
+ """
#SSL认证
pass
def get_video_urls(self, user_id):
+ """
+ 获得视频播放地址
+ Parameters:
+ nickname:查询的用户名
+ Returns:
+ video_names: 视频名字列表
+ video_urls: 视频链接列表
+ aweme_count: 视频数量
+ """
video_names = []
... | https://raw.githubusercontent.com/Jack-Cherish/python-spider/HEAD/douyin.py |
Add docstrings to improve code quality | # -*-coding:utf-8 -*-
# Author:Jack Cui
# Website:http://cuijiahua.com
# Date:2018-7-7
import os
import re
import sys
import bs4
import json
import math
import time
import math
import argparse
import requests
from contextlib import closing
def search_goods(keyword, pages):
# 创建session
sess = requests.Session()
good... | --- +++ @@ -15,6 +15,14 @@ from contextlib import closing
def search_goods(keyword, pages):
+ """
+ 搜索商品
+ Parameters:
+ keyword - str 搜索关键词
+ pages - int 搜索页数
+ Returns:
+ goods_urls - list 商品链接
+ """
# 创建session
sess = requests.Session()
goods_urls = []
@@ -92,6 +100,13 @@ return goods_urls
def goods_... | https://raw.githubusercontent.com/Jack-Cherish/python-spider/HEAD/dingdong/jd.py |
Write docstrings for this repository | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag... | --- +++ @@ -11,6 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+"""Join pairs Finds frames that matches and create pairs.
+
+The goal is to create pairs with a frame to the nex... | https://raw.githubusercontent.com/magenta/magenta/HEAD/magenta/video/next_frame_prediction_pix2pix/join_pairs.py |
Help me document legacy Python code | import collections.abc
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from transformers.activations import ACT2FN
from transformers.pytorch_utils import (
find_pruneable_heads_and_indices,
meshgrid,
pr... | --- +++ @@ -38,6 +38,9 @@
# Copied from transformers.models.swin.modeling_swin.window_partition
def window_partition(input_feature, window_size):
+ """
+ Partitions the given input into windows.
+ """
batch_size, height, width, num_channels = input_feature.shape
input_feature = input_feature.view(... | https://raw.githubusercontent.com/datalab-to/surya/HEAD/surya/common/donut/encoder.py |
Add return value explanations in docstrings | import warnings
from typing import Optional, Tuple, TypedDict
from dataclasses import dataclass
import torch
from torch import nn
import torch.nn.functional as F
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.cache_utils import Cache
from transformers.modeling_attn_mask_utils import... | --- +++ @@ -31,6 +31,19 @@
class FlashAttentionKwargs(TypedDict, total=False):
+ """
+ Keyword arguments for Flash Attention with Compile.
+
+ Attributes:
+ cu_seq_lens_q (`torch.LongTensor`, *optional*)
+ Gets cumlative sequence length for query state.
+ cu_seq_lens_k (`torch.Long... | https://raw.githubusercontent.com/datalab-to/surya/HEAD/surya/common/surya/__init__.py |
Write proper docstrings for these functions | import math
from typing import Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.activations import ACT2FN
from surya.common.pretrained import SuryaPreTrainedModel
from surya.common.surya.encoder.config import SuryaEncoderConfig
from surya.common.xla import get_neare... | --- +++ @@ -89,6 +89,9 @@
class Qwen2RMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
+ """
+ Qwen2RMSNorm is equivalent to T5LayerNorm
+ """
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
@@ -313,6 ... | https://raw.githubusercontent.com/datalab-to/surya/HEAD/surya/common/surya/encoder/__init__.py |
Generate docstrings with examples | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless r... | --- +++ @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+"""Modified image processor class for Segformer based on transformers"""
import warnings
from typing import A... | https://raw.githubusercontent.com/datalab-to/surya/HEAD/surya/detection/processor.py |
Add docstrings to meet PEP guidelines | from typing import Optional
import torch
import torch.nn.functional as F
from flash_attn import flash_attn_varlen_func as _flash_attn_varlen_func
from flash_attn import flash_attn_with_kvcache as _flash_attn_with_kvcache
from flash_attn.bert_padding import index_first_axis as _index_first_axis
from flash_attn.bert_padd... | --- +++ @@ -7,6 +7,21 @@ from flash_attn.bert_padding import pad_input
def _get_unpad_data(attention_mask: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, int]:
+ """
+ Retrieves indexing data required to repad unpadded (ragged) tensors.
+
+ Arguments:
+ attention_mask (`torch.Tensor`):
+ ... | https://raw.githubusercontent.com/datalab-to/surya/HEAD/surya/common/surya/flash_attn_utils.py |
Write docstrings describing each step |
from __future__ import annotations
from typing import Optional, Union, Tuple, List, Any
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.modeling_outputs import SemanticSegmenterOutput
from surya.common.pretrained import SuryaPreTrainedModel
from su... | --- +++ @@ -1,3 +1,13 @@+"""
+This is an implementation of efficientvit, with some modifications (decode head, etc).
+
+Original paper at https://arxiv.org/abs/2205.14756
+
+Code adapted from timm, https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/efficientvit_mit.py
+Original code (that timm ad... | https://raw.githubusercontent.com/datalab-to/surya/HEAD/surya/detection/model/encoderdecoder.py |
Help me comply with documentation standards | from __future__ import annotations
import math
from typing import Optional, Set, List, Tuple, Union, Dict
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F, MSELoss, CrossEntropyLoss, BCEWithLogitsLoss
from transformers import apply_chunking_to_forward
from transformers.activat... | --- +++ @@ -71,6 +71,17 @@ def forward(
self, input_ids: torch.Tensor, input_embeds: Optional[torch.Tensor] = None
) -> torch.Tensor:
+ """
+ Parameters:
+ input_ids (torch.Tensor):
+ torch.tensor(bs, max_seq_length) The token ids to embed.
+ input_em... | https://raw.githubusercontent.com/datalab-to/surya/HEAD/surya/ocr_error/model/encoder.py |
Add docstrings to incomplete code | import collections
import os
import json
import unicodedata
from typing import List, Optional, Tuple
from tokenizers import normalizers
from transformers.tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from transformers.tokenization_utils_fast import PreTrainedTokenizerFast... | --- +++ @@ -15,6 +15,7 @@
# Copied from transformers.models.bert.tokenization_bert.load_vocab
def load_vocab(vocab_file):
+ """Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
@@ -... | https://raw.githubusercontent.com/datalab-to/surya/HEAD/surya/ocr_error/tokenizer.py |
Can you add docstrings to this Python file? | from typing import Optional, Union, Tuple
import torch
import torch.nn as nn
from surya.common.donut.encoder import DonutSwinPreTrainedModel, DonutSwinModelOutput, DonutSwinEmbeddings, DonutSwinEncoder
class DonutSwinModel(DonutSwinPreTrainedModel):
def __init__(self, config, add_pooling_layer=True, use_mask_to... | --- +++ @@ -27,6 +27,10 @@ return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
for layer, he... | https://raw.githubusercontent.com/datalab-to/surya/HEAD/surya/table_rec/model/encoder.py |
Create documentation strings for testing functions | # Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license.
# LICENSE is in incl_licenses directory.
import torch
from torch import nn, pow, sin
from torch.nn import Parameter
class Snake(nn.Module):
def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=... | --- +++ @@ -7,8 +7,31 @@
class Snake(nn.Module):
+ '''
+ Implementation of a sine-based periodic activation function
+ Shape:
+ - Input: (B, C, T)
+ - Output: (B, C, T), same shape as the input
+ Parameters:
+ - alpha - trainable parameter
+ References:
+ - This activation... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/BigVGAN/activations.py |
Document this script properly | # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
# LICENSE is in incl_licenses directory.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
if "sinc" in dir(torch):
sinc = torch.sinc
else:
# This code is adopted from adefossez's julius.... | --- +++ @@ -14,6 +14,10 @@ # https://adefossez.github.io/julius/julius/core.html
# LICENSE is in incl_licenses directory.
def sinc(x: torch.Tensor):
+ """
+ Implementation of sinc, i.e. sin(pi * x) / (pi * x)
+ __Warning__: Different to julius.sinc, the input is multiplied by `pi`!
... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/BigVGAN/alias_free_activation/torch/filter.py |
Write reusable docstrings |
import torch # noqa: F401
import torch.nn as nn
import torch.nn.functional as F
from indextts.BigVGAN.nnet.CNN import Conv1d as _Conv1d
from indextts.BigVGAN.nnet.linear import Linear
from indextts.BigVGAN.nnet.normalization import BatchNorm1d as _BatchNorm1d
def length_to_mask(length, max_len=None, dtype=None, de... | --- +++ @@ -1,3 +1,8 @@+"""A popular speaker recognition and diarization model.
+
+Authors
+ * Hwidong Na 2020
+"""
import torch # noqa: F401
import torch.nn as nn
@@ -9,6 +14,35 @@
def length_to_mask(length, max_len=None, dtype=None, device=None):
+ """Creates a binary mask for each sequence.
+
+ Refere... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/BigVGAN/ECAPA_TDNN.py |
Improve my code by adding docstrings | # Copyright (c) 2024 NVIDIA CORPORATION.
# Licensed under the MIT license.
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.
import json
import os
from pathlib import Path
from typing import Dict, Optional, Union
import torch
import torch.nn as nn
f... | --- +++ @@ -30,6 +30,17 @@
class AMPBlock1(torch.nn.Module):
+ """
+ AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer.
+ AMPBlock1 has additional self.convs2 that contains additional Conv1d layers with a fixed dilation=1 foll... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/BigVGAN/bigvgan.py |
Create structured documentation for my script |
import logging
import torch
import torch.nn as nn
class Linear(torch.nn.Module):
def __init__(
self,
n_neurons,
input_shape=None,
input_size=None,
bias=True,
max_norm=None,
combine_dims=False,
):
super().__init__()
self.max_norm = max_... | --- +++ @@ -1,3 +1,9 @@+"""Library implementing linear transformation.
+
+Authors
+ * Mirco Ravanelli 2020
+ * Davide Borra 2021
+"""
import logging
@@ -6,6 +12,32 @@
class Linear(torch.nn.Module):
+ """Computes a linear transformation y = wx + b.
+
+ Arguments
+ ---------
+ n_neurons : int
+ ... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/BigVGAN/nnet/linear.py |
Generate consistent docstrings |
import torch
import torch.nn as nn
class BatchNorm1d(nn.Module):
def __init__(
self,
input_shape=None,
input_size=None,
eps=1e-05,
momentum=0.1,
affine=True,
track_running_stats=True,
combine_batch_time=False,
skip_transpose=False,
):
... | --- +++ @@ -1,9 +1,48 @@+"""Library implementing normalization.
+
+Authors
+ * Mirco Ravanelli 2020
+ * Guillermo Cámbara 2021
+ * Sarthak Yadav 2022
+"""
import torch
import torch.nn as nn
class BatchNorm1d(nn.Module):
+ """Applies 1d batch normalization to the input tensor.
+
+ Arguments
+ ---------... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/BigVGAN/nnet/normalization.py |
Add docstrings for production code | import sys
from typing import List, Optional
import torch
from torch import nn
from .attention import (
ForwardContext,
get_forward_context,
reset_forward_context,
set_forward_context,
)
from .kv_manager import KVCacheManager, Seq
class Sampler(nn.Module):
def __init__(self):
super().__i... | --- +++ @@ -43,6 +43,17 @@ num_blocks: int = 128,
use_cuda_graph: bool = True,
):
+ """
+ Args:
+ model: The GPT transformer model (should have accel attention)
+ lm_head: Language model head for generating logits
+ num_layers: Number of transformer l... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/accel/accel_engine.py |
Add standardized docstrings across the file | # Copyright (c) 2019 Shigeki Karita
# 2020 Mobvoi Inc (Binbin Zhang)
# 2022 Xingchen Song (sxc19@mails.tsinghua.edu.cn)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the Licens... | --- +++ @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""Multi-Head Attention layer definition."""
import math
from typing import Tuple
@@ -23,7 +24,16 @@
class MultiHeadedAttention(nn.Module):
+ """Multi-Head Attention layer.
+
+... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/gpt/conformer/attention.py |
Document functions with clear intent | import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
import transformers
from transformers import GPT2Config, LogitsProcessorList
from indextts.gpt.transformers_gpt2 import GPT2PreTrainedModel, GPT2Model
# from transformers import GPT2Config, GPT2PreTrainedModel, LogitsProcessorList
fr... | --- +++ @@ -24,6 +24,9 @@
class ResBlock(nn.Module):
+ """
+ Basic residual convolutional block that uses GroupNorm.
+ """
def __init__(self, chan):
super().__init__()
@@ -195,6 +198,11 @@
@staticmethod
def _reorder_cache(past, beam_idx):
+ """
+ This function is us... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/gpt/model.py |
Add docstrings for utility scripts | import os
os.environ['HF_HUB_CACHE'] = './checkpoints/hf_cache'
import time
from subprocess import CalledProcessError
from typing import Dict, List
import torch
import torchaudio
from torch.nn.utils.rnn import pad_sequence
from omegaconf import OmegaConf
from tqdm import tqdm
import warnings
warnings.filterwarnings... | --- +++ @@ -29,6 +29,14 @@ self, cfg_path="checkpoints/config.yaml", model_dir="checkpoints", use_fp16=True, device=None,
use_cuda_kernel=None,
):
+ """
+ Args:
+ cfg_path (str): path to the config file.
+ model_dir (str): path to the model directory.
+ ... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/infer.py |
Help me add docstrings to my project | # Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or... | --- +++ @@ -14,6 +14,7 @@ # Modified from ESPnet(https://github.com/espnet/espnet)
+"""Subsampling layer definition."""
from typing import Tuple, Union
@@ -32,8 +33,17 @@
class LinearNoSubsampling(BaseSubsampling):
- def __init__(self, idim: int, odim: int, dropout_rate: float,
- pos_enc... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/gpt/conformer/subsampling.py |
Add well-formatted docstrings | import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
import transformers
from transformers import GPT2Config, LogitsProcessorList
from indextts.gpt.transformers_gpt2 import GPT2PreTrainedModel, GPT2Model
# from transformers import GPT2Config, GPT2PreTrainedModel, LogitsProcessorList
fr... | --- +++ @@ -24,6 +24,9 @@
class ResBlock(nn.Module):
+ """
+ Basic residual convolutional block that uses GroupNorm.
+ """
def __init__(self, chan):
super().__init__()
@@ -195,6 +198,11 @@
@staticmethod
def _reorder_cache(past, beam_idx):
+ """
+ This function is us... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/gpt/model_v2.py |
Add docstrings including usage examples |
from typing import Optional, Tuple
import torch
import torch.nn as nn
from indextts.gpt.conformer.attention import (MultiHeadedAttention,
RelPositionMultiHeadedAttention)
from indextts.gpt.conformer.embedding import (NoPositionalEncoding,
... | --- +++ @@ -18,12 +18,24 @@
class PositionwiseFeedForward(torch.nn.Module):
+ """Positionwise feed forward layer.
+
+ FeedForward are appied on each position of the sequence.
+ The output dim is same with the input dim.
+
+ Args:
+ idim (int): Input dimenstion.
+ hidden_units (int): The nu... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/gpt/conformer_encoder.py |
Provide docstrings following PEP 257 | # Copyright (c) 2020 Mobvoi Inc. (authors: Binbin Zhang, Di Wu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appli... | --- +++ @@ -13,6 +13,7 @@ # limitations under the License.
# Modified from ESPnet(https://github.com/espnet/espnet)
+"""Positonal Encoding Module."""
import math
from typing import Tuple, Union
@@ -22,11 +23,21 @@
class PositionalEncoding(torch.nn.Module):
+ """Positional encoding.
+
+ :param int d_mod... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/gpt/conformer/embedding.py |
Add docstrings including usage examples | from typing import Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from torch.nn.utils import weight_norm
from indextts.s2mel.dac.nn.layers import WNConv1d
class VectorQuantizeLegacy(nn.Module):
def __init__(self, input_dim: int, codebook_... | --- +++ @@ -10,6 +10,11 @@ from indextts.s2mel.dac.nn.layers import WNConv1d
class VectorQuantizeLegacy(nn.Module):
+ """
+ Implementation of VQ similar to Karpathy's repo:
+ https://github.com/karpathy/deep-vector-quantization
+ removed in-out projection
+ """
def __init__(self, input_dim: int... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/dac/nn/quantize.py |
Add docstrings including usage examples | from typing import Dict, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from transformers import PretrainedConfig
from transformers.activations import ACT2FN
from transformers.modeling_attn_mask_utils import AttentionMaskConverter
from transformers.modeling_outputs import BaseM... | --- +++ @@ -86,6 +86,7 @@
# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
@@ -93,6 +94,23 @@
# Copied from ... | https://raw.githubusercontent.com/datalab-to/surya/HEAD/surya/common/adetr/decoder.py |
Add docstrings to make code maintainable | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import typing as tp
import warnings
import torch
from torch import nn
from torch.nn import functional as F
... | --- +++ @@ -4,6 +4,7 @@ # This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
+"""Convolutional layers wrappers and utilities."""
import math
import typing as tp
@@ -20,6 +21,10 @@
class ConvLayerNorm(nn.LayerNorm):
+ """
+ Convolution-f... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/dac/model/encodec.py |
Create docstrings for each class method | from typing import Callable, List, Optional, Tuple, Union
import torch
from torch import nn
from transformers.activations import ACT2FN
from transformers.cache_utils import (
Cache,
)
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
from transformers.modeling_outputs import (
BaseM... | --- +++ @@ -42,12 +42,32 @@
def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+ """Applies ... | https://raw.githubusercontent.com/datalab-to/surya/HEAD/surya/common/surya/decoder/__init__.py |
Add inline docstrings for readability | import torch
from torch import nn
import math
from modules.gpt_fast.model import ModelArgs, Transformer
# from modules.torchscript_modules.gpt_fast_model import ModelArgs, Transformer
from modules.wavenet import WN
from modules.commons import sequence_mask
from torch.nn.utils import weight_norm
def modulate(x, shift... | --- +++ @@ -18,6 +18,9 @@ #################################################################################
class TimestepEmbedder(nn.Module):
+ """
+ Embeds scalar timesteps into vector representations.
+ """
def __init__(self, hidden_size, frequency_embedding_size=256):
super().__init__()
... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/.ipynb_checkpoints/diffusion_transformer-checkpoint.py |
Add clean documentation to messy code | import os
from subprocess import CalledProcessError
os.environ['HF_HUB_CACHE'] = './checkpoints/hf_cache'
import json
import re
import time
import librosa
import torch
import torchaudio
from torch.nn.utils.rnn import pad_sequence
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filt... | --- +++ @@ -40,6 +40,17 @@ self, cfg_path="checkpoints/config.yaml", model_dir="checkpoints", use_fp16=False, device=None,
use_cuda_kernel=None,use_deepspeed=False, use_accel=False, use_torch_compile=False
):
+ """
+ Args:
+ cfg_path (str): path to the config file.... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/infer_v2.py |
Generate docstrings with parameter types | import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from munch import Munch
import json
import argparse
from torch.nn.parallel import DistributedDataParallel as DDP
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "... | --- +++ @@ -47,6 +47,7 @@
def kl_divergence(m_p, logs_p, m_q, logs_q):
+ """KL(P||Q)"""
kl = (logs_q - logs_p) - 0.5
kl += (
0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
@@ -55,6 +56,7 @@
def rand_gumbel(shape):
+ """Sample from the Gumbel distribution... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/.ipynb_checkpoints/commons-checkpoint.py |
Expand my code with proper documentation strings | from abc import ABC
import torch
import torch.nn.functional as F
from modules.diffusion_transformer import DiT
from modules.commons import sequence_mask
from tqdm import tqdm
class BASECFM(torch.nn.Module, ABC):
def __init__(
self,
args,
):
super().__init__()
self.sigma_min =... | --- +++ @@ -29,6 +29,25 @@
@torch.inference_mode()
def inference(self, mu, x_lens, prompt, style, f0, n_timesteps, temperature=1.0, inference_cfg_rate=0.5):
+ """Forward diffusion
+
+ Args:
+ mu (torch.Tensor): semantic info of reference audio and altered audio
+ shape... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/.ipynb_checkpoints/flow_matching-checkpoint.py |
Write Python docstrings for this snippet | # Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license.
# LICENSE is in incl_licenses directory.
import torch
from torch import nn, sin, pow
from torch.nn import Parameter
class Snake(nn.Module):
def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=F... | --- +++ @@ -7,7 +7,30 @@
class Snake(nn.Module):
+ '''
+ Implementation of a sine-based periodic activation function
+ Shape:
+ - Input: (B, C, T)
+ - Output: (B, C, T), same shape as the input
+ Parameters:
+ - alpha - trainable parameter
+ References:
+ - This activation... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/bigvgan/activations.py |
Write documentation strings for class attributes | import typing
from typing import List
import torch
import torch.nn.functional as F
from audiotools import AudioSignal
from audiotools import STFTParams
from torch import nn
class L1Loss(nn.L1Loss):
def __init__(self, attribute: str = "audio_data", weight: float = 1.0, **kwargs):
self.attribute = attribu... | --- +++ @@ -9,6 +9,19 @@
class L1Loss(nn.L1Loss):
+ """L1 Loss between AudioSignals. Defaults
+ to comparing ``audio_data``, but any
+ attribute of an AudioSignal can be used.
+
+ Parameters
+ ----------
+ attribute : str, optional
+ Attribute of signal to compare, defaults to ``audio_data`... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/dac/nn/loss.py |
Create structured documentation for my script | # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
# LICENSE is in incl_licenses directory.
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
if "sinc" in dir(torch):
sinc = torch.sinc
else:
# This code is adopted from adefossez's julius.c... | --- +++ @@ -13,6 +13,10 @@ # https://adefossez.github.io/julius/julius/core.html
# LICENSE is in incl_licenses directory.
def sinc(x: torch.Tensor):
+ """
+ Implementation of sinc, i.e. sin(pi * x) / (pi * x)
+ __Warning__: Different to julius.sinc, the input is multiplied by `pi`!
... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/bigvgan/alias_free_activation/torch/filter.py |
Document classes and their methods | # Copyright (c) 2024 NVIDIA CORPORATION.
# Licensed under the MIT license.
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.
import math
import os
import random
import torch
import torch.utils.data
import numpy as np
from librosa.util import normaliz... | --- +++ @@ -67,6 +67,24 @@ fmax: int = None,
center: bool = False,
) -> torch.Tensor:
+ """
+ Calculate the mel spectrogram of an input signal.
+ This function uses slaney norm for the librosa mel filterbank (using librosa.filters.mel) and uses Hann window for STFT (using torch.stft).
+
+ Args:
+ ... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/bigvgan/meldataset.py |
Add concise docstrings to each method | import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from munch import Munch
import json
import argparse
from torch.nn.parallel import DistributedDataParallel as DDP
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "... | --- +++ @@ -47,6 +47,7 @@
def kl_divergence(m_p, logs_p, m_q, logs_q):
+ """KL(P||Q)"""
kl = (logs_q - logs_p) - 0.5
kl += (
0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
@@ -55,6 +56,7 @@
def rand_gumbel(shape):
+ """Sample from the Gumbel distribution... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/commons.py |
Add docstrings for internal functions | # Copyright (c) 2024 NVIDIA CORPORATION.
# Licensed under the MIT license.
# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.
import os
import json
from pathlib import Path
from typing import Optional, Union, Dict
import torch
import torch.nn as nn
f... | --- +++ @@ -29,6 +29,17 @@
class AMPBlock1(torch.nn.Module):
+ """
+ AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer.
+ AMPBlock1 has additional self.convs2 that contains additional Conv1d layers with a fixed dilation=1 foll... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/bigvgan/bigvgan.py |
Generate documentation strings for clarity | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import typing as tp
import warnings
import torch
from torch import nn
from torch.nn import functional as F
... | --- +++ @@ -4,6 +4,7 @@ # This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
+"""Convolutional layers wrappers and utilities."""
import math
import typing as tp
@@ -20,6 +21,10 @@
class ConvLayerNorm(nn.LayerNorm):
+ """
+ Convolution-f... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/encodec.py |
Add docstrings following best practices | import torch
from torch import nn
import math
from indextts.s2mel.modules.gpt_fast.model import ModelArgs, Transformer
from indextts.s2mel.modules.wavenet import WN
from indextts.s2mel.modules.commons import sequence_mask
from torch.nn.utils import weight_norm
def modulate(x, shift, scale):
return x * (1 + scale... | --- +++ @@ -17,6 +17,9 @@ #################################################################################
class TimestepEmbedder(nn.Module):
+ """
+ Embeds scalar timesteps into vector representations.
+ """
def __init__(self, hidden_size, frequency_embedding_size=256):
super().__init__()
... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/diffusion_transformer.py |
Add docstrings to existing functions | import math
import torch
from torch import nn
from typing import Optional, Any
from torch import Tensor
import torch.nn.functional as F
import torchaudio
import torchaudio.functional as audio_F
import random
random.seed(0)
def _get_activation_fn(activ):
if activ == 'relu':
return nn.ReLU()
elif activ... | --- +++ @@ -165,6 +165,16 @@
def get_alignment_energies(self, query, processed_memory,
attention_weights_cat):
+ """
+ PARAMS
+ ------
+ query: decoder output (batch, n_mel_channels * n_frames_per_step)
+ processed_memory: processed encoder output... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/layers.py |
Write clean docstrings for readability | # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Kai Hu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applica... | --- +++ @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""HIFI-GAN"""
import typing as tp
import numpy as np
@@ -37,7 +38,30 @@
"""
class Snake(nn.Module):
+ '''
+ Implementation of a sine-based periodic activation function
+ ... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/hifigan/generator.py |
Write docstrings including parameters and return values | import math
import torch
from torch import nn
from torch.nn import functional as F
from . import commons
import logging
logger = logging.getLogger(__name__)
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-5):
super().__init__()
self.channels = channels
self.eps = eps
... | --- +++ @@ -182,6 +182,10 @@ self.norm_layers_2.append(LayerNorm(hidden_channels))
def forward(self, x, x_mask, h, h_mask):
+ """
+ x: decoder input
+ h: encoder output
+ """
self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
device=x.device... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/openvoice/attentions.py |
Create structured documentation for my script | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import sys
import time
from pathlib import Path
from typing import Optional, Tuple
import torch
import to... | --- +++ @@ -146,6 +146,9 @@ callback = lambda x: x,
**sampling_kwargs
) -> torch.Tensor:
+ """
+ Takes a conditioning sequence (prompt) as input and continues to generate as many tokens as requested.
+ """
is_speculative = draft_model is not None
# create an empty tensor of the expected f... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/gpt_fast/generate.py |
Write proper docstrings for these functions | import torch
import torch.utils.data
from librosa.filters import mel as librosa_mel_fn
MAX_WAV_VALUE = 32768.0
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression_torch(x, C=1):
return torch.exp(x) / C
def spectral... | --- +++ @@ -6,10 +6,20 @@
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
+ """
+ PARAMS
+ ------
+ C: compression factor
+ """
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression_torch(x, C=1):
+ """
+ PARAMS
+ ------
+ C: compression ... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/openvoice/mel_processing.py |
Add docstrings to make code maintainable | from abc import ABC
import torch
import torch.nn.functional as F
from indextts.s2mel.modules.diffusion_transformer import DiT
from indextts.s2mel.modules.commons import sequence_mask
from tqdm import tqdm
class BASECFM(torch.nn.Module, ABC):
def __init__(
self,
args,
):
super().__ini... | --- +++ @@ -29,6 +29,25 @@
@torch.inference_mode()
def inference(self, mu, x_lens, prompt, style, f0, n_timesteps, temperature=1.0, inference_cfg_rate=0.5):
+ """Forward diffusion
+
+ Args:
+ mu (torch.Tensor): semantic info of reference audio and altered audio
+ shape... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/flow_matching.py |
Create Google-style docstrings for my code | from typing import Optional
import torch
from torch import nn
from torchaudio.functional.functional import _hz_to_mel, _mel_to_hz
from .spectral_ops import IMDCT, ISTFT
from .modules import symexp
class FourierHead(nn.Module):
def forward(self, x: torch.Tensor) -> torch.Tensor:
raise NotImplementedErro... | --- +++ @@ -9,12 +9,31 @@
class FourierHead(nn.Module):
+ """Base class for inverse fourier modules."""
def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """
+ Args:
+ x (Tensor): Input tensor of shape (B, L, H), where B is the batch size,
+ L is the seq... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/vocos/heads.py |
Create docstrings for reusable components | from dac.nn.quantize import ResidualVectorQuantize
from torch import nn
from modules.wavenet import WN
import torch
import torchaudio
import torchaudio.functional as audio_F
import numpy as np
from .alias_free_torch import *
from torch.nn.utils import weight_norm
from torch import nn, sin, pow
from einops.layers.torch ... | --- +++ @@ -25,10 +25,36 @@ return weight_norm(nn.ConvTranspose1d(*args, **kwargs))
class SnakeBeta(nn.Module):
+ """
+ A modified Snake function which uses separate parameters for the magnitude of the periodic components
+ Shape:
+ - Input: (B, C, T)
+ - Output: (B, C, T), same shape as t... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/quantize.py |
Fill in missing docstrings in my code | from io import BytesIO
import os
from typing import List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from librosa.util import normalize, pad_center, tiny
from scipy.signal import get_window
import logging
logger = logging.getLogger(__name__)
class STFT(tor... | --- +++ @@ -18,6 +18,21 @@ def __init__(
self, filter_length=1024, hop_length=512, win_length=None, window="hann"
):
+ """
+ This module implements an STFT using 1D convolution and 1D transpose convolutions.
+ This is a bit tricky so there are some cases that probably won't work a... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/rmvpe.py |
Document this module using docstrings | from typing import Optional
import torch
from torch import nn
from torch.nn.utils import weight_norm
from .modules import ConvNeXtBlock, ResBlock1, AdaLayerNorm
class Backbone(nn.Module):
def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:
raise NotImplementedError("Subclasses must implement ... | --- +++ @@ -8,12 +8,34 @@
class Backbone(nn.Module):
+ """Base class for the generator's backbone. It preserves the same temporal resolution across all layers."""
def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:
+ """
+ Args:
+ x (Tensor): Input tensor of shape (B, C,... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/vocos/models.py |
Create structured documentation for my script | import matplotlib
import numpy as np
import torch
from matplotlib import pyplot as plt
from pytorch_lightning import Callback
matplotlib.use("Agg")
def save_figure_to_numpy(fig: plt.Figure) -> np.ndarray:
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="")
data = data.reshape(fig.canvas.g... | --- +++ @@ -8,12 +8,30 @@
def save_figure_to_numpy(fig: plt.Figure) -> np.ndarray:
+ """
+ Save a matplotlib figure to a numpy array.
+
+ Args:
+ fig (Figure): Matplotlib figure object.
+
+ Returns:
+ ndarray: Numpy array representing the figure.
+ """
data = np.fromstring(fig.canv... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/vocos/helpers.py |
Generate docstrings for exported functions | import re
import json
import numpy as np
def get_hparams_from_file(config_path):
with open(config_path, "r", encoding="utf-8") as f:
data = f.read()
config = json.loads(data)
hparams = HParams(**config)
return hparams
class HParams:
def __init__(self, **kwargs):
for k, v in kwarg... | --- +++ @@ -83,6 +83,14 @@ return segments
def split_segments_latin(text, min_len=10):
+ """Split Long sentences into list of short segments.
+
+ Args:
+ str: Input sentences.
+
+ Returns:
+ List[str]: list of output segments.
+ """
# deal with dirty text characters
text = re.... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/openvoice/utils.py |
Add well-formatted docstrings | from typing import List, Tuple
import torch
import torchaudio
from torch import nn
from vocos.modules import safe_log
class MelSpecReconstructionLoss(nn.Module):
def __init__(
self, sample_rate: int = 24000, n_fft: int = 1024, hop_length: int = 256, n_mels: int = 100,
):
super().__init__()
... | --- +++ @@ -8,6 +8,9 @@
class MelSpecReconstructionLoss(nn.Module):
+ """
+ L1 distance between the mel-scaled magnitude spectrograms of the ground truth sample and the generated sample
+ """
def __init__(
self, sample_rate: int = 24000, n_fft: int = 1024, hop_length: int = 256, n_mels: int... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/vocos/loss.py |
Add docstrings that explain purpose and usage | import numpy as np
import scipy
import torch
from torch import nn, view_as_real, view_as_complex
class ISTFT(nn.Module):
def __init__(self, n_fft: int, hop_length: int, win_length: int, padding: str = "same"):
super().__init__()
if padding not in ["center", "same"]:
raise ValueError("... | --- +++ @@ -5,6 +5,19 @@
class ISTFT(nn.Module):
+ """
+ Custom implementation of ISTFT since torch.istft doesn't allow custom padding (other than `center=True`) with
+ windowing. This is because the NOLA (Nonzero Overlap Add) check fails at the edges.
+ See issue: https://github.com/pytorch/pytorch/iss... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/vocos/spectral_ops.py |
Add docstrings following best practices | from typing import Optional, Tuple
import torch
from torch import nn
from torch.nn.utils import weight_norm, remove_weight_norm
class ConvNeXtBlock(nn.Module):
def __init__(
self,
dim: int,
intermediate_dim: int,
layer_scale_init_value: float,
adanorm_num_embeddings: Opti... | --- +++ @@ -6,6 +6,16 @@
class ConvNeXtBlock(nn.Module):
+ """ConvNeXt Block adapted from https://github.com/facebookresearch/ConvNeXt to 1D audio signal.
+
+ Args:
+ dim (int): Number of input channels.
+ intermediate_dim (int): Dimensionality of the intermediate layer.
+ layer_scale_ini... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/vocos/modules.py |
Create docstrings for reusable components | from __future__ import annotations
from typing import Any, Dict, Tuple, Union, Optional
import torch
import yaml
from torch import nn
from .heads import ISTFTHead
from .models import VocosBackbone
class Vocos(nn.Module):
def __init__(
self, args,
):
super().__init__()
self.backbone ... | --- +++ @@ -10,6 +10,12 @@
class Vocos(nn.Module):
+ """
+ The Vocos class represents a Fourier-based neural vocoder for audio synthesis.
+ This class is primarily designed for inference, with support for loading from pretrained
+ model checkpoints. It consists of three main components: a feature extrac... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/modules/vocos/pretrained.py |
Write docstrings describing each step | from transformers import SeamlessM4TFeatureExtractor
from transformers import Wav2Vec2BertModel
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import librosa
import os
import pickle
import math
import json
import safetensors
import json5
# from codec.kmeans.repcodec_model import R... | --- +++ @@ -47,6 +47,15 @@
def _load_config(config_fn, lowercase=False):
+ """Load configurations into a dictionary
+
+ Args:
+ config_fn (str): path to configuration file
+ lowercase (bool, optional): whether changing keys to lower case. Defaults to False.
+
+ Returns:
+ dict: diction... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/wav2vecbert_extract.py |
Add inline docstrings for readability | import html
import re
from typing import List, Union, Dict, Optional, Tuple, Iterable
import numpy as np
import torch
from tokenizers import AddedToken
import json
import os
from transformers import PreTrainedTokenizer, Qwen2Tokenizer as Qwen2OriginalTokenizer
from surya.common.s3 import S3DownloaderMixin
from surya.... | --- +++ @@ -29,6 +29,16 @@ pass
class GreedyMathUTF16Tokenizer(S3DownloaderMixin, PreTrainedTokenizer):
+ """
+ HuggingFace slow tokenizer implementing:
+ - UTF-16 code units as the base [0..65535]
+ - Math tokens as greedy-longest-match ids after UTF-16
+ - Literal special tokens after math... | https://raw.githubusercontent.com/datalab-to/surya/HEAD/surya/common/surya/processor/tokenizer.py |
Add inline docstrings for readability | import os
import random
import re
import torch
import torchaudio
MATPLOTLIB_FLAG = False
def load_audio(audiopath, sampling_rate):
audio, sr = torchaudio.load(audiopath)
# print(f"wave shape: {audio.shape}, sample_rate: {sr}")
if audio.size(0) > 1: # mix to mono
audio = audio[0].unsqueeze(0)
... | --- +++ @@ -27,6 +27,22 @@
def tokenize_by_CJK_char(line: str, do_upper_case=True) -> str:
+ """
+ Tokenize a line of text with CJK char.
+
+ Note: All return charaters will be upper case.
+
+ Example:
+ input = "你好世界是 hello world 的中文"
+ output = "你 好 世 界 是 HELLO WORLD 的 中 文"
+
+ Args:
+ ... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/common.py |
Document this script properly |
import logging
import math
from typing import Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchaudio
class SincConv(nn.Module):
def __init__(
self,
out_channels,
kernel_size,
input_shape=None,
in_channels=None,
... | --- +++ @@ -1,3 +1,13 @@+"""Library implementing convolutional neural networks.
+
+Authors
+ * Mirco Ravanelli 2020
+ * Jianyuan Zhong 2020
+ * Cem Subakan 2021
+ * Davide Borra 2021
+ * Andreas Nautsch 2022
+ * Sarthak Yadav 2022
+"""
import logging
import math
@@ -11,6 +21,49 @@
class SincConv(nn.Module):
+ ... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/BigVGAN/nnet/CNN.py |
Document this module using docstrings | # -*- coding: utf-8 -*-
from functools import lru_cache
import os
import traceback
import re
from typing import List, Union, overload
import warnings
from indextts.utils.common import tokenize_by_CJK_char, de_tokenized_by_CJK_char
from sentencepiece import SentencePieceProcessor
class TextNormalizer:
def __init__... | --- +++ @@ -183,6 +183,10 @@ return result
def correct_pinyin(self, pinyin: str):
+ """
+ 将 jqx 的韵母为 u/ü 的拼音转换为 v
+ 如:ju -> jv , que -> qve, xün -> xvn
+ """
if pinyin[0] not in "jqxJQX":
return pinyin
# 匹配 jqx 的韵母为 u/ü 的拼音
@@ -192,6 +196,10 @@ ... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/front.py |
Add detailed docstrings explaining each function | # Copyright (c) 2024 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Tuple
import numpy as np
import scipy
import torch
from torch import nn, view_as_real, view_as_complex
from torch import nn
from torc... | --- +++ @@ -16,6 +16,16 @@
def safe_log(x: torch.Tensor, clip_val: float = 1e-7) -> torch.Tensor:
+ """
+ Computes the element-wise logarithm of the input tensor with clipping to avoid near-zero values.
+
+ Args:
+ x (Tensor): Input tensor.
+ clip_val (float, optional): Minimum value to clip ... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/amphion_codec/vocos.py |
Document all public functions with docstrings | # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import random
import numpy as np
import torchaudio
import librosa
from torch.nn import functional as F
from torch.nn.utils.rnn import pad_sequence
... | --- +++ @@ -19,6 +19,12 @@
class FAcodecDataset(torch.utils.data.Dataset):
def __init__(self, cfg, dataset, is_valid=False):
+ """
+ Args:
+ cfg: config
+ dataset: dataset name
+ is_valid: whether to use train or valid dataset
+ """
self.data_root_di... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/facodec/facodec_dataset.py |
Include argument descriptions in docstrings | # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
from pathlib import Path
import re
import accelerate
import json5
import numpy as np
import torch
from accelerate.utils import ProjectConf... | --- +++ @@ -24,6 +24,7 @@ super().__init__()
def _init_accelerator(self):
+ """Initialize the accelerator components."""
self.exp_dir = os.path.join(
os.path.abspath(self.cfg.log_dir), self.args.exp_name
)
@@ -51,6 +52,7 @@ pass
def _build_dataloader(s... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/codec_trainer.py |
Document my Python code with docstrings | # Copyright (c) 2024 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from torch.nn.utils im... | --- +++ @@ -20,6 +20,10 @@
class ResidualVQ(nn.Module):
+ """
+ Introduced in SoundStream: An end2end neural audio codec
+ https://arxiv.org/abs/2107.03312
+ """
def __init__(
self,
@@ -62,6 +66,26 @@ )
def forward(self, z, n_quantizers: int = None):
+ """
+ ... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/amphion_codec/quantize/residual_vq.py |
Document this code for team use | # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
import json
import json5
import time
import accelerate
import random
import numpy as np
import shutil
from pathlib import Path
from tqdm im... | --- +++ @@ -246,6 +246,7 @@ return model
def _build_dataloader(self):
+ """Build dataloader which merges a series of datasets."""
Dataset, Collator = self._build_test_dataset()
datasets_list = []
@@ -267,6 +268,11 @@ return test_dataloader
def _load_model(self, c... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/codec_inference.py |
Generate consistent docstrings | # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Iterable
import torch
import numpy as np
import torch.utils.data
from torch.nn.utils.rnn import pad_sequence
from utils.data_utils import *
from... | --- +++ @@ -14,6 +14,12 @@
class CodecDataset(torch.utils.data.Dataset):
def __init__(self, cfg, dataset, is_valid=False):
+ """
+ Args:
+ cfg: config
+ dataset: dataset name
+ is_valid: whether to use train or valid dataset
+ """
assert isinstance(d... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/codec_dataset.py |
Provide docstrings following PEP 257 | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable ... | --- +++ @@ -89,6 +89,10 @@
class BeamScorer(ABC):
+ """
+ Abstract base class for all beam scorers that are used for [`~PreTrainedModel.beam_search`] and
+ [`~PreTrainedModel.beam_sample`].
+ """
@abstractmethod
@add_start_docstrings(PROCESS_INPUTS_DOCSTRING)
@@ -117,6 +121,43 @@
class B... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/gpt/transformers_beam_search.py |
Add docstrings to improve readability | import math
from dataclasses import dataclass
from pathlib import Path
from typing import Union
import numpy as np
import torch
import tqdm
from audiotools import AudioSignal
from torch import nn
SUPPORTED_VERSIONS = ["1.0.0"]
@dataclass
class DACFile:
codes: torch.Tensor
# Metadata
chunk_length: int
... | --- +++ @@ -131,6 +131,27 @@ normalize_db: float = -16,
n_quantizers: int = None,
) -> DACFile:
+ """Processes an audio signal from a file or AudioSignal object into
+ discrete codes. This function processes the signal in short windows,
+ using constant GPU memory.
+
+ ... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/dac/model/base.py |
Can you add docstrings to this Python file? | import torch
import torch.nn as nn
import torch.nn.functional as F
from audiotools import AudioSignal
from audiotools import ml
from audiotools import STFTParams
from einops import rearrange
from torch.nn.utils import weight_norm
def WNConv1d(*args, **kwargs):
act = kwargs.pop("act", True)
conv = weight_norm(... | --- +++ @@ -106,6 +106,18 @@ sample_rate: int = 44100,
bands: list = BANDS,
):
+ """Complex multi-band spectrogram discriminator.
+ Parameters
+ ----------
+ window_length : int
+ Window length of STFT.
+ hop_factor : float, optional
+ Hop f... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/dac/model/discriminator.py |
Fill in missing docstrings in my code | # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from modules.dac.nn.quantize import ResidualVectorQuantize
from torch import nn
from .wavenet import WN
from .style_encoder import StyleEncoder
from .gradient_reve... | --- +++ @@ -34,10 +34,36 @@
class SnakeBeta(nn.Module):
+ """
+ A modified Snake function which uses separate parameters for the magnitude of the periodic components
+ Shape:
+ - Input: (B, C, T)
+ - Output: (B, C, T), same shape as the input
+ Parameters:
+ - alpha - trainable para... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/facodec/modules/quantize.py |
Generate docstrings for script automation | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License... | --- +++ @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+"""PyTorch OpenAI GPT-2 model."""
import math
import os
@@ -70,6 +71,7 @@
def load_tf_weights_in_gpt2(mod... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/gpt/transformers_gpt2.py |
Document this script properly | # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import os.path
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from munch import Munch
import json
class ... | --- +++ @@ -44,6 +44,7 @@
def kl_divergence(m_p, logs_p, m_q, logs_q):
+ """KL(P||Q)"""
kl = (logs_q - logs_p) - 0.5
kl += (
0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
@@ -52,6 +53,7 @@
def rand_gumbel(shape):
+ """Sample from the Gumbel distribution... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/facodec/modules/commons.py |
Add docstrings to improve collaboration | # Copyright (c) 2024 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Tuple
import numpy as np
import scipy
import torch
from torch import nn, view_as_real, view_as_complex
from torch import nn
from torc... | --- +++ @@ -15,6 +15,16 @@
def safe_log(x: torch.Tensor, clip_val: float = 1e-7) -> torch.Tensor:
+ """
+ Computes the element-wise logarithm of the input tensor with clipping to avoid near-zero values.
+
+ Args:
+ x (Tensor): Input tensor.
+ clip_val (float, optional): Minimum value to clip ... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/kmeans/vocos.py |
Add well-formatted docstrings | # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from torch import nn
from typing import Optional, Any
from torch import Tensor
import torch.nn.functional as F
import torchaudio
import to... | --- +++ @@ -245,6 +245,16 @@ self.score_mask_value = -float("inf")
def get_alignment_energies(self, query, processed_memory, attention_weights_cat):
+ """
+ PARAMS
+ ------
+ query: decoder output (batch, n_mel_channels * n_frames_per_step)
+ processed_memory: processed... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/facodec/modules/layers.py |
Document this module using docstrings | # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This code is modified from https://github.com/sh-lee-prml/HierSpeechpp/blob/main/ttv_v1/attentions.py
import copy
import math
import numpy as np
import torch
fr... | --- +++ @@ -154,6 +154,10 @@ self.norm_layers_2.append(LayerNorm(hidden_channels))
def forward(self, x, x_mask, h, h_mask):
+ """
+ x: decoder input
+ h: encoder output
+ """
self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
device=x.device... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/facodec/modules/attentions.py |
Help me comply with documentation standards | import math
from typing import List
from typing import Union
import numpy as np
import torch
from audiotools import AudioSignal
from audiotools.ml import BaseModel
from torch import nn
from .base import CodecMixin
from indextts.s2mel.dac.nn.layers import Snake1d
from indextts.s2mel.dac.nn.layers import WNConv1d
from ... | --- +++ @@ -247,6 +247,35 @@ audio_data: torch.Tensor,
n_quantizers: int = None,
):
+ """Encode given audio data and return quantized latent codes
+
+ Parameters
+ ----------
+ audio_data : Tensor[B x 1 x T]
+ Audio data to encode
+ n_quantizers : int,... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/s2mel/dac/model/dac.py |
Document functions with clear intent | # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This source file is copied from https://github.com/facebookresearch/encodec
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This ... | --- +++ @@ -35,6 +35,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
+"""Core vector quantization implementation."""
import typing as tp
from einops import rearrange, repeat
@@ -98,6 +99,20 @@
class EuclideanCodebook(nn.Module):
+ """Codebook with Euclidean... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/speechtokenizer/modules/quantization/core_vq.py |
Help me document legacy Python code | # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
import random
from pathlib import Path
import re
import glob
import accelerate
import json
import numpy as np
import torch
from accelerate.u... | --- +++ @@ -275,6 +275,7 @@ return optimizer
def train_loop(self):
+ """Training process"""
self.accelerator.wait_for_everyone()
# Dump config
@@ -369,6 +370,9 @@ torch.save(state, save_path)
def _train_epoch(self):
+ """Training epoch. Should return a... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/facodec/facodec_trainer.py |
Generate docstrings for each module | # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This source file is copied from https://github.com/facebookresearch/encodec
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This ... | --- +++ @@ -10,6 +10,7 @@ # This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
+"""Convolutional layers wrappers and utilities."""
import math
import typing as tp
@@ -50,6 +51,9 @@ def get_norm_module(
module: nn.Module, causal: bool = False... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/speechtokenizer/modules/conv.py |
Add docstrings that explain purpose and usage | import math
import torch
import torch.nn as nn
from indextts.utils.xtransformers import RelativePositionBias
def zero_module(module):
for p in module.parameters():
p.detach().zero_()
return module
class GroupNorm32(nn.GroupNorm):
def forward(self, x):
return super().forward(x.float()).... | --- +++ @@ -7,6 +7,9 @@
def zero_module(module):
+ """
+ Zero out the parameters of a module and return it.
+ """
for p in module.parameters():
p.detach().zero_()
return module
@@ -18,6 +21,12 @@
def normalization(channels):
+ """
+ Make a standard normalization layer.
+
+ :... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/arch_util.py |
Add docstrings that explain purpose and usage | # Copyright (c) 2023 Amphion.
#
# This code is modified from https://github.com/ZhangXInFD/SpeechTokenizer/blob/main/speechtokenizer/model.py
# Licensed under Apache License 2.0
from .modules.seanet import SEANetEncoder, SEANetDecoder
from .modules.quantization import ResidualVectorQuantizer
import torch.nn as nn
from... | --- +++ @@ -13,6 +13,14 @@
class SpeechTokenizer(nn.Module):
def __init__(self, config):
+ """
+
+ Parameters
+ ----------
+ config : json
+ Model Config.
+
+ """
super().__init__()
self.encoder = SEANetEncoder(
n_filters=config.get("n_... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/speechtokenizer/model.py |
Help me write clear docstrings | import torch
import torchaudio
from torch import nn
from indextts.utils.common import safe_log
class FeatureExtractor(nn.Module):
def forward(self, audio: torch.Tensor, **kwargs) -> torch.Tensor:
raise NotImplementedError("Subclasses must implement the forward method.")
class MelSpectrogramFeatures(Fea... | --- +++ @@ -5,8 +5,19 @@
class FeatureExtractor(nn.Module):
+ """Base class for feature extractors."""
def forward(self, audio: torch.Tensor, **kwargs) -> torch.Tensor:
+ """
+ Extract features from the given audio.
+
+ Args:
+ audio (Tensor): Input audio waveform.
+
+ ... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/feature_extractors.py |
Create simple docstrings for beginners | # Copyright (c) 2024 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from torch.nn.utils import weight_norm
... | --- +++ @@ -256,6 +256,24 @@
class VectorQuantize(nn.Module):
+ """Vector quantization and factorized vecotor quantization implementation
+ Args:
+ input_dim (int): Dimension of input.
+ codebook_size (int): Codebook size.
+ codebook_dim (int): Codebook dimension. We suggest use codebook_... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/amphion_codec/quantize/vector_quantize.py |
Help me document legacy Python code | # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This source file is copied from https://github.com/facebookresearch/encodec
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This ... | --- +++ @@ -10,6 +10,7 @@ # This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
+"""Arithmetic coder."""
import io
import math
@@ -27,6 +28,21 @@ min_range: int = 2,
check: bool = True,
) -> torch.Tensor:
+ """Turn the given PDF into ... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/speechtokenizer/modules/quantization/ac.py |
Fill in missing docstrings in my code | # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This source file is copied from https://github.com/facebookresearch/encodec
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This ... | --- +++ @@ -10,6 +10,7 @@ # This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
+"""Torch distributed utilities."""
import typing as tp
@@ -61,6 +62,9 @@
def broadcast_tensors(tensors: tp.Iterable[torch.Tensor], src: int = 0):
+ """Broadca... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/speechtokenizer/modules/quantization/distrib.py |
Document this module using docstrings | # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from torch import nn, sin, pow
from torch.nn import Parameter
import torch.nn.functional as F
from torch.nn.utils import weight_nor... | --- +++ @@ -55,10 +55,36 @@
class SnakeBeta(nn.Module):
+ """
+ A modified Snake function which uses separate parameters for the magnitude of the periodic components
+ Shape:
+ - Input: (B, C, T)
+ - Output: (B, C, T), same shape as the input
+ Parameters:
+ - alpha - trainable para... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/ns3_codec/facodec.py |
Include argument descriptions in docstrings | # Copyright (c) 2024 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from transformers import LlamaConfig, LlamaForCausalLM, LlamaModel
import torch
import torch.nn.functional as F
import numpy as np
import os
import torch.nn as nn
... | --- +++ @@ -55,6 +55,7 @@
class LlamaNARDecoderLayer(LlamaDecoderLayer):
def __init__(self, config: LlamaConfig, layer_idx: int):
+ """Override to adaptive layer norm"""
super().__init__(config, layer_idx) # init attention, mlp, etc.
self.input_layernorm = LlamaAdaptiveRMSNorm(
... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/tts/maskgct/llama_nar.py |
Write beginner-friendly docstrings | import torch
def show_device_list(backend: str) -> int:
backend_upper = backend.upper()
try:
# Get the backend module from PyTorch, e.g., `torch.cuda`.
# NOTE: Backends always exist even if the user has no devices.
backend_module = getattr(torch, backend)
# Determine which v... | --- +++ @@ -2,6 +2,15 @@
def show_device_list(backend: str) -> int:
+ """
+ Displays a list of all detected devices for a given PyTorch backend.
+
+ Args:
+ backend: The name of the device backend module (e.g., "cuda", "xpu").
+
+ Returns:
+ The number of devices found if the backend is us... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/tools/gpu_check.py |
Write docstrings for backend logic | import argparse
import base64
import configparser
import datetime
import json
import os
import re
from collections import namedtuple
import arxiv
import numpy as np
import openai
import requests
import tenacity
import tiktoken
import fitz, io, os
from PIL import Image
class Paper:
def __init__(self, path, title... | --- +++ @@ -59,6 +59,12 @@ return first_page_text
def get_image_path(self, image_path=''):
+ """
+ 将PDF中的第一张图保存到image.png里面,存到本地目录,返回文件名称,供gitee读取
+ :param filename: 图片所在路径,"C:\\Users\\Administrator\\Desktop\\nwd.pdf"
+ :param image_path: 图片提取后的保存路径
+ :return:
+... | https://raw.githubusercontent.com/kaixindelele/ChatPaper/HEAD/chat_paper.py |
Write docstrings for algorithm functions | # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This source file is copied from https://github.com/facebookresearch/encodec
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This ... | --- +++ @@ -10,6 +10,7 @@ # This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
+"""Encodec SEANet-based encoder and decoder implementation."""
import typing as tp
@@ -39,6 +40,20 @@
class SEANetResnetBlock(nn.Module):
+ """Residual block ... | https://raw.githubusercontent.com/index-tts/index-tts/HEAD/indextts/utils/maskgct/models/codec/speechtokenizer/modules/seanet.py |
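Each row above pairs a short docstring-writing instruction with a source file URL, the (truncated) original code, and a unified diff that inserts the requested docstrings. As a rough illustration of how such rows could be consumed programmatically — a minimal sketch, assuming the table is published on the Hugging Face Hub; the repository ID below is a placeholder, not the real one — loading and inspecting a single row might look like this:

# Minimal sketch: load the table with the `datasets` library and peek at one row.
# "example-org/docstring-diffs" is a hypothetical repository ID used only for illustration.
from datasets import load_dataset

ds = load_dataset("example-org/docstring-diffs", split="train")

row = ds[0]
print(row["instruction"])      # e.g. "Include argument descriptions in docstrings"
print(row["file"])             # URL of the source file the diff applies to
print(row["response"][:300])   # beginning of the unified diff that adds the docstrings

The diff in the `response` column is standard unified-diff text, so it can be applied to the file referenced in the `file` column with any patch tool once the full (untruncated) row is available.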