repo_id stringlengths 6 101 | size int64 367 5.14M | file_path stringlengths 2 269 | content stringlengths 367 5.14M |
|---|---|---|---|
2881099/dotnetGen_sqlserver | 21,409 | GenMs/FastExcel/Worksheet.cs | using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Reflection;
using System.Text;
using System.Threading.Tasks;
using System.IO.Compression;
using System.IO;
using System.Xml.Linq;
namespace FastExcel
{
public class Worksheet
{
/// <summary>
/// Collection of rows in this worksheet
/// </summary>
public IEnumerable<Row> Rows { get; set; }
/// <summary>Heading strings, populated by Read (when ExistingHeadingRows == 1) or PopulateRows.</summary>
public IEnumerable<string> Headings { get; set; }
/// <summary>1-based position of this sheet inside the workbook; 0 means "not yet located".</summary>
public int Index { get; internal set; }
/// <summary>Sheet name as stored in xl/workbook.xml.</summary>
public string Name { get; set; }
/// <summary>Number of heading rows already present in the existing sheet data.</summary>
public int ExistingHeadingRows { get; set; }
// Set by ValidateNewWorksheet: index of the sheet this new sheet is inserted after.
private int? InsertAfterIndex { get; set; }
/// <summary>When true, captured Headers/Footers are kept after writing so the sheet can be reused.</summary>
public bool Template { get; set; }
// Raw worksheet XML preceding the row content (captured by ReadHeadersAndFooters, or DEFAULT_HEADERS).
internal string Headers { get; set; }
// Raw worksheet XML following the row content.
internal string Footers { get; set; }
/// <summary>Owning FastExcel document.</summary>
public FastExcel FastExcel { get; private set; }
// Archive path of this sheet's XML part, derived from Index.
internal string FileName
{
    get
    {
        return Worksheet.GetFileName(this.Index);
    }
}
/// <summary>Archive path ("xl/worksheets/sheetN.xml") for a 1-based sheet index.</summary>
public static string GetFileName(int index)
{
    return string.Format("xl/worksheets/sheet{0}.xml", index);
}
// Minimal worksheet XML shell used when a brand-new sheet has no captured headers/footers.
private const string DEFAULT_HEADERS = "<?xml version=\"1.0\" encoding=\"UTF-8\" ?><worksheet xmlns=\"http://schemas.openxmlformats.org/spreadsheetml/2006/main\"><sheetData>";
private const string DEFAULT_FOOTERS = "</sheetData></worksheet>";
/// <summary>Creates a detached worksheet; FastExcel is supplied later via internal calls.</summary>
public Worksheet() { }
/// <summary>Creates a worksheet bound to an open FastExcel document.</summary>
public Worksheet(FastExcel fastExcel)
{
    FastExcel = fastExcel;
}
/// <summary>
/// Populates Rows from a collection, choosing a strategy by element shape:
/// element collections (IEnumerable of object) become rows positionally,
/// anything else is mapped via its public properties.
/// </summary>
/// <param name="rows">Source items; must not be null.</param>
/// <param name="existingHeadingRows">Row numbering starts after this many heading rows.</param>
/// <param name="usePropertiesAsHeadings">Emit a heading row from property names/Description attributes.</param>
public void PopulateRows<T>(IEnumerable<T> rows, int existingHeadingRows = 0, bool usePropertiesAsHeadings = false)
{
    // BUG FIX: a null argument previously surfaced as an opaque NullReferenceException
    // inside FirstOrDefault; fail fast with a descriptive exception instead.
    if (rows == null)
    {
        throw new ArgumentNullException(nameof(rows));
    }
    // Probe the first element: only if it is itself a sequence of objects do we
    // treat the input as raw cell values rather than property-bearing objects.
    if ((rows.FirstOrDefault() as IEnumerable<object>) == null)
    {
        PopulateRowsFromObjects(rows, existingHeadingRows, usePropertiesAsHeadings);
    }
    else
    {
        PopulateRowsFromIEnumerable(rows as IEnumerable<IEnumerable<object>>, existingHeadingRows);
    }
}
/// <summary>
/// Resolve the heading text for a property: a non-blank [Description] wins,
/// otherwise the property name itself is used.
/// </summary>
private string GetHeaderName(PropertyInfo propertyInfo)
{
    var description = propertyInfo.GetCustomAttribute(typeof(DescriptionAttribute)) as DescriptionAttribute;
    return (description == null || string.IsNullOrWhiteSpace(description.Description))
        ? propertyInfo.Name
        : description.Description;
}
/// <summary>
/// Builds Rows by reflecting over T's public properties: one column per property,
/// in declaration order; null property values produce no cell. Optionally prepends
/// a heading row derived from the property names / Description attributes.
/// </summary>
private void PopulateRowsFromObjects<T>(IEnumerable<T> rows, int existingHeadingRows = 0, bool usePropertiesAsHeadings = false)
{
    // Row numbers are 1-based and start after any heading rows already in the sheet.
    int nextRowNumber = existingHeadingRows + 1;
    PropertyInfo[] properties = typeof(T).GetTypeInfo().GetProperties();
    var builtRows = new List<Row>();

    if (usePropertiesAsHeadings)
    {
        this.Headings = properties.Select(GetHeaderName);
        var headingCells = new List<Cell>();
        int headingColumn = 1;
        foreach (string heading in this.Headings)
        {
            headingCells.Add(new Cell(headingColumn++, heading));
        }
        builtRows.Add(new Row(nextRowNumber++, headingCells));
    }

    foreach (T item in rows)
    {
        var cells = new List<Cell>();
        int column = 1;
        foreach (PropertyInfo property in properties)
        {
            // Null values are skipped entirely: the column number still advances,
            // so later columns keep their positions.
            object value = property.GetValue(item, null);
            if (value != null)
            {
                cells.Add(new Cell(column, value));
            }
            column++;
        }
        builtRows.Add(new Row(nextRowNumber++, cells));
    }
    this.Rows = builtRows;
}
/// <summary>
/// Builds Rows from raw value sequences: each inner sequence becomes one row,
/// each element one cell at its 1-based position; nulls yield no cell but still
/// occupy a column position.
/// </summary>
private void PopulateRowsFromIEnumerable(IEnumerable<IEnumerable<object>> rows, int existingHeadingRows = 0)
{
    int nextRowNumber = existingHeadingRows + 1;
    var builtRows = new List<Row>();
    foreach (IEnumerable<object> values in rows)
    {
        // Project each value to a Cell keyed by its position, dropping nulls.
        List<Cell> cells = values
            .Select((value, index) => value == null ? null : new Cell(index + 1, value))
            .Where(cell => cell != null)
            .ToList();
        builtRows.Add(new Row(nextRowNumber++, cells));
    }
    this.Rows = builtRows;
}
/// <summary>
/// Add a row using a collection of value objects
/// </summary>
/// <param name="cellValues">Collection of objects; nulls skip a column but keep positions</param>
public void AddRow(params object[] cellValues)
{
    // BUG FIX: Rows may hold a lazy enumerable or a non-List (e.g. after Read),
    // in which case the old "(this.Rows as List<Row>)" cast was null and Add threw
    // a NullReferenceException. Materialise into a List<Row> we can append to.
    List<Row> rowList = this.Rows as List<Row>;
    if (rowList == null)
    {
        rowList = this.Rows == null ? new List<Row>() : this.Rows.ToList();
        this.Rows = rowList;
    }
    List<Cell> cells = new List<Cell>();
    int columnNumber = 1;
    foreach (object value in cellValues)
    {
        if (value != null)
        {
            cells.Add(new Cell(columnNumber, value));
        }
        // Column position advances regardless so later values line up.
        columnNumber++;
    }
    // Row number is the next sequential position (count is read before Add executes).
    rowList.Add(new Row(rowList.Count + 1, cells));
}
/// <summary>
/// Sets a single cell value, creating the row and/or cell on demand.
/// Note: This method is slow (linear scans of rows and cells)
/// </summary>
/// <param name="rowNumber">1-based row number</param>
/// <param name="columnNumber">1-based column number</param>
/// <param name="value">Value to store</param>
public void AddValue(int rowNumber, int columnNumber, object value)
{
    // BUG FIX: the old "(this.Rows as List<Row>)" cast was null for lazy/non-List
    // Rows and crashed; materialise first.
    List<Row> rowList = this.Rows as List<Row>;
    if (rowList == null)
    {
        rowList = this.Rows == null ? new List<Row>() : this.Rows.ToList();
        this.Rows = rowList;
    }
    Row row = rowList.FirstOrDefault(r => r.RowNumber == rowNumber);
    if (row == null)
    {
        // No such row yet: create it holding just this cell.
        rowList.Add(new Row(rowNumber, new List<Cell> { new Cell(columnNumber, value) }));
        return;
    }
    Cell cell = row.Cells.FirstOrDefault(c => c.ColumnNumber == columnNumber);
    if (cell == null)
    {
        // Row exists but the column doesn't: append a new cell.
        // NOTE(review): assumes Row.Cells is backed by a List<Cell> — true for rows
        // built by this class; confirm for rows parsed from XML.
        (row.Cells as List<Cell>).Add(new Cell(columnNumber, value));
    }
    else
    {
        // BUG FIX: an existing cell was previously left unchanged, silently
        // discarding the caller's value.
        cell.Value = value;
    }
}
/// <summary>
/// Merges the parameter into the current DataSet object, the parameter takes precedence
/// </summary>
/// <param name="data">A worksheet to merge into this one</param>
public void Merge(Worksheet data)
{
    // Merge headings: keep ours unless we have none.
    if (this.Headings == null || !this.Headings.Any())
    {
        this.Headings = data.Headings;
    }
    // Merge rows.
    // BUG FIX: the merged result was previously assigned to the PARAMETER
    // (data.Rows), leaving this worksheet untouched despite the documented
    // contract. Also materialise: MergeRows is a lazy iterator over this.Rows,
    // so assigning it unmaterialised to this.Rows would self-reference.
    this.Rows = MergeRows(data.Rows).ToList();
}
/// <summary>
/// Lazily yields the union of this worksheet's rows and <paramref name="rows"/>,
/// folding rows that share a row number into a single row (later rows overwrite
/// earlier cell values via Row.Merge).
/// </summary>
private IEnumerable<Row> MergeRows(IEnumerable<Row> rows)
{
    foreach (var group in this.Rows.Union(rows).GroupBy(r => r.RowNumber))
    {
        Row merged = group.First();
        // BUG FIX: only the SECOND duplicate was merged before (Skip(1).First()),
        // silently dropping any third or later row with the same number.
        foreach (Row duplicate in group.Skip(1))
        {
            merged.Merge(duplicate);
        }
        yield return merged;
    }
}
/// <summary>
/// True once this worksheet has been located in the workbook (Index assigned, 1-based).
/// </summary>
public bool Exists
{
    get
    {
        // BUG FIX: FileName is a string.Format result and can never be null/empty,
        // so the old IsNullOrEmpty(FileName) check was always true and the
        // "worksheet not found" guard downstream could never fire.
        return this.Index > 0;
    }
}
/// <summary>
/// Locate the sheet by number or name, then read its rows.
/// </summary>
/// <param name="sheetNumber">1-based sheet number (alternative to sheetName)</param>
/// <param name="sheetName">Sheet name (alternative to sheetNumber)</param>
/// <param name="existingHeadingRows">Number of heading rows to treat as headings, not data</param>
internal void Read(int? sheetNumber = null, string sheetName = null, int existingHeadingRows = 0)
{
    GetWorksheetProperties(FastExcel, sheetNumber, sheetName);
    Read(existingHeadingRows);
}
/// <summary>
/// Reads this worksheet's XML part into Rows and (optionally) Headings.
/// When existingHeadingRows == 1 and the first row is row 1, that row is consumed
/// as the headings and excluded from the data rows.
/// </summary>
public void Read(int existingHeadingRows = 0)
{
    FastExcel.CheckFiles();
    FastExcel.PrepareArchive();
    ExistingHeadingRows = existingHeadingRows;
    IEnumerable<Row> rows = null;
    List<string> headings = new List<string>();
    using (Stream stream = FastExcel.Archive.GetEntry(FileName).Open())
    {
        XDocument document = XDocument.Load(stream);
        int skipRows = 0;
        // NOTE(review): assumes the sheet has at least one row element; an empty sheet
        // would pass a null element into Row — confirm Row's ctor tolerates that.
        Row possibleHeadingRow = new Row(document.Descendants().Where(d => d.Name.LocalName == "row").FirstOrDefault(), FastExcel.SharedStrings);
        if (ExistingHeadingRows == 1 && possibleHeadingRow.RowNumber == 1)
        {
            foreach (Cell headerCell in possibleHeadingRow.Cells)
            {
                headings.Add(headerCell.Value.ToString());
            }
            // BUG FIX: skipRows previously stayed 0, so the heading row was ALSO
            // returned as a data row after being consumed as headings.
            skipRows = 1;
        }
        // Materialise now: GetRows is lazy and Rows should not depend on re-reading
        // the document after this using block ends.
        rows = GetRows(document.Descendants().Where(d => d.Name.LocalName == "row").Skip(skipRows)).ToList();
    }
    Headings = headings;
    Rows = rows;
}
/// <summary>
/// Lazily projects each row XML element into a Row, resolving shared strings.
/// </summary>
private IEnumerable<Row> GetRows(IEnumerable<XElement> rowElements)
{
    return rowElements.Select(rowElement => new Row(rowElement, FastExcel.SharedStrings));
}
/// <summary>
/// Read the existing sheet and copy some of the existing content:
/// everything before the sheetData element into Headers, everything after the
/// copied rows into Footers, plus up to ExistingHeadingRows heading rows
/// (which are appended to Headers so they are preserved on rewrite).
/// </summary>
/// <param name="stream">Worksheet stream</param>
/// <param name="worksheet">Saves the header and footer to the worksheet</param>
internal void ReadHeadersAndFooters(StreamReader stream, ref Worksheet worksheet)
{
    StringBuilder headers = new StringBuilder();
    StringBuilder footers = new StringBuilder();
    // State machine: headers are complete once sheetData is seen; rows are
    // complete once the required heading rows have been copied out.
    bool headersComplete = false;
    bool rowsComplete = false;
    int existingHeadingRows = worksheet.ExistingHeadingRows;
    while (stream.Peek() >= 0)
    {
        string line = stream.ReadLine();
        int currentLineIndex = 0;
        if (!headersComplete)
        {
            if (line.Contains("<sheetData/>"))
            {
                // Self-closing sheetData: the sheet has no rows at all.
                currentLineIndex = line.IndexOf("<sheetData/>");
                headers.Append(line.Substring(0, currentLineIndex));
                //remove the read section from line
                line = line.Substring(currentLineIndex, line.Length - currentLineIndex);
                // Re-open the element so new rows can be written between Headers and Footers.
                headers.Append("<sheetData>");
                // Headers complete now skip any content and start footer
                headersComplete = true;
                footers = new StringBuilder();
                footers.Append("</sheetData>");
                //There is no rows
                rowsComplete = true;
            }
            else if (line.Contains("<sheetData>"))
            {
                currentLineIndex = line.IndexOf("<sheetData>");
                headers.Append(line.Substring(0, currentLineIndex));
                //remove the read section from line
                line = line.Substring(currentLineIndex, line.Length - currentLineIndex);
                headers.Append("<sheetData>");
                // Headers complete now skip any content and start footer
                headersComplete = true;
                footers = new StringBuilder();
                footers.Append("</sheetData>");
            }
            else
            {
                headers.Append(line);
            }
        }
        if (headersComplete && !rowsComplete)
        {
            if (existingHeadingRows == 0)
            {
                rowsComplete = true;
            }
            if (!rowsComplete)
            {
                // Copy whole row elements into headers until the requested number of
                // existing heading rows has been consumed.
                // NOTE(review): a non-empty line containing neither "<row" nor "</row>"
                // would never shrink here and loop forever — confirm worksheet XML
                // always keeps row markup on these lines.
                while (!string.IsNullOrEmpty(line) && existingHeadingRows != 0)
                {
                    if (line.Contains("<row"))
                    {
                        if (line.Contains("</row>"))
                        {
                            // A complete row on this line: copy it and keep scanning the remainder.
                            int index = line.IndexOf("<row");
                            currentLineIndex = line.IndexOf("</row>") + "</row>".Length;
                            headers.Append(line.Substring(index, currentLineIndex - index));
                            //remove the read section from line
                            line = line.Substring(currentLineIndex, line.Length - currentLineIndex);
                            existingHeadingRows--;
                        }
                        else
                        {
                            // Row opens here but closes on a later line: copy the opening part.
                            int index = line.IndexOf("<row");
                            headers.Append(line.Substring(index, line.Length - index));
                            line = string.Empty;
                        }
                    }
                    else if (line.Contains("</row>"))
                    {
                        // Closing tag of a row that started on a previous line.
                        currentLineIndex = line.IndexOf("</row>") + "</row>".Length;
                        headers.Append(line.Substring(0, currentLineIndex));
                        //remove the read section from line
                        line = line.Substring(currentLineIndex, line.Length - currentLineIndex);
                        existingHeadingRows--;
                    }
                }
            }
            if (existingHeadingRows == 0)
            {
                rowsComplete = true;
            }
        }
        if (rowsComplete)
        {
            // Everything after the closing (or self-closing) sheetData belongs to the footer.
            if (line.Contains("</sheetData>"))
            {
                int index = line.IndexOf("</sheetData>") + "</sheetData>".Length;
                footers.Append(line.Substring(index, line.Length - index));
            }
            else if (line.Contains("<sheetData/>"))
            {
                int index = line.IndexOf("<sheetData/>") + "<sheetData/>".Length;
                footers.Append(line.Substring(index, line.Length - index));
            }
            else
            {
                footers.Append(line);
            }
        }
    }
    worksheet.Headers = headers.ToString();
    worksheet.Footers = footers.ToString();
}
/// <summary>
/// Get worksheet file name from xl/workbook.xml
/// </summary>
/// <param name="fastExcel">Open document to search</param>
/// <param name="sheetNumber">1-based sheet number (alternative to sheetName)</param>
/// <param name="sheetName">Sheet name (alternative to sheetNumber)</param>
internal void GetWorksheetProperties(FastExcel fastExcel, int? sheetNumber = null, string sheetName = null)
{
    // Pure lookup: newSheetName stays null, so no name validation is performed.
    GetWorksheetPropertiesAndValidateNewName(fastExcel, sheetNumber, sheetName);
}
/// <summary>
/// Locates this worksheet in xl/workbook.xml by number or name and fills Index/Name.
/// Dual-mode return value: with a newSheetName it returns whether that name is free;
/// without one, it returns true only when Index was already loaded (early exit),
/// otherwise false after a successful lookup.
/// </summary>
private bool GetWorksheetPropertiesAndValidateNewName(FastExcel fastExcel, int? sheetNumber = null, string sheetName = null, string newSheetName = null)
{
    FastExcel = fastExcel;
    bool newSheetNameExists = false;
    FastExcel.CheckFiles();
    FastExcel.PrepareArchive();
    //If index has already been loaded then we can skip this function
    if (this.Index != 0)
    {
        return true;
    }
    if (!sheetNumber.HasValue && string.IsNullOrEmpty(sheetName))
    {
        throw new Exception("No worksheet name or number was specified");
    }
    using (Stream stream = FastExcel.Archive.GetEntry("xl/workbook.xml").Open())
    {
        XDocument document = XDocument.Load(stream);
        if (document == null)
        {
            throw new Exception("Unable to load workbook.xml");
        }
        List<XElement> sheetsElements = document.Descendants().Where(d => d.Name.LocalName == "sheet").ToList();
        XElement sheetElement = null;
        if (sheetNumber.HasValue)
        {
            // Lookup by 1-based position in the workbook's sheet list.
            if (sheetNumber.Value <= sheetsElements.Count)
            {
                sheetElement = sheetsElements[sheetNumber.Value - 1];
            }
            else
            {
                throw new Exception(string.Format("There is no sheet at index '{0}'", sheetNumber));
            }
        }
        else if (!string.IsNullOrEmpty(sheetName))
        {
            // Lookup by case-insensitive sheet name.
            sheetElement = (from sheet in sheetsElements
                            from attribute in sheet.Attributes()
                            where attribute.Name == "name" && attribute.Value.Equals(sheetName, StringComparison.CurrentCultureIgnoreCase)
                            select sheet).FirstOrDefault();
            if (sheetElement == null)
            {
                throw new Exception(string.Format("There is no sheet named '{0}'", sheetName));
            }
            if (!string.IsNullOrEmpty(newSheetName))
            {
                // Validation mode: record whether the proposed name clashes with an existing sheet.
                newSheetNameExists = (from sheet in sheetsElements
                                      from attribute in sheet.Attributes()
                                      where attribute.Name == "name" && attribute.Value.Equals(newSheetName, StringComparison.CurrentCultureIgnoreCase)
                                      select sheet).Any();
                // Cache the highest sheetId so a newly added sheet can take the next one.
                // NOTE(review): this only runs on the lookup-by-name path — when the caller
                // located the sheet by number, MaxSheetNumber stays 0; confirm intended.
                if (FastExcel.MaxSheetNumber == 0)
                {
                    FastExcel.MaxSheetNumber = (from sheet in sheetsElements
                                                from attribute in sheet.Attributes()
                                                where attribute.Name == "sheetId"
                                                select int.Parse(attribute.Value)).Max();
                }
            }
        }
        // Record position (1-based) and display name of the located sheet.
        this.Index = sheetsElements.IndexOf(sheetElement)+1;
        this.Name = (from attribute in sheetElement.Attributes()
                     where attribute.Name == "name"
                     select attribute.Value).FirstOrDefault();
    }
    if (!this.Exists)
    {
        throw new Exception("No worksheet was found with the name or number was specified");
    }
    if (string.IsNullOrEmpty(newSheetName))
    {
        // Lookup mode: the return value is not a name-validity answer.
        return false;
    }
    else
    {
        // Validation mode: true when the proposed new name is free.
        return !newSheetNameExists;
    }
}
/// <summary>
/// Prepares this instance as a brand-new sheet: verifies the name is set and unused,
/// records the insert position, assigns the next sheet number, and installs default
/// header/footer XML when none was captured from a template.
/// </summary>
internal void ValidateNewWorksheet(FastExcel fastExcel, int? insertAfterSheetNumber = null, string insertAfterSheetName = null)
{
    if (string.IsNullOrEmpty(this.Name))
    {
        // TODO possibly could calculate a new worksheet name
        throw new Exception("Name for new worksheet is not specified");
    }
    // Get worksheet details: locate the sheet we will insert after while also
    // checking this.Name against the existing sheet names.
    Worksheet previousWorksheet = new Worksheet(fastExcel);
    bool isNameValid = previousWorksheet.GetWorksheetPropertiesAndValidateNewName(fastExcel, insertAfterSheetNumber, insertAfterSheetName, this.Name);
    this.InsertAfterIndex = previousWorksheet.Index;
    if (!isNameValid)
    {
        throw new Exception(string.Format("Worksheet name '{0}' already exists", this.Name));
    }
    // The new sheet takes the next id after the current maximum.
    fastExcel.MaxSheetNumber += 1;
    this.Index = fastExcel.MaxSheetNumber;
    // Fall back to a minimal empty worksheet shell when no template content was captured.
    if (string.IsNullOrEmpty(this.Headers))
    {
        this.Headers = DEFAULT_HEADERS;
    }
    if (string.IsNullOrEmpty(this.Footers))
    {
        this.Footers = DEFAULT_FOOTERS;
    }
}
/// <summary>
/// Settings describing where this (new) sheet is inserted; null unless
/// ValidateNewWorksheet has assigned an insert position.
/// </summary>
internal WorksheetAddSettings AddSettings
{
    get
    {
        if (!this.InsertAfterIndex.HasValue)
        {
            return null;
        }
        return new WorksheetAddSettings()
        {
            Name = this.Name,
            SheetId = this.Index,
            InsertAfterSheetId = this.InsertAfterIndex.Value
        };
    }
}
}
}
|
2881099/dotnetGen_sqlserver | 16,482 | GenMs/FastExcel/FastExcel.cs | using System;
using System.Collections.Generic;
using System.IO;
using System.IO.Compression;
using System.Linq;
using System.Text;
using System.Xml.Linq;
namespace FastExcel {
public partial class FastExcel : IDisposable {
/// <summary>Workbook being read/written on disk.</summary>
public FileInfo ExcelFile { get; private set; }
/// <summary>Template the new workbook is created from (null when updating an existing file).</summary>
public FileInfo TemplateFile { get; private set; }
/// <summary>When true the archive is opened in read mode and no changes are written.</summary>
public bool ReadOnly { get; private set; }
internal SharedStrings SharedStrings { get; set; }
internal ZipArchive Archive { get; set; }
// True when editing an existing file; false when generating a new file from TemplateFile.
private bool UpdateExisting { get; set; }
// Guards CheckFiles so path validation runs only once per instance.
private bool _filesChecked;
/// <summary>
/// Maximum sheet number, obtained when a sheet is added
/// </summary>
internal int MaxSheetNumber { get; set; }
/// <summary>
/// A list of worksheet indexes to delete
/// </summary>
private List<int> DeleteWorksheets { get; set; }
/// <summary>
/// A list of worksheet indexes to insert
/// </summary>
private List<WorksheetAddSettings> AddWorksheets { get; set; }
/// <summary>
/// Update an existing excel file
/// </summary>
/// <param name="excelFile">location of an existing excel file</param>
/// <param name="readOnly">open the file for reading only</param>
public FastExcel(FileInfo excelFile, bool readOnly = false) : this(null, excelFile, true, readOnly) { }
/// <summary>
/// Create a new excel file from a template
/// </summary>
/// <param name="templateFile">template location</param>
/// <param name="excelFile">location of where a new excel file will be saved to</param>
public FastExcel(FileInfo templateFile, FileInfo excelFile) : this(templateFile, excelFile, false, false) { }
// Shared constructor: stores settings and validates the file paths immediately.
private FastExcel(FileInfo templateFile, FileInfo excelFile, bool updateExisting, bool readOnly = false) {
    this.TemplateFile = templateFile;
    this.ExcelFile = excelFile;
    this.UpdateExisting = updateExisting;
    this.ReadOnly = readOnly;
    CheckFiles();
}
/// <summary>
/// Lazily opens the package archive (mode picked from ReadOnly) and, on request,
/// loads the shared-strings table.
/// </summary>
internal void PrepareArchive(bool openSharedStrings = true) {
    if (this.Archive == null) {
        ZipArchiveMode mode = this.ReadOnly ? ZipArchiveMode.Read : ZipArchiveMode.Update;
        this.Archive = ZipFile.Open(this.ExcelFile.FullName, mode);
    }
    // Get Strings file (also lazy)
    if (openSharedStrings && this.SharedStrings == null) {
        this.SharedStrings = new SharedStrings(this.Archive);
    }
}
/// <summary>
/// Ensure files are ready for use: the input must exist when updating, the
/// template must exist and the output must NOT exist when creating. Runs once.
/// </summary>
internal void CheckFiles() {
    if (_filesChecked) {
        return;
    }
    if (this.UpdateExisting) {
        if (this.ExcelFile == null) {
            throw new Exception("No input file name was supplied");
        } else if (!this.ExcelFile.Exists) {
            // Consistency fix: use FileNotFoundException (an Exception subclass, so
            // existing catch blocks still match) like the missing-template case below.
            throw new FileNotFoundException(string.Format("Input file '{0}' does not exist", this.ExcelFile.FullName));
        }
    } else {
        if (this.TemplateFile == null) {
            throw new Exception("No Template file was supplied");
        } else if (!this.TemplateFile.Exists) {
            throw new FileNotFoundException(string.Format("Template file '{0}' was not found", this.TemplateFile.FullName));
        }
        if (this.ExcelFile == null) {
            // BUG FIX: message typo "Ouput" -> "Output".
            throw new Exception("No Output file name was supplied");
        } else if (this.ExcelFile.Exists) {
            throw new Exception(string.Format("Output file '{0}' already exists", this.ExcelFile.FullName));
        }
    }
    _filesChecked = true;
}
/// <summary>
/// Update xl/_rels/workbook.xml.rels file: ensure a sharedStrings relationship
/// exists when requested, and strip worksheet relationships when sheets were
/// added or deleted. Rewrites the entry in place only when something changed.
/// </summary>
/// <param name="ensureStrings">When true, guarantee a sharedStrings.xml relationship.</param>
private void UpdateRelations(bool ensureStrings) {
    if (!(ensureStrings ||
        (this.DeleteWorksheets != null && this.DeleteWorksheets.Any()) ||
        (this.AddWorksheets != null && this.AddWorksheets.Any()))) {
        // Nothing to update
        return;
    }
    using (Stream stream = this.Archive.GetEntry("xl/_rels/workbook.xml.rels").Open()) {
        XDocument document = XDocument.Load(stream);
        if (document == null) {
            //TODO error
        }
        bool update = false;
        List<XElement> relationshipElements = document.Descendants().Where(d => d.Name.LocalName == "Relationship").ToList();
        int id = relationshipElements.Count;
        if (ensureStrings) {
            //Ensure SharedStrings relationship exists; create it with the next rId otherwise.
            XElement relationshipElement = (from element in relationshipElements
                                            from attribute in element.Attributes()
                                            where attribute.Name == "Target" && attribute.Value.Equals("sharedStrings.xml", StringComparison.CurrentCultureIgnoreCase)
                                            select element).FirstOrDefault();
            if (relationshipElement == null) {
                relationshipElement = new XElement(document.Root.GetDefaultNamespace() + "Relationship");
                relationshipElement.Add(new XAttribute("Target", "sharedStrings.xml"));
                relationshipElement.Add(new XAttribute("Type", "http://schemas.openxmlformats.org/officeDocument/2006/relationships/sharedStrings"));
                relationshipElement.Add(new XAttribute("Id", string.Format("rId{0}", ++id)));
                document.Root.Add(relationshipElement);
                update = true;
            }
        }
        // Remove all references to sheets from this file as they are not required
        if ((this.DeleteWorksheets != null && this.DeleteWorksheets.Any()) ||
            (this.AddWorksheets != null && this.AddWorksheets.Any())) {
            XElement[] worksheetElements = (from element in relationshipElements
                                            from attribute in element.Attributes()
                                            where attribute.Name == "Type" && attribute.Value == "http://schemas.openxmlformats.org/officeDocument/2006/relationships/worksheet"
                                            select element).ToArray();
            // NOTE(review): the bound "i > 0" leaves worksheetElements[0] in place even
            // though the comment above says ALL sheet relationships are removed — confirm
            // whether keeping the first relationship is intentional or an off-by-one.
            for (int i = worksheetElements.Length - 1; i > 0; i--) {
                worksheetElements[i].Remove();
                update = true;
            }
        }
        if (update) {
            // Set the stream to the start
            stream.Position = 0;
            // Clear the stream
            stream.SetLength(0);
            // Open the stream so we can override all content of the sheet
            StreamWriter streamWriter = new StreamWriter(stream, Encoding.UTF8);
            document.Save(streamWriter);
            streamWriter.Flush();
        }
    }
}
/// <summary>
/// Update xl/workbook.xml file: validates the sheet list after pending
/// adds/deletes, renumbers the relationship ids sequentially and rewrites the
/// part. Returns the sheet names in order, or null when nothing was pending.
/// </summary>
private string[] UpdateWorkbook() {
    if (!(this.DeleteWorksheets != null && this.DeleteWorksheets.Any() ||
        (this.AddWorksheets != null && this.AddWorksheets.Any()))) {
        // Nothing to update
        return null;
    }
    List<string> sheetNames = new List<string>();
    using (Stream stream = this.Archive.GetEntry("xl/workbook.xml").Open()) {
        XDocument document = XDocument.Load(stream);
        if (document == null) {
            throw new Exception("Unable to load workbook.xml");
        }
        RenameAndRebildWorksheetProperties((from sheet in document.Descendants()
                                            where sheet.Name.LocalName == "sheet"
                                            select sheet).ToArray());
        // BUG FIX: 'update' was declared false and never set, so the renumber/save
        // below was dead code and this method always returned an empty array. The
        // guard at the top already guarantees sheets were added or deleted, so the
        // workbook part always needs rewriting once we get here.
        bool update = true;
        if (update) {
            // Re number sheet ids
            XNamespace r = "http://schemas.openxmlformats.org/officeDocument/2006/relationships";
            int id = 1;
            foreach (XElement sheetElement in (from sheet in document.Descendants()
                                               where sheet.Name.LocalName == "sheet"
                                               select sheet)) {
                sheetElement.SetAttributeValue(r + "id", string.Format("rId{0}", id++));
                sheetNames.Add(sheetElement.Attribute("name").Value);
            }
            //Set the stream to the start
            stream.Position = 0;
            // Clear the stream
            stream.SetLength(0);
            // Open the stream so we can override all content of the sheet
            StreamWriter streamWriter = new StreamWriter(stream);
            document.Save(streamWriter);
            streamWriter.Flush();
        }
    }
    return sheetNames.ToArray();
}
/// <summary>
/// If sheets have been added or deleted, sheets need to be renamed: rebuilds the
/// logical sheet order (applying pending deletes and inserts) and verifies each
/// sheet part that would have to move actually exists in the archive.
/// </summary>
/// <param name="sheets">sheet elements read from xl/workbook.xml</param>
private void RenameAndRebildWorksheetProperties(XElement[] sheets) {
    if (!((this.DeleteWorksheets != null && this.DeleteWorksheets.Any()) ||
        (this.AddWorksheets != null && this.AddWorksheets.Any()))) {
        // Nothing to update
        return;
    }
    XNamespace r = "http://schemas.openxmlformats.org/officeDocument/2006/relationships";
    // Snapshot the current sheet list (sheetId, name, current index) from the workbook XML.
    // NOTE(review): r:id attribute values are conventionally of the form "rId1" —
    // int.Parse on them would throw; confirm what this workbook actually stores there.
    List<WorksheetProperties> sheetProperties = (from sheet in sheets
                                                 select new WorksheetProperties() {
                                                     SheetId = int.Parse(sheet.Attribute("sheetId").Value),
                                                     Name = sheet.Attribute("name").Value,
                                                     CurrentIndex = int.Parse(sheet.Attribute(r + "id").Value)
                                                 }).ToList();
    // Remove deleted worksheets from sheetProperties
    if (this.DeleteWorksheets != null && this.DeleteWorksheets.Any()) {
        foreach (var item in this.DeleteWorksheets) {
            WorksheetProperties sheetToDelete = (from sp in sheetProperties
                                                 where sp.SheetId == item
                                                 select sp).FirstOrDefault();
            if (sheetToDelete != null) {
                sheetProperties.Remove(sheetToDelete);
            }
        }
    }
    // Add new worksheets to sheetProperties
    if (this.AddWorksheets != null && this.AddWorksheets.Any()) {
        // Add the sheets in reverse, this will add them correctly with less work
        foreach (var item in this.AddWorksheets.Reverse<WorksheetAddSettings>()) {
            WorksheetProperties previousSheet = (from sp in sheetProperties
                                                 where sp.SheetId == item.InsertAfterSheetId
                                                 select sp).FirstOrDefault();
            if (previousSheet == null) {
                throw new Exception(string.Format("Sheet name {0} cannot be added because the insertAfterSheetNumber or insertAfterSheetName is now invalid", item.Name));
            }
            WorksheetProperties newWorksheet = new WorksheetProperties();
            newWorksheet.SheetId = item.SheetId;
            newWorksheet.Name = item.Name;
            newWorksheet.CurrentIndex = 0;// TODO Something??
            // NOTE(review): Insert at IndexOf(previousSheet) places the new sheet BEFORE
            // the sheet it is supposed to follow — confirm this shouldn't be IndexOf(...) + 1.
            sheetProperties.Insert(sheetProperties.IndexOf(previousSheet), newWorksheet);
        }
    }
    // Verify each sheet part that is out of position still exists in the archive.
    int index = 1;
    foreach (WorksheetProperties worksheet in sheetProperties) {
        if (worksheet.CurrentIndex != index) {
            ZipArchiveEntry entry = this.Archive.GetEntry(Worksheet.GetFileName(worksheet.CurrentIndex));
            if (entry == null) {
                // TODO better message
                throw new Exception("Worksheets could not be rebuilt");
            }
        }
        index++;
    }
}
/// <summary>Lightweight snapshot of one sheet entry in xl/workbook.xml.</summary>
public class WorksheetProperties {
    // 1-based position the sheet's XML part currently occupies (sheetN.xml).
    public int CurrentIndex { get; set; }
    // The workbook-level sheetId attribute value.
    public int SheetId { get; set; }
    // Display name of the sheet.
    public string Name { get; set; }
}
/// <summary>
/// Update [Content_Types].xml file: declare sharedStrings when requested, drop
/// declarations for deleted sheet parts, add declarations for new ones, and
/// rewrite the entry in place only when something changed.
/// </summary>
/// <param name="ensureStrings">When true, guarantee the sharedStrings content type is declared.</param>
private void UpdateContentTypes(bool ensureStrings) {
    if (!(ensureStrings ||
        (this.DeleteWorksheets != null && this.DeleteWorksheets.Any()) ||
        (this.AddWorksheets != null && this.AddWorksheets.Any()))) {
        // Nothing to update
        return;
    }
    using (Stream stream = this.Archive.GetEntry("[Content_Types].xml").Open()) {
        XDocument document = XDocument.Load(stream);
        if (document == null) {
            //TODO error
        }
        bool update = false;
        List<XElement> overrideElements = document.Descendants().Where(d => d.Name.LocalName == "Override").ToList();
        //Ensure SharedStrings: add its Override entry only if not declared yet.
        if (ensureStrings) {
            XElement overrideElement = (from element in overrideElements
                                        from attribute in element.Attributes()
                                        where attribute.Name == "PartName" && attribute.Value.Equals("/xl/sharedStrings.xml", StringComparison.CurrentCultureIgnoreCase)
                                        select element).FirstOrDefault();
            if (overrideElement == null) {
                overrideElement = new XElement(document.Root.GetDefaultNamespace() + "Override");
                overrideElement.Add(new XAttribute("ContentType", "application/vnd.openxmlformats-officedocument.spreadsheetml.sharedStrings+xml"));
                overrideElement.Add(new XAttribute("PartName", "/xl/sharedStrings.xml"));
                document.Root.Add(overrideElement);
                update = true;
            }
        }
        // Drop the content-type declaration of every deleted sheet part.
        if (this.DeleteWorksheets != null && this.DeleteWorksheets.Any()) {
            foreach (var item in this.DeleteWorksheets) {
                // the file name is different for each xml file
                string fileName = string.Format("/xl/worksheets/sheet{0}.xml", item);
                XElement overrideElement = (from element in overrideElements
                                            from attribute in element.Attributes()
                                            where attribute.Name == "PartName" && attribute.Value == fileName
                                            select element).FirstOrDefault();
                if (overrideElement != null) {
                    overrideElement.Remove();
                    update = true;
                }
            }
        }
        // Declare the content type of every newly added sheet part.
        if (this.AddWorksheets != null && this.AddWorksheets.Any()) {
            foreach (var item in this.AddWorksheets) {
                // the file name is different for each xml file
                string fileName = string.Format("/xl/worksheets/sheet{0}.xml", item.SheetId);
                XElement overrideElement = new XElement(document.Root.GetDefaultNamespace() + "Override");
                overrideElement.Add(new XAttribute("ContentType", "application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml"));
                overrideElement.Add(new XAttribute("PartName", fileName));
                document.Root.Add(overrideElement);
                update = true;
            }
        }
        if (update) {
            // Set the stream to the start
            stream.Position = 0;
            // Clear the stream
            stream.SetLength(0);
            // Open the stream so we can override all content of the sheet
            StreamWriter streamWriter = new StreamWriter(stream, Encoding.UTF8);
            document.Save(streamWriter);
            streamWriter.Flush();
        }
    }
}
/// <summary>
/// Update docProps/app.xml file
/// </summary>
/// <param name="sheetNames">Sheet names as produced by UpdateWorkbook; currently unused.</param>
private void UpdateDocPropsApp(string[] sheetNames) {
    // TODO: not implemented. A large block of dead commented-out draft code was
    // removed here; it intended to rewrite the TitlesOfParts / HeadingPairs sections
    // of docProps/app.xml so the document properties reflect added/removed sheets.
    // Until implemented this is a deliberate no-op, which leaves docProps/app.xml
    // stale after a sheet add/delete (harmless for most consumers, but inconsistent).
}
/// <summary>
/// Saves any pending changes to the Excel stream and adds/updates associated files if needed
/// </summary>
public void Dispose() {
    Dispose(true);
    GC.SuppressFinalize(this);
}
/// <summary>
/// Flushes pending shared-string/workbook/content-type updates (when the archive is
/// writable) and closes the archive.
/// </summary>
/// <param name="disposing">Standard dispose-pattern flag; everything held here is managed.</param>
protected virtual void Dispose(bool disposing) {
    if (this.Archive == null) {
        return;
    }
    if (this.Archive.Mode != ZipArchiveMode.Read) {
        bool ensureSharedStrings = false;
        // Update or create xl/sharedStrings.xml file
        if (this.SharedStrings != null) {
            ensureSharedStrings = this.SharedStrings.PendingChanges;
            this.SharedStrings.Write();
        }
        // Update xl/_rels/workbook.xml.rels file
        UpdateRelations(ensureSharedStrings);
        // Update xl/workbook.xml file
        string[] sheetNames = UpdateWorkbook();
        // Update [Content_Types].xml file
        UpdateContentTypes(ensureSharedStrings);
        // Update docProps/app.xml file
        UpdateDocPropsApp(sheetNames);
    }
    this.Archive.Dispose();
    // BUG FIX: clear the reference so a second Dispose (or any later call that hits
    // the null guard above) becomes a no-op instead of touching a disposed ZipArchive.
    this.Archive = null;
}
}
}
|
2881099/dotnetGen_sqlserver | 4,088 | GenMs/FastExcel/Cell.cs | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Text.RegularExpressions;
using System.Threading.Tasks;
using System.Xml.Linq;
namespace FastExcel {
/// <summary>
/// Contains the actual value
/// </summary>
public class Cell {
    /// <summary>
    /// Column Number (Starts at 1)
    /// </summary>
    public int ColumnNumber { get; set; }
    /// <summary>
    /// The value that is stored
    /// </summary>
    public object Value { get; set; }
    /// <summary>
    /// Create a new Cell
    /// </summary>
    /// <param name="columnNumber">Column number starting at 1</param>
    /// <param name="value">Cell Value</param>
    public Cell(int columnNumber, object value) {
        if (columnNumber <= 0) {
            throw new Exception("Column numbers starting at 1");
        }
        this.ColumnNumber = columnNumber;
        this.Value = value;
    }
    /// <summary>
    /// Create a new Cell from a worksheet cell element
    /// </summary>
    /// <param name="cellElement">Cell element from the sheet XML</param>
    /// <param name="sharedStrings">The collection of shared strings used by this document</param>
    public Cell(XElement cellElement, SharedStrings sharedStrings) {
        // t="s" marks a shared-string cell whose element value is an index into sharedStrings.
        bool isTextRow = (from a in cellElement.Attributes("t")
                          where a.Value == "s"
                          select a).Any();
        // The r attribute holds an "A1"-style reference; the letters encode the column.
        string columnName = (from a in cellElement.Attributes("r")
                             select a.Value).FirstOrDefault();
        this.ColumnNumber = GetExcelColumnNumber(columnName);
        if (isTextRow) {
            this.Value = sharedStrings.GetString(cellElement.Value);
        } else {
            this.Value = cellElement.Value;
        }
    }
    /// <summary>
    /// Serialise this cell as a cell element for the given row. Strings are routed
    /// through the shared-strings table; a null Value yields empty output.
    /// </summary>
    internal StringBuilder ToXmlString(SharedStrings sharedStrings, int rowNumber) {
        StringBuilder cell = new StringBuilder();
        if (this.Value != null) {
            bool isString = false;
            object value = this.Value;
            if (this.Value is int) {
                isString = false;
            } else if (this.Value is double) {
                isString = false;
            } else if (this.Value is string) {
                isString = true;
            }
            // NOTE(review): other types (bool, DateTime, decimal, ...) fall through and
            // are written raw into the value element — confirm callers only pass
            // int/double/string, otherwise the produced XML may be invalid for Excel.
            if (isString) {
                value = sharedStrings.AddString(value.ToString());
            }
            // BUG FIX: format with the invariant culture so numeric values always use
            // '.' as the decimal separator — the previous culture-sensitive AppendFormat
            // could emit ',' (e.g. under de-DE) and corrupt the worksheet XML.
            cell.AppendFormat(System.Globalization.CultureInfo.InvariantCulture, "<c r=\"{0}{1}\"{2}>", GetExcelColumnName(this.ColumnNumber), rowNumber, (isString ? " t=\"s\"" : string.Empty));
            cell.AppendFormat(System.Globalization.CultureInfo.InvariantCulture, "<v>{0}</v>", value);
            cell.Append("</c>");
        }
        return cell;
    }
    //http://stackoverflow.com/questions/181596/how-to-convert-a-column-number-eg-127-into-an-excel-column-eg-aa
    /// <summary>
    /// Convert Column Number into Column Name - Character(s) eg 1-A, 2-B
    /// </summary>
    /// <param name="columnNumber">Column Number</param>
    /// <returns>Column Name - Character(s)</returns>
    public static string GetExcelColumnName(int columnNumber) {
        int dividend = columnNumber;
        string columnName = String.Empty;
        int modulo;
        // Repeated base-26 "bijective" digit extraction: there is no zero digit,
        // hence the (dividend - 1) adjustment.
        while (dividend > 0) {
            modulo = (dividend - 1) % 26;
            columnName = string.Concat(Convert.ToChar(65 + modulo), columnName);
            dividend = (int)((dividend - modulo) / 26);
        }
        return columnName;
    }
    //http://stackoverflow.com/questions/181596/how-to-convert-a-column-number-eg-127-into-an-excel-column-eg-aa
    /// <summary>
    /// Covert Column Name - Character(s) into a Column Number eg A-1, B-2, A1 - 1, B9 - 2
    /// </summary>
    /// <param name="columnName">Column Name - Character(s) optionally with the Row Number</param>
    /// <param name="includesRowNumber">Specify if the row number is included</param>
    /// <returns>Column Number</returns>
    public static int GetExcelColumnNumber(string columnName, bool includesRowNumber = true) {
        if (includesRowNumber) {
            columnName = Regex.Replace(columnName, @"\d", "");
        }
        int[] digits = new int[columnName.Length];
        for (int i = 0; i < columnName.Length; ++i) {
            // Generalised: accept lower-case letters too ('a'/'A' -> 1 ... 'z'/'Z' -> 26);
            // previously lower-case input produced nonsense values.
            digits[i] = Convert.ToInt32(char.ToUpperInvariant(columnName[i])) - 64;
        }
        // Fold the bijective base-26 digits, least significant last.
        int mul = 1; int res = 0;
        for (int pos = digits.Length - 1; pos >= 0; --pos) {
            res += digits[pos] * mul;
            mul *= 26;
        }
        return res;
    }
    /// <summary>
    /// Merge the parameter cell into this cell (the parameter's value wins)
    /// </summary>
    /// <param name="cell">Cell to merge</param>
    public void Merge(Cell cell) {
        this.Value = cell.Value;
    }
}
}
|
2881099/dotnetGen_sqlserver | 2,198 | GenMs/FastExcel/FastExcel.Add.cs | using System;
using System.Collections.Generic;
using System.IO;
using System.IO.Compression;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Xml.Linq;
namespace FastExcel {
public partial class FastExcel {
    /// <summary>
    /// Append new worksheet
    /// </summary>
    /// <param name="worksheet">New worksheet</param>
    public void Add(Worksheet worksheet) {
        this.Add(worksheet, null, null);
    }

    /// <summary>
    /// Append new worksheet after the sheet with the given number
    /// </summary>
    /// <param name="worksheet">New worksheet</param>
    /// <param name="insertAfterSheetNumber">Number of the sheet to insert after</param>
    public void Add(Worksheet worksheet, int insertAfterSheetNumber) {
        this.Add(worksheet, insertAfterSheetNumber, null);
    }

    /// <summary>
    /// Append new worksheet after the sheet with the given name
    /// </summary>
    /// <param name="worksheet">New worksheet</param>
    /// <param name="insertAfterSheetName">Name of the sheet to insert after</param>
    public void Add(Worksheet worksheet, string insertAfterSheetName) {
        this.Add(worksheet, null, insertAfterSheetName);
    }

    private void Add(Worksheet worksheet, int? insertAfterSheetNumber = null, string insertAfterSheetName = null) {
        // BUG FIX: the guard was inverted (it threw when NOT in ReadOnly mode) and ran
        // only after state had already been mutated; fail fast before touching anything.
        if (this.ReadOnly) {
            throw new Exception("FastExcel is in ReadOnly mode so cannot perform a write");
        }
        CheckFiles();
        PrepareArchive(true);
        worksheet.ValidateNewWorksheet(this, insertAfterSheetNumber, insertAfterSheetName);
        if (this.AddWorksheets == null) {
            this.AddWorksheets = new List<WorksheetAddSettings>();
        }
        this.AddWorksheets.Add(worksheet.AddSettings);
        // Check if ExistingHeadingRows will be overridden by the dataset
        if (worksheet.ExistingHeadingRows != 0 && worksheet.Rows.Any(r => r.RowNumber <= worksheet.ExistingHeadingRows)) {
            throw new Exception("Existing Heading Rows was specified but some or all will be overridden by data rows. Check DataSet.Row.RowNumber against ExistingHeadingRows");
        }
        // BUG FIX: the StreamWriter was declared as null (guaranteed NullReferenceException
        // on the first Write); restore the archive-entry writer that was commented out.
        using (StreamWriter streamWriter = new StreamWriter(this.Archive.CreateEntry(worksheet.FileName).Open())) {
            streamWriter.Write(worksheet.Headers);
            if (!worksheet.Template) {
                worksheet.Headers = null; // Free memory; templates keep headers for reuse.
            }
            this.SharedStrings.ReadWriteMode = true;
            // Add Rows
            foreach (Row row in worksheet.Rows) {
                streamWriter.Write(row.ToXmlString(this.SharedStrings));
            }
            this.SharedStrings.ReadWriteMode = false;
            // Add Footers
            streamWriter.Write(worksheet.Footers);
            if (!worksheet.Template) {
                worksheet.Footers = null;
            }
            streamWriter.Flush();
        }
    }
}
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 37,993 | src/transformers/models/esm/openfold_utils/residue_constants.py | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants used in AlphaFold."""
import collections
import copy
import functools
from importlib import resources
from typing import Dict, List, Mapping, Sequence, Tuple
import numpy as np
# Internal import (35fd).
# Distance from one CA to next CA [trans configuration: omega = 180].
ca_ca = 3.80209737096
# Format: The list for each AA type contains chi1, chi2, chi3, chi4 in
# this order (or a relevant subset from chi1 onwards). ALA and GLY don't have
# chi angles so their chi angle lists are empty.
chi_angles_atoms: Dict[str, List[List[str]]] = {
"ALA": [],
# Chi5 in arginine is always 0 +- 5 degrees, so ignore it.
"ARG": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"], ["CB", "CG", "CD", "NE"], ["CG", "CD", "NE", "CZ"]],
"ASN": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "OD1"]],
"ASP": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "OD1"]],
"CYS": [["N", "CA", "CB", "SG"]],
"GLN": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"], ["CB", "CG", "CD", "OE1"]],
"GLU": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"], ["CB", "CG", "CD", "OE1"]],
"GLY": [],
"HIS": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "ND1"]],
"ILE": [["N", "CA", "CB", "CG1"], ["CA", "CB", "CG1", "CD1"]],
"LEU": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
"LYS": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"], ["CB", "CG", "CD", "CE"], ["CG", "CD", "CE", "NZ"]],
"MET": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "SD"], ["CB", "CG", "SD", "CE"]],
"PHE": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
"PRO": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD"]],
"SER": [["N", "CA", "CB", "OG"]],
"THR": [["N", "CA", "CB", "OG1"]],
"TRP": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
"TYR": [["N", "CA", "CB", "CG"], ["CA", "CB", "CG", "CD1"]],
"VAL": [["N", "CA", "CB", "CG1"]],
}
# If chi angles given in fixed-length array, this matrix determines how to mask
# them for each AA type. The order is as per restype_order (see below).
chi_angles_mask: List[List[float]] = [
[0.0, 0.0, 0.0, 0.0], # ALA
[1.0, 1.0, 1.0, 1.0], # ARG
[1.0, 1.0, 0.0, 0.0], # ASN
[1.0, 1.0, 0.0, 0.0], # ASP
[1.0, 0.0, 0.0, 0.0], # CYS
[1.0, 1.0, 1.0, 0.0], # GLN
[1.0, 1.0, 1.0, 0.0], # GLU
[0.0, 0.0, 0.0, 0.0], # GLY
[1.0, 1.0, 0.0, 0.0], # HIS
[1.0, 1.0, 0.0, 0.0], # ILE
[1.0, 1.0, 0.0, 0.0], # LEU
[1.0, 1.0, 1.0, 1.0], # LYS
[1.0, 1.0, 1.0, 0.0], # MET
[1.0, 1.0, 0.0, 0.0], # PHE
[1.0, 1.0, 0.0, 0.0], # PRO
[1.0, 0.0, 0.0, 0.0], # SER
[1.0, 0.0, 0.0, 0.0], # THR
[1.0, 1.0, 0.0, 0.0], # TRP
[1.0, 1.0, 0.0, 0.0], # TYR
[1.0, 0.0, 0.0, 0.0], # VAL
]
# The following chi angles are pi periodic: they can be rotated by a multiple
# of pi without affecting the structure.
chi_pi_periodic: List[List[float]] = [
[0.0, 0.0, 0.0, 0.0], # ALA
[0.0, 0.0, 0.0, 0.0], # ARG
[0.0, 0.0, 0.0, 0.0], # ASN
[0.0, 1.0, 0.0, 0.0], # ASP
[0.0, 0.0, 0.0, 0.0], # CYS
[0.0, 0.0, 0.0, 0.0], # GLN
[0.0, 0.0, 1.0, 0.0], # GLU
[0.0, 0.0, 0.0, 0.0], # GLY
[0.0, 0.0, 0.0, 0.0], # HIS
[0.0, 0.0, 0.0, 0.0], # ILE
[0.0, 0.0, 0.0, 0.0], # LEU
[0.0, 0.0, 0.0, 0.0], # LYS
[0.0, 0.0, 0.0, 0.0], # MET
[0.0, 1.0, 0.0, 0.0], # PHE
[0.0, 0.0, 0.0, 0.0], # PRO
[0.0, 0.0, 0.0, 0.0], # SER
[0.0, 0.0, 0.0, 0.0], # THR
[0.0, 0.0, 0.0, 0.0], # TRP
[0.0, 1.0, 0.0, 0.0], # TYR
[0.0, 0.0, 0.0, 0.0], # VAL
[0.0, 0.0, 0.0, 0.0], # UNK
]
# Atoms positions relative to the 8 rigid groups, defined by the pre-omega, phi,
# psi and chi angles:
# 0: 'backbone group',
# 1: 'pre-omega-group', (empty)
# 2: 'phi-group', (currently empty, because it defines only hydrogens)
# 3: 'psi-group',
# 4,5,6,7: 'chi1,2,3,4-group'
# The atom positions are relative to the axis-end-atom of the corresponding
# rotation axis. The x-axis is in direction of the rotation axis, and the y-axis
# is defined such that the dihedral-angle-definiting atom (the last entry in
# chi_angles_atoms above) is in the xy-plane (with a positive y-coordinate).
# format: [atomname, group_idx, rel_position]
rigid_group_atom_positions: Dict[str, List[Tuple[str, int, Tuple[float, float, float]]]] = {
"ALA": [
("N", 0, (-0.525, 1.363, 0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.526, -0.000, -0.000)),
("CB", 0, (-0.529, -0.774, -1.205)),
("O", 3, (0.627, 1.062, 0.000)),
],
"ARG": [
("N", 0, (-0.524, 1.362, -0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.525, -0.000, -0.000)),
("CB", 0, (-0.524, -0.778, -1.209)),
("O", 3, (0.626, 1.062, 0.000)),
("CG", 4, (0.616, 1.390, -0.000)),
("CD", 5, (0.564, 1.414, 0.000)),
("NE", 6, (0.539, 1.357, -0.000)),
("NH1", 7, (0.206, 2.301, 0.000)),
("NH2", 7, (2.078, 0.978, -0.000)),
("CZ", 7, (0.758, 1.093, -0.000)),
],
"ASN": [
("N", 0, (-0.536, 1.357, 0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.526, -0.000, -0.000)),
("CB", 0, (-0.531, -0.787, -1.200)),
("O", 3, (0.625, 1.062, 0.000)),
("CG", 4, (0.584, 1.399, 0.000)),
("ND2", 5, (0.593, -1.188, 0.001)),
("OD1", 5, (0.633, 1.059, 0.000)),
],
"ASP": [
("N", 0, (-0.525, 1.362, -0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.527, 0.000, -0.000)),
("CB", 0, (-0.526, -0.778, -1.208)),
("O", 3, (0.626, 1.062, -0.000)),
("CG", 4, (0.593, 1.398, -0.000)),
("OD1", 5, (0.610, 1.091, 0.000)),
("OD2", 5, (0.592, -1.101, -0.003)),
],
"CYS": [
("N", 0, (-0.522, 1.362, -0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.524, 0.000, 0.000)),
("CB", 0, (-0.519, -0.773, -1.212)),
("O", 3, (0.625, 1.062, -0.000)),
("SG", 4, (0.728, 1.653, 0.000)),
],
"GLN": [
("N", 0, (-0.526, 1.361, -0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.526, 0.000, 0.000)),
("CB", 0, (-0.525, -0.779, -1.207)),
("O", 3, (0.626, 1.062, -0.000)),
("CG", 4, (0.615, 1.393, 0.000)),
("CD", 5, (0.587, 1.399, -0.000)),
("NE2", 6, (0.593, -1.189, -0.001)),
("OE1", 6, (0.634, 1.060, 0.000)),
],
"GLU": [
("N", 0, (-0.528, 1.361, 0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.526, -0.000, -0.000)),
("CB", 0, (-0.526, -0.781, -1.207)),
("O", 3, (0.626, 1.062, 0.000)),
("CG", 4, (0.615, 1.392, 0.000)),
("CD", 5, (0.600, 1.397, 0.000)),
("OE1", 6, (0.607, 1.095, -0.000)),
("OE2", 6, (0.589, -1.104, -0.001)),
],
"GLY": [
("N", 0, (-0.572, 1.337, 0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.517, -0.000, -0.000)),
("O", 3, (0.626, 1.062, -0.000)),
],
"HIS": [
("N", 0, (-0.527, 1.360, 0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.525, 0.000, 0.000)),
("CB", 0, (-0.525, -0.778, -1.208)),
("O", 3, (0.625, 1.063, 0.000)),
("CG", 4, (0.600, 1.370, -0.000)),
("CD2", 5, (0.889, -1.021, 0.003)),
("ND1", 5, (0.744, 1.160, -0.000)),
("CE1", 5, (2.030, 0.851, 0.002)),
("NE2", 5, (2.145, -0.466, 0.004)),
],
"ILE": [
("N", 0, (-0.493, 1.373, -0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.527, -0.000, -0.000)),
("CB", 0, (-0.536, -0.793, -1.213)),
("O", 3, (0.627, 1.062, -0.000)),
("CG1", 4, (0.534, 1.437, -0.000)),
("CG2", 4, (0.540, -0.785, -1.199)),
("CD1", 5, (0.619, 1.391, 0.000)),
],
"LEU": [
("N", 0, (-0.520, 1.363, 0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.525, -0.000, -0.000)),
("CB", 0, (-0.522, -0.773, -1.214)),
("O", 3, (0.625, 1.063, -0.000)),
("CG", 4, (0.678, 1.371, 0.000)),
("CD1", 5, (0.530, 1.430, -0.000)),
("CD2", 5, (0.535, -0.774, 1.200)),
],
"LYS": [
("N", 0, (-0.526, 1.362, -0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.526, 0.000, 0.000)),
("CB", 0, (-0.524, -0.778, -1.208)),
("O", 3, (0.626, 1.062, -0.000)),
("CG", 4, (0.619, 1.390, 0.000)),
("CD", 5, (0.559, 1.417, 0.000)),
("CE", 6, (0.560, 1.416, 0.000)),
("NZ", 7, (0.554, 1.387, 0.000)),
],
"MET": [
("N", 0, (-0.521, 1.364, -0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.525, 0.000, 0.000)),
("CB", 0, (-0.523, -0.776, -1.210)),
("O", 3, (0.625, 1.062, -0.000)),
("CG", 4, (0.613, 1.391, -0.000)),
("SD", 5, (0.703, 1.695, 0.000)),
("CE", 6, (0.320, 1.786, -0.000)),
],
"PHE": [
("N", 0, (-0.518, 1.363, 0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.524, 0.000, -0.000)),
("CB", 0, (-0.525, -0.776, -1.212)),
("O", 3, (0.626, 1.062, -0.000)),
("CG", 4, (0.607, 1.377, 0.000)),
("CD1", 5, (0.709, 1.195, -0.000)),
("CD2", 5, (0.706, -1.196, 0.000)),
("CE1", 5, (2.102, 1.198, -0.000)),
("CE2", 5, (2.098, -1.201, -0.000)),
("CZ", 5, (2.794, -0.003, -0.001)),
],
"PRO": [
("N", 0, (-0.566, 1.351, -0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.527, -0.000, 0.000)),
("CB", 0, (-0.546, -0.611, -1.293)),
("O", 3, (0.621, 1.066, 0.000)),
("CG", 4, (0.382, 1.445, 0.0)),
# ('CD', 5, (0.427, 1.440, 0.0)),
("CD", 5, (0.477, 1.424, 0.0)), # manually made angle 2 degrees larger
],
"SER": [
("N", 0, (-0.529, 1.360, -0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.525, -0.000, -0.000)),
("CB", 0, (-0.518, -0.777, -1.211)),
("O", 3, (0.626, 1.062, -0.000)),
("OG", 4, (0.503, 1.325, 0.000)),
],
"THR": [
("N", 0, (-0.517, 1.364, 0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.526, 0.000, -0.000)),
("CB", 0, (-0.516, -0.793, -1.215)),
("O", 3, (0.626, 1.062, 0.000)),
("CG2", 4, (0.550, -0.718, -1.228)),
("OG1", 4, (0.472, 1.353, 0.000)),
],
"TRP": [
("N", 0, (-0.521, 1.363, 0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.525, -0.000, 0.000)),
("CB", 0, (-0.523, -0.776, -1.212)),
("O", 3, (0.627, 1.062, 0.000)),
("CG", 4, (0.609, 1.370, -0.000)),
("CD1", 5, (0.824, 1.091, 0.000)),
("CD2", 5, (0.854, -1.148, -0.005)),
("CE2", 5, (2.186, -0.678, -0.007)),
("CE3", 5, (0.622, -2.530, -0.007)),
("NE1", 5, (2.140, 0.690, -0.004)),
("CH2", 5, (3.028, -2.890, -0.013)),
("CZ2", 5, (3.283, -1.543, -0.011)),
("CZ3", 5, (1.715, -3.389, -0.011)),
],
"TYR": [
("N", 0, (-0.522, 1.362, 0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.524, -0.000, -0.000)),
("CB", 0, (-0.522, -0.776, -1.213)),
("O", 3, (0.627, 1.062, -0.000)),
("CG", 4, (0.607, 1.382, -0.000)),
("CD1", 5, (0.716, 1.195, -0.000)),
("CD2", 5, (0.713, -1.194, -0.001)),
("CE1", 5, (2.107, 1.200, -0.002)),
("CE2", 5, (2.104, -1.201, -0.003)),
("OH", 5, (4.168, -0.002, -0.005)),
("CZ", 5, (2.791, -0.001, -0.003)),
],
"VAL": [
("N", 0, (-0.494, 1.373, -0.000)),
("CA", 0, (0.000, 0.000, 0.000)),
("C", 0, (1.527, -0.000, -0.000)),
("CB", 0, (-0.533, -0.795, -1.213)),
("O", 3, (0.627, 1.062, -0.000)),
("CG1", 4, (0.540, 1.429, -0.000)),
("CG2", 4, (0.533, -0.776, 1.203)),
],
}
# A list of atoms (excluding hydrogen) for each AA type. PDB naming convention.
residue_atoms: Dict[str, List[str]] = {
"ALA": ["C", "CA", "CB", "N", "O"],
"ARG": ["C", "CA", "CB", "CG", "CD", "CZ", "N", "NE", "O", "NH1", "NH2"],
"ASP": ["C", "CA", "CB", "CG", "N", "O", "OD1", "OD2"],
"ASN": ["C", "CA", "CB", "CG", "N", "ND2", "O", "OD1"],
"CYS": ["C", "CA", "CB", "N", "O", "SG"],
"GLU": ["C", "CA", "CB", "CG", "CD", "N", "O", "OE1", "OE2"],
"GLN": ["C", "CA", "CB", "CG", "CD", "N", "NE2", "O", "OE1"],
"GLY": ["C", "CA", "N", "O"],
"HIS": ["C", "CA", "CB", "CG", "CD2", "CE1", "N", "ND1", "NE2", "O"],
"ILE": ["C", "CA", "CB", "CG1", "CG2", "CD1", "N", "O"],
"LEU": ["C", "CA", "CB", "CG", "CD1", "CD2", "N", "O"],
"LYS": ["C", "CA", "CB", "CG", "CD", "CE", "N", "NZ", "O"],
"MET": ["C", "CA", "CB", "CG", "CE", "N", "O", "SD"],
"PHE": ["C", "CA", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ", "N", "O"],
"PRO": ["C", "CA", "CB", "CG", "CD", "N", "O"],
"SER": ["C", "CA", "CB", "N", "O", "OG"],
"THR": ["C", "CA", "CB", "CG2", "N", "O", "OG1"],
"TRP": ["C", "CA", "CB", "CG", "CD1", "CD2", "CE2", "CE3", "CZ2", "CZ3", "CH2", "N", "NE1", "O"],
"TYR": ["C", "CA", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ", "N", "O", "OH"],
"VAL": ["C", "CA", "CB", "CG1", "CG2", "N", "O"],
}
# Naming swaps for ambiguous atom names.
# Due to symmetries in the amino acids the naming of atoms is ambiguous in
# 4 of the 20 amino acids.
# (The LDDT paper lists 7 amino acids as ambiguous, but the naming ambiguities
# in LEU, VAL and ARG can be resolved by using the 3d constellations of
# the 'ambiguous' atoms and their neighbours)
# TODO: ^ interpret this
residue_atom_renaming_swaps: Dict[str, Dict[str, str]] = {
"ASP": {"OD1": "OD2"},
"GLU": {"OE1": "OE2"},
"PHE": {"CD1": "CD2", "CE1": "CE2"},
"TYR": {"CD1": "CD2", "CE1": "CE2"},
}
# Van der Waals radii [Angstroem] of the atoms (from Wikipedia),
# keyed by element symbol (first character of the PDB atom name).
van_der_waals_radius: Dict[str, float] = {
    "C": 1.7,
    "N": 1.55,
    "O": 1.52,
    "S": 1.8,
}
# Literature bond length/angle records with their standard deviations,
# populated by load_stereo_chemical_props() below.
Bond = collections.namedtuple("Bond", ["atom1_name", "atom2_name", "length", "stddev"])
# NOTE(review): the third field is spelled "atom3name" (no underscore).
# Keep it as-is — load_stereo_chemical_props() accesses `ba.atom3name`.
BondAngle = collections.namedtuple(
    "BondAngle",
    ["atom1_name", "atom2_name", "atom3name", "angle_rad", "stddev"],
)
def map_structure_with_atom_order(in_list: list, first_call: bool = True) -> list:
    """Recursively replace atom-name strings in a nested list with their atom_order indices.

    The outermost call deep-copies the input so the caller's structure is never
    mutated; recursive calls then rewrite that copy in place.

    Raises:
        ValueError: If an element is neither a list nor a string.
    """
    if first_call:
        in_list = copy.deepcopy(in_list)
    for i, item in enumerate(in_list):
        if isinstance(item, list):
            in_list[i] = map_structure_with_atom_order(item, first_call=False)
        elif isinstance(item, str):
            in_list[i] = atom_order[item]
        else:
            raise ValueError("Unexpected type when mapping nested lists!")
    return in_list
# Cached: the resource file is parsed at most once per process.
@functools.lru_cache(maxsize=None)
def load_stereo_chemical_props() -> (
    Tuple[
        Mapping[str, List[Bond]],
        Mapping[str, List[Bond]],
        Mapping[str, List[BondAngle]],
    ]
):
    """Load stereo_chemical_props.txt into a nice structure.
    Load literature values for bond lengths and bond angles and translate bond angles into the length of the opposite
    edge of the triangle ("residue_virtual_bonds").
    Returns:
        residue_bonds: dict that maps resname --> list of Bond tuples residue_virtual_bonds: dict that maps resname -->
        list of Bond tuples residue_bond_angles: dict that maps resname --> list of BondAngle tuples
    """
    # TODO: this file should be downloaded in a setup script
    stereo_chemical_props = resources.read_text("openfold.resources", "stereo_chemical_props.txt")
    lines_iter = iter(stereo_chemical_props.splitlines())
    # Load bond lengths. Each data line is "ATOM1-ATOM2 RESNAME LENGTH STDDEV";
    # a line containing only "-" terminates the section.
    residue_bonds: Dict[str, List[Bond]] = {}
    next(lines_iter)  # Skip header line.
    for line in lines_iter:
        if line.strip() == "-":
            break
        bond, resname, bond_length, stddev = line.split()
        atom1, atom2 = bond.split("-")
        if resname not in residue_bonds:
            residue_bonds[resname] = []
        residue_bonds[resname].append(Bond(atom1, atom2, float(bond_length), float(stddev)))
    residue_bonds["UNK"] = []  # Unknown residue gets an empty bond list.
    # Load bond angles. Same "-"-terminated format, but with three atom names
    # and angles in degrees (converted to radians here).
    residue_bond_angles: Dict[str, List[BondAngle]] = {}
    next(lines_iter)  # Skip empty line.
    next(lines_iter)  # Skip header line.
    for line in lines_iter:
        if line.strip() == "-":
            break
        bond, resname, angle_degree, stddev_degree = line.split()
        atom1, atom2, atom3 = bond.split("-")
        if resname not in residue_bond_angles:
            residue_bond_angles[resname] = []
        residue_bond_angles[resname].append(
            BondAngle(
                atom1,
                atom2,
                atom3,
                float(angle_degree) / 180.0 * np.pi,
                float(stddev_degree) / 180.0 * np.pi,
            )
        )
    residue_bond_angles["UNK"] = []
    def make_bond_key(atom1_name: str, atom2_name: str) -> str:
        """Unique key to lookup bonds."""
        # Sorted so A-B and B-A map to the same key.
        return "-".join(sorted([atom1_name, atom2_name]))
    # Translate bond angles into distances ("virtual bonds"): for each angle
    # atom1-atom2-atom3, derive the implied atom1..atom3 distance.
    residue_virtual_bonds: Dict[str, List[Bond]] = {}
    for resname, bond_angles in residue_bond_angles.items():
        # Create a fast lookup dict for bond lengths.
        bond_cache: Dict[str, Bond] = {}
        for b in residue_bonds[resname]:
            bond_cache[make_bond_key(b.atom1_name, b.atom2_name)] = b
        residue_virtual_bonds[resname] = []
        for ba in bond_angles:
            bond1 = bond_cache[make_bond_key(ba.atom1_name, ba.atom2_name)]
            bond2 = bond_cache[make_bond_key(ba.atom2_name, ba.atom3name)]
            # Compute distance between atom1 and atom3 using the law of cosines
            # c^2 = a^2 + b^2 - 2ab*cos(gamma).
            gamma = ba.angle_rad
            length = np.sqrt(bond1.length**2 + bond2.length**2 - 2 * bond1.length * bond2.length * np.cos(gamma))
            # Propagation of uncertainty assuming uncorrelated errors.
            dl_outer = 0.5 / length
            dl_dgamma = (2 * bond1.length * bond2.length * np.sin(gamma)) * dl_outer
            dl_db1 = (2 * bond1.length - 2 * bond2.length * np.cos(gamma)) * dl_outer
            dl_db2 = (2 * bond2.length - 2 * bond1.length * np.cos(gamma)) * dl_outer
            stddev = np.sqrt(
                (dl_dgamma * ba.stddev) ** 2 + (dl_db1 * bond1.stddev) ** 2 + (dl_db2 * bond2.stddev) ** 2
            )
            residue_virtual_bonds[resname].append(Bond(ba.atom1_name, ba.atom3name, length, stddev))
    return (residue_bonds, residue_virtual_bonds, residue_bond_angles)
# Between-residue bond lengths for general bonds (first element) and for Proline
# (second element).
between_res_bond_length_c_n: Tuple[float, float] = (1.329, 1.341)
between_res_bond_length_stddev_c_n: Tuple[float, float] = (0.014, 0.016)
# Between-residue cos_angles.
between_res_cos_angles_c_n_ca: Tuple[float, float] = (-0.5203, 0.0353) # degrees: 121.352 +- 2.315
between_res_cos_angles_ca_c_n: Tuple[float, float] = (-0.4473, 0.0311) # degrees: 116.568 +- 1.995
# This mapping is used when we need to store atom data in a format that requires
# fixed atom data size for every residue (e.g. a numpy array).
atom_types: List[str] = [
"N",
"CA",
"C",
"CB",
"O",
"CG",
"CG1",
"CG2",
"OG",
"OG1",
"SG",
"CD",
"CD1",
"CD2",
"ND1",
"ND2",
"OD1",
"OD2",
"SD",
"CE",
"CE1",
"CE2",
"CE3",
"NE",
"NE1",
"NE2",
"OE1",
"OE2",
"CH2",
"NH1",
"NH2",
"OH",
"CZ",
"CZ2",
"CZ3",
"NZ",
"OXT",
]
atom_order: Dict[str, int] = {atom_type: i for i, atom_type in enumerate(atom_types)}
atom_type_num = len(atom_types) # := 37.
# A compact atom encoding with 14 columns
# pylint: disable=line-too-long
# pylint: disable=bad-whitespace
restype_name_to_atom14_names: Dict[str, List[str]] = {
"ALA": ["N", "CA", "C", "O", "CB", "", "", "", "", "", "", "", "", ""],
"ARG": ["N", "CA", "C", "O", "CB", "CG", "CD", "NE", "CZ", "NH1", "NH2", "", "", ""],
"ASN": ["N", "CA", "C", "O", "CB", "CG", "OD1", "ND2", "", "", "", "", "", ""],
"ASP": ["N", "CA", "C", "O", "CB", "CG", "OD1", "OD2", "", "", "", "", "", ""],
"CYS": ["N", "CA", "C", "O", "CB", "SG", "", "", "", "", "", "", "", ""],
"GLN": ["N", "CA", "C", "O", "CB", "CG", "CD", "OE1", "NE2", "", "", "", "", ""],
"GLU": ["N", "CA", "C", "O", "CB", "CG", "CD", "OE1", "OE2", "", "", "", "", ""],
"GLY": ["N", "CA", "C", "O", "", "", "", "", "", "", "", "", "", ""],
"HIS": ["N", "CA", "C", "O", "CB", "CG", "ND1", "CD2", "CE1", "NE2", "", "", "", ""],
"ILE": ["N", "CA", "C", "O", "CB", "CG1", "CG2", "CD1", "", "", "", "", "", ""],
"LEU": ["N", "CA", "C", "O", "CB", "CG", "CD1", "CD2", "", "", "", "", "", ""],
"LYS": ["N", "CA", "C", "O", "CB", "CG", "CD", "CE", "NZ", "", "", "", "", ""],
"MET": ["N", "CA", "C", "O", "CB", "CG", "SD", "CE", "", "", "", "", "", ""],
"PHE": ["N", "CA", "C", "O", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ", "", "", ""],
"PRO": ["N", "CA", "C", "O", "CB", "CG", "CD", "", "", "", "", "", "", ""],
"SER": ["N", "CA", "C", "O", "CB", "OG", "", "", "", "", "", "", "", ""],
"THR": ["N", "CA", "C", "O", "CB", "OG1", "CG2", "", "", "", "", "", "", ""],
"TRP": ["N", "CA", "C", "O", "CB", "CG", "CD1", "CD2", "NE1", "CE2", "CE3", "CZ2", "CZ3", "CH2"],
"TYR": ["N", "CA", "C", "O", "CB", "CG", "CD1", "CD2", "CE1", "CE2", "CZ", "OH", "", ""],
"VAL": ["N", "CA", "C", "O", "CB", "CG1", "CG2", "", "", "", "", "", "", ""],
"UNK": ["", "", "", "", "", "", "", "", "", "", "", "", "", ""],
}
# pylint: enable=line-too-long
# pylint: enable=bad-whitespace
# This is the standard residue order when coding AA type as a number.
# Reproduce it by taking 3-letter AA codes and sorting them alphabetically.
restypes: List[str] = [
"A",
"R",
"N",
"D",
"C",
"Q",
"E",
"G",
"H",
"I",
"L",
"K",
"M",
"F",
"P",
"S",
"T",
"W",
"Y",
"V",
]
restype_order: Dict[str, int] = {restype: i for i, restype in enumerate(restypes)}
restype_num = len(restypes) # := 20.
unk_restype_index = restype_num # Catch-all index for unknown restypes.
restypes_with_x: List[str] = restypes + ["X"]
restype_order_with_x: Dict[str, int] = {restype: i for i, restype in enumerate(restypes_with_x)}
def sequence_to_onehot(sequence: str, mapping: Mapping[str, int], map_unknown_to_x: bool = False) -> np.ndarray:
    """One-hot encode an amino-acid sequence.

    Args:
        sequence: An amino acid sequence.
        mapping: Maps each amino-acid letter to a column index. The indices must
            form a gapless range starting at 0.
        map_unknown_to_x: When True, uppercase letters absent from the mapping fall
            back to the index of 'X' (the mapping must then contain 'X'); non-letter
            or lowercase characters still raise. When False, any letter not in the
            mapping raises a KeyError.

    Returns:
        An int32 array of shape (len(sequence), num_unique_aas).

    Raises:
        ValueError: If the mapping values are not exactly 0..num_unique_aas-1, or
            (with map_unknown_to_x) an invalid character is encountered.
    """
    num_entries = max(mapping.values()) + 1
    if sorted(set(mapping.values())) != list(range(num_entries)):
        raise ValueError(
            "The mapping must have values from 0 to num_unique_aas-1 without any gaps. Got: %s"
            % sorted(mapping.values())
        )
    encoded = np.zeros((len(sequence), num_entries), dtype=np.int32)
    for position, letter in enumerate(sequence):
        if not map_unknown_to_x:
            column = mapping[letter]
        elif letter.isalpha() and letter.isupper():
            column = mapping.get(letter, mapping["X"])
        else:
            raise ValueError(f"Invalid character in the sequence: {letter}")
        encoded[position, column] = 1
    return encoded
restype_1to3: Dict[str, str] = {
"A": "ALA",
"R": "ARG",
"N": "ASN",
"D": "ASP",
"C": "CYS",
"Q": "GLN",
"E": "GLU",
"G": "GLY",
"H": "HIS",
"I": "ILE",
"L": "LEU",
"K": "LYS",
"M": "MET",
"F": "PHE",
"P": "PRO",
"S": "SER",
"T": "THR",
"W": "TRP",
"Y": "TYR",
"V": "VAL",
}
# NB: restype_3to1 differs from Bio.PDB.protein_letters_3to1 by being a simple
# 1-to-1 mapping of 3 letter names to one letter names. The latter contains
# many more, and less common, three letter names as keys and maps many of these
# to the same one letter name (including 'X' and 'U' which we don't use here).
restype_3to1: Dict[str, str] = {v: k for k, v in restype_1to3.items()}
# Define a restype name for all unknown residues.
unk_restype = "UNK"
resnames: List[str] = [restype_1to3[r] for r in restypes] + [unk_restype]
resname_to_idx: Dict[str, int] = {resname: i for i, resname in enumerate(resnames)}
# The mapping here uses hhblits convention, so that B is mapped to D, J and O
# are mapped to X, U is mapped to C, and Z is mapped to E. Other than that the
# remaining 20 amino acids are kept in alphabetical order.
# There are 2 non-amino acid codes, X (representing any amino acid) and
# "-" representing a missing amino acid in an alignment. The id for these
# codes is put at the end (20 and 21) so that they can easily be ignored if
# desired.
HHBLITS_AA_TO_ID: Dict[str, int] = {
"A": 0,
"B": 2,
"C": 1,
"D": 2,
"E": 3,
"F": 4,
"G": 5,
"H": 6,
"I": 7,
"J": 20,
"K": 8,
"L": 9,
"M": 10,
"N": 11,
"O": 20,
"P": 12,
"Q": 13,
"R": 14,
"S": 15,
"T": 16,
"U": 1,
"V": 17,
"W": 18,
"X": 20,
"Y": 19,
"Z": 3,
"-": 21,
}
# Partial inversion of HHBLITS_AA_TO_ID.
ID_TO_HHBLITS_AA: Dict[int, str] = {
0: "A",
1: "C", # Also U.
2: "D", # Also B.
3: "E", # Also Z.
4: "F",
5: "G",
6: "H",
7: "I",
8: "K",
9: "L",
10: "M",
11: "N",
12: "P",
13: "Q",
14: "R",
15: "S",
16: "T",
17: "V",
18: "W",
19: "Y",
20: "X", # Includes J and O.
21: "-",
}
restypes_with_x_and_gap: List[str] = restypes + ["X", "-"]
MAP_HHBLITS_AATYPE_TO_OUR_AATYPE: Tuple[int, ...] = tuple(
restypes_with_x_and_gap.index(ID_TO_HHBLITS_AA[i]) for i in range(len(restypes_with_x_and_gap))
)
def _make_standard_atom_mask() -> np.ndarray:
    """Returns [num_res_types, num_atom_types] mask array.

    Entry [r, a] is 1 iff atom type `a` occurs in residue type `r`.
    The extra (+1) row is the unknown residue type and stays all zeros.
    """
    mask = np.zeros([restype_num + 1, atom_type_num], dtype=np.int32)
    for restype_idx, one_letter in enumerate(restypes):
        for atom_name in residue_atoms[restype_1to3[one_letter]]:
            mask[restype_idx, atom_order[atom_name]] = 1
    return mask
STANDARD_ATOM_MASK = _make_standard_atom_mask()
# A one hot representation for the first and second atoms defining the axis
# of rotation for each chi-angle in each residue.
def chi_angle_atom(atom_index: int) -> np.ndarray:
    """Define chi-angle rigid groups via one-hot representations.

    Returns an array of shape [num_restypes + 1, atom_type_num, 4]: for each
    residue type and each of the 4 possible chi angles, a one-hot over atom
    types selecting atom `atom_index` of that chi group.
    """
    per_restype_indices = {}
    for resname, chi_groups in chi_angles_atoms.items():
        idxs = [atom_types.index(group[atom_index]) for group in chi_groups]
        # Pad to 4 chi angles; -1 picks the LAST row of the identity below
        # (matching the original behavior for missing chi angles).
        per_restype_indices[resname] = idxs + [-1] * (4 - len(idxs))
    one_hots = [np.eye(atom_type_num)[per_restype_indices[restype_1to3[r]]] for r in restypes]
    one_hots.append(np.zeros([4, atom_type_num]))  # Add zeros for residue `X`.
    stacked = np.stack(one_hots, axis=0)
    return np.transpose(stacked, [0, 2, 1])
chi_atom_1_one_hot = chi_angle_atom(1)
chi_atom_2_one_hot = chi_angle_atom(2)
# An array like chi_angles_atoms but using indices rather than names.
chi_angles_atom_indices_list: List[List[List[str]]] = [chi_angles_atoms[restype_1to3[r]] for r in restypes]
chi_angles_atom_indices_ours: list = map_structure_with_atom_order(chi_angles_atom_indices_list)
chi_angles_atom_indices = np.array(
[chi_atoms + ([[0, 0, 0, 0]] * (4 - len(chi_atoms))) for chi_atoms in chi_angles_atom_indices_list]
)
# Mapping from (res_name, atom_name) pairs to the atom's chi group index
# and atom index within that group.
chi_groups_for_atom: Dict[Tuple[str, str], List[Tuple[int, int]]] = collections.defaultdict(list)
for res_name, chi_angle_atoms_for_res in chi_angles_atoms.items():
for chi_group_i, chi_group in enumerate(chi_angle_atoms_for_res):
for atom_i, atom in enumerate(chi_group):
chi_groups_for_atom[(res_name, atom)].append((chi_group_i, atom_i))
chi_groups_for_atom = dict(chi_groups_for_atom)
def _make_rigid_transformation_4x4(ex: np.ndarray, ey: np.ndarray, translation: np.ndarray) -> np.ndarray:
"""Create a rigid 4x4 transformation matrix from two axes and transl."""
# Normalize ex.
ex_normalized = ex / np.linalg.norm(ex)
# make ey perpendicular to ex
ey_normalized = ey - np.dot(ey, ex_normalized) * ex_normalized
ey_normalized /= np.linalg.norm(ey_normalized)
# compute ez as cross product
eznorm = np.cross(ex_normalized, ey_normalized)
m = np.stack([ex_normalized, ey_normalized, eznorm, translation]).transpose()
m = np.concatenate([m, [[0.0, 0.0, 0.0, 1.0]]], axis=0)
return m
# create an array with (restype, atomtype) --> rigid_group_idx
# and an array with (restype, atomtype, coord) for the atom positions
# and compute affine transformation matrices (4,4) from one rigid group to the
# previous group
restype_atom37_to_rigid_group = np.zeros([21, 37], dtype=int)
restype_atom37_mask = np.zeros([21, 37], dtype=np.float32)
restype_atom37_rigid_group_positions = np.zeros([21, 37, 3], dtype=np.float32)
restype_atom14_to_rigid_group = np.zeros([21, 14], dtype=int)
restype_atom14_mask = np.zeros([21, 14], dtype=np.float32)
restype_atom14_rigid_group_positions = np.zeros([21, 14, 3], dtype=np.float32)
restype_rigid_group_default_frame = np.zeros([21, 8, 4, 4], dtype=np.float32)
def _make_rigid_group_constants() -> None:
    """Fill the arrays above.

    Populates, in place, the module-level restype_atom37_* / restype_atom14_*
    arrays (rigid-group index, presence mask, idealized positions) and
    restype_rigid_group_default_frame (4x4 transforms from each rigid group to
    its parent group) from rigid_group_atom_positions.
    """
    # First pass: per-atom group index, mask and idealized position, in both
    # the atom37 and the compact atom14 layouts.
    for restype, restype_letter in enumerate(restypes):
        resname = restype_1to3[restype_letter]
        for atomname, group_idx, atom_position in rigid_group_atom_positions[resname]:
            atomtype = atom_order[atomname]
            restype_atom37_to_rigid_group[restype, atomtype] = group_idx
            restype_atom37_mask[restype, atomtype] = 1
            restype_atom37_rigid_group_positions[restype, atomtype, :] = atom_position
            atom14idx = restype_name_to_atom14_names[resname].index(atomname)
            restype_atom14_to_rigid_group[restype, atom14idx] = group_idx
            restype_atom14_mask[restype, atom14idx] = 1
            restype_atom14_rigid_group_positions[restype, atom14idx, :] = atom_position
    # Second pass: default frames for the 8 rigid groups of each residue type.
    for restype, restype_letter in enumerate(restypes):
        resname = restype_1to3[restype_letter]
        atom_positions: Dict[str, np.ndarray] = {
            name: np.array(pos) for name, _, pos in rigid_group_atom_positions[resname]
        }
        # backbone to backbone is the identity transform
        restype_rigid_group_default_frame[restype, 0, :, :] = np.eye(4)
        # pre-omega-frame to backbone (currently dummy identity matrix)
        restype_rigid_group_default_frame[restype, 1, :, :] = np.eye(4)
        # phi-frame to backbone
        mat = _make_rigid_transformation_4x4(
            ex=atom_positions["N"] - atom_positions["CA"],
            ey=np.array([1.0, 0.0, 0.0]),
            translation=atom_positions["N"],
        )
        restype_rigid_group_default_frame[restype, 2, :, :] = mat
        # psi-frame to backbone
        mat = _make_rigid_transformation_4x4(
            ex=atom_positions["C"] - atom_positions["CA"],
            ey=atom_positions["CA"] - atom_positions["N"],
            translation=atom_positions["C"],
        )
        restype_rigid_group_default_frame[restype, 3, :, :] = mat
        # chi1-frame to backbone
        if chi_angles_mask[restype][0]:
            base_atom_names = chi_angles_atoms[resname][0]
            base_atom_positions = [atom_positions[name] for name in base_atom_names]
            mat = _make_rigid_transformation_4x4(
                ex=base_atom_positions[2] - base_atom_positions[1],
                ey=base_atom_positions[0] - base_atom_positions[1],
                translation=base_atom_positions[2],
            )
            restype_rigid_group_default_frame[restype, 4, :, :] = mat
        # chi2-frame to chi1-frame
        # chi3-frame to chi2-frame
        # chi4-frame to chi3-frame
        # luckily all rotation axes for the next frame start at (0,0,0) of the
        # previous frame
        for chi_idx in range(1, 4):
            if chi_angles_mask[restype][chi_idx]:
                axis_end_atom_name = chi_angles_atoms[resname][chi_idx][2]
                axis_end_atom_position = atom_positions[axis_end_atom_name]
                mat = _make_rigid_transformation_4x4(
                    ex=axis_end_atom_position,
                    ey=np.array([-1.0, 0.0, 0.0]),
                    translation=axis_end_atom_position,
                )
                restype_rigid_group_default_frame[restype, 4 + chi_idx, :, :] = mat
# Populate the arrays once at import time.
_make_rigid_group_constants()
def make_atom14_dists_bounds(
    overlap_tolerance: float = 1.5,
    bond_length_tolerance_factor: int = 15,
) -> Dict[str, np.ndarray]:
    """
    Compute per-residue lower/upper distance bounds between atom14 atom pairs,
    used to flag structural violations (steric clashes and distorted bonds).

    Args:
        overlap_tolerance: allowed van der Waals overlap (Angstroms) before a
            non-bonded pair counts as clashing.
        bond_length_tolerance_factor: number of standard deviations around the
            literature bond length still considered acceptable.

    Returns:
        Dict with "lower_bound", "upper_bound" and "stddev" float32 arrays,
        each of shape (21, 14, 14), indexed [restype, atom14_idx, atom14_idx].
    """
    restype_atom14_bond_lower_bound = np.zeros([21, 14, 14], np.float32)
    restype_atom14_bond_upper_bound = np.zeros([21, 14, 14], np.float32)
    restype_atom14_bond_stddev = np.zeros([21, 14, 14], np.float32)
    residue_bonds, residue_virtual_bonds, _ = load_stereo_chemical_props()
    for restype, restype_letter in enumerate(restypes):
        resname = restype_1to3[restype_letter]
        atom_list = restype_name_to_atom14_names[resname]

        # create lower and upper bounds for clashes
        for atom1_idx, atom1_name in enumerate(atom_list):
            # Empty string marks an unused atom14 slot for this residue type.
            if not atom1_name:
                continue
            # Element is taken from the first character of the atom name.
            atom1_radius = van_der_waals_radius[atom1_name[0]]
            for atom2_idx, atom2_name in enumerate(atom_list):
                if (not atom2_name) or atom1_idx == atom2_idx:
                    continue
                atom2_radius = van_der_waals_radius[atom2_name[0]]
                lower = atom1_radius + atom2_radius - overlap_tolerance
                # Non-bonded pairs get an effectively unbounded upper limit.
                upper = 1e10
                restype_atom14_bond_lower_bound[restype, atom1_idx, atom2_idx] = lower
                restype_atom14_bond_lower_bound[restype, atom2_idx, atom1_idx] = lower
                restype_atom14_bond_upper_bound[restype, atom1_idx, atom2_idx] = upper
                restype_atom14_bond_upper_bound[restype, atom2_idx, atom1_idx] = upper

        # overwrite lower and upper bounds for bonds and angles
        for b in residue_bonds[resname] + residue_virtual_bonds[resname]:
            atom1_idx = atom_list.index(b.atom1_name)
            atom2_idx = atom_list.index(b.atom2_name)
            lower = b.length - bond_length_tolerance_factor * b.stddev
            upper = b.length + bond_length_tolerance_factor * b.stddev
            restype_atom14_bond_lower_bound[restype, atom1_idx, atom2_idx] = lower
            restype_atom14_bond_lower_bound[restype, atom2_idx, atom1_idx] = lower
            restype_atom14_bond_upper_bound[restype, atom1_idx, atom2_idx] = upper
            restype_atom14_bond_upper_bound[restype, atom2_idx, atom1_idx] = upper
            restype_atom14_bond_stddev[restype, atom1_idx, atom2_idx] = b.stddev
            restype_atom14_bond_stddev[restype, atom2_idx, atom1_idx] = b.stddev
    return {
        "lower_bound": restype_atom14_bond_lower_bound,  # shape (21,14,14)
        "upper_bound": restype_atom14_bond_upper_bound,  # shape (21,14,14)
        "stddev": restype_atom14_bond_stddev,  # shape (21,14,14)
    }
# Mask of atom14 slots whose names are ambiguous under residue symmetry
# (filled in by _make_atom14_ambiguity_feats); shape (21, 14).
restype_atom14_ambiguous_atoms = np.zeros((21, 14), dtype=np.float32)
# For each atom14 slot, the slot it swaps with under atom renaming
# (identity by default); shape (21, 14).
restype_atom14_ambiguous_atoms_swap_idx: np.ndarray = np.tile(np.arange(14, dtype=int), (21, 1))
def _make_atom14_ambiguity_feats() -> None:
    """Fill the atom14 ambiguity mask and swap-index tables for residues with
    swappable atom names (listed in residue_atom_renaming_swaps)."""
    for resname, swaps in residue_atom_renaming_swaps.items():
        row = restype_order[restype_3to1[resname]]
        atom_names = restype_name_to_atom14_names[resname]
        for first_name, second_name in swaps.items():
            first_idx = atom_names.index(first_name)
            second_idx = atom_names.index(second_name)
            restype_atom14_ambiguous_atoms[row, first_idx] = 1
            restype_atom14_ambiguous_atoms[row, second_idx] = 1
            restype_atom14_ambiguous_atoms_swap_idx[row, first_idx] = second_idx
            restype_atom14_ambiguous_atoms_swap_idx[row, second_idx] = first_idx


_make_atom14_ambiguity_feats()
def aatype_to_str_sequence(aatype: Sequence[int]) -> str:
    """Map a sequence of residue-type indices to a one-letter amino-acid string."""
    return "".join(restypes_with_x[idx] for idx in aatype)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 14,392 | src/transformers/models/esm/openfold_utils/chunk_utils.py | # Copyright 2021 AlQuraishi Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
shapes = []
if isinstance(tree, dict):
for v in tree.values():
shapes.extend(_fetch_dims(v))
elif isinstance(tree, (list, tuple)):
for t in tree:
shapes.extend(_fetch_dims(t))
elif isinstance(tree, torch.Tensor):
shapes.append(tree.shape)
else:
raise ValueError("Not supported")
return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
idx = []
for d in reversed(dims):
idx.append(flat_idx % d)
flat_idx = flat_idx // d
return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """
    Produces an ordered sequence of tensor slices that, when used in sequence on a tensor with shape dims, yields
    tensors that contain every leaf in the contiguous range [start, end]. Care is taken to yield a short sequence of
    slices, and perhaps even the shortest possible (I'm pretty sure it's the latter).

    end is INCLUSIVE.
    """
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        # A dimension only counts as "at the edge" if every deeper dimension is
        # also at its edge; AND the flags together from innermost to outermost.
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        # Slices covering everything from `start` down to the bottom edge of
        # the subtree rooted at the divergence dimension.
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        # Slices covering everything from the top edge of the subtree down to
        # `end`.
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """
    Equivalent to

        t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end]

    but avoids the up-front reshape of the full tensor, which can be
    memory-intensive; only chunk-sized sub-tensors are ever reshaped here.
    """
    batch_shape = t.shape[:no_batch_dims]
    first = list(_flat_idx_to_idx(flat_start, batch_shape))
    # The slice-set helper treats its end index as inclusive.
    last = list(_flat_idx_to_idx(flat_end - 1, batch_shape))

    pieces = [t[s] for s in _get_minimal_slice_set(first, last, batch_shape)]
    event_shape = t.shape[no_batch_dims:]
    return torch.cat([p.view((-1,) + event_shape) for p in pieces])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """
    Implements the "chunking" procedure described in section 1.11.8.

    Layer outputs and inputs are assumed to be simple "pytrees," consisting only of (arbitrarily nested) lists, tuples,
    and dicts with torch.Tensor leaves.

    Args:
        layer:
            The layer to be applied chunk-wise
        inputs:
            A (non-nested) dictionary of keyworded inputs. All leaves must be tensors and must share the same batch
            dimensions.
        chunk_size:
            The number of sub-batches per chunk. If multiple batch dimensions are specified, a "sub-batch" is defined
            as a single indexing of all batch dimensions simultaneously (s.t. the number of sub-batches is the product
            of the batch dimensions).
        no_batch_dims:
            How many of the initial dimensions of each input tensor can be considered batch dimensions.
        low_mem:
            Avoids flattening potentially large input tensors. Unnecessary in most cases, and is ever so slightly
            slower than the default setting.
        _out:
            Optional pre-allocated output pytree to write into.
        _add_into_out:
            If True, chunk results are accumulated (+=) into the output
            instead of overwriting it.
    Returns:
        The reassembled output of the layer on the inputs.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    # Broadcast batch shape: per-dimension max over all input leaves.
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            # sum(batch dims) == count of batch dims only when every batch dim
            # is 1, i.e. the tensor is a broadcast singleton that can be left
            # unexpanded; otherwise expand it to the full broadcast shape.
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            # Flatten all batch dims into one so chunks are plain slices.
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            # Low-memory mode: expand lazily and slice via _chunk_slice later.
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    # Ceil division: last chunk may be smaller than chunk_size.
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        # Closes over the loop variable `i` below; leading dim 1 marks an
        # unexpanded broadcast tensor that is shared by every chunk.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                # Recursively copy/accumulate matching leaves of the nested
                # output dict into the preallocated buffer.
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    # Restore the original (unflattened) batch shape.
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class ChunkSizeTuner:
    """Empirically picks the largest chunk size that runs without a RuntimeError
    (typically OOM), caching the result until the argument shapes/values change."""

    def __init__(
        self,
        # Heuristically, runtimes for most of the modules in the network
        # plateau earlier than this on all GPUs I've run the model on.
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        # Last tuned chunk size, or None before the first tuning run.
        self.cached_chunk_size: Optional[int] = None
        # Snapshot of the args (tensors replaced by shapes) the cache is valid for.
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        """Binary-search the largest power-of-two chunk size (up to
        max_chunk_size, +4 on the top candidate) at which fn(*args) succeeds."""
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        # Powers of two up to max_chunk_size, above min_chunk_size.
        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            # A RuntimeError (e.g. CUDA OOM) marks the size as non-viable.
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        """Deep-compare two cached-argument trees; True if they match."""
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            # NOTE(review): this asserts/branches on the *container* types
            # (ac1/ac2) inside the element loop rather than on the elements
            # (a1/a2) — presumably intended to be a1/a2; confirm upstream.
            assert type(ac1) == type(ac2)
            if isinstance(ac1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(ac1, dict):
                # Compare dict values in key order so key ordering is irrelevant.
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        """Return a chunk size for representative_fn(*args), re-tuning only
        when the (shape-summarized) args differ from the cached ones."""
        consistent = True
        # Summarize args: tensors are reduced to their shapes for comparison.
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # A cache exists: only re-tune if the args changed shape/value.
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # First call: nothing cached, force a tuning run.
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
|
27182812/ChatGLM-LLaMA-chinese-insturct | 41,122 | src/transformers/models/esm/openfold_utils/rigid_utils.py | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from functools import lru_cache
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
import numpy as np
import torch
def rot_matmul(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """
    Multiplies two [*, 3, 3] rotation-matrix tensors, written out elementwise
    so AMP cannot downcast the accumulation.

    Args:
        a: [*, 3, 3] left multiplicand
        b: [*, 3, 3] right multiplicand
    Returns:
        The product ab
    """

    def entry(i: int, j: int) -> torch.Tensor:
        return a[..., i, 0] * b[..., 0, j] + a[..., i, 1] * b[..., 1, j] + a[..., i, 2] * b[..., 2, j]

    rows = [torch.stack([entry(i, 0), entry(i, 1), entry(i, 2)], dim=-1) for i in range(3)]
    return torch.stack(rows, dim=-2)
def rot_vec_mul(r: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
    """
    Rotates 3D points; written out by hand to avoid AMP downcasting.

    Args:
        r: [*, 3, 3] rotation matrices
        t: [*, 3] coordinate tensors
    Returns:
        [*, 3] rotated coordinates
    """
    x, y, z = torch.unbind(t, dim=-1)
    rotated = [r[..., row, 0] * x + r[..., row, 1] * y + r[..., row, 2] * z for row in range(3)]
    return torch.stack(rotated, dim=-1)
@lru_cache(maxsize=None)
def identity_rot_mats(
    batch_dims: Tuple[int, ...],
    dtype: Optional[torch.dtype] = None,
    device: Optional[torch.device] = None,
    requires_grad: bool = True,
) -> torch.Tensor:
    """Returns a memoized, contiguous [*batch_dims, 3, 3] stack of identity
    rotation matrices."""
    eye = torch.eye(3, dtype=dtype, device=device, requires_grad=requires_grad)
    expanded = eye.view((1,) * len(batch_dims) + (3, 3)).expand(*batch_dims, -1, -1)
    return expanded.contiguous()
@lru_cache(maxsize=None)
def identity_trans(
    batch_dims: Tuple[int, ...],
    dtype: Optional[torch.dtype] = None,
    device: Optional[torch.device] = None,
    requires_grad: bool = True,
) -> torch.Tensor:
    """Returns a memoized [*batch_dims, 3] tensor of zero translations."""
    return torch.zeros((*batch_dims, 3), dtype=dtype, device=device, requires_grad=requires_grad)
@lru_cache(maxsize=None)
def identity_quats(
    batch_dims: Tuple[int, ...],
    dtype: Optional[torch.dtype] = None,
    device: Optional[torch.device] = None,
    requires_grad: bool = True,
) -> torch.Tensor:
    """Returns a memoized [*batch_dims, 4] tensor of identity quaternions
    (1, 0, 0, 0)."""
    quats = torch.zeros((*batch_dims, 4), dtype=dtype, device=device, requires_grad=requires_grad)

    # Set the real component without recording the write in autograd.
    with torch.no_grad():
        quats[..., 0] = 1

    return quats
# The four quaternion components and every ordered pair of them; the pair keys
# ("aa", "ab", ...) index entries of the outer product q (x) q used below.
_quat_elements: List[str] = ["a", "b", "c", "d"]
_qtr_keys: List[str] = [l1 + l2 for l1 in _quat_elements for l2 in _quat_elements]
# Maps each pair key to its flat index in the 4x4 outer-product matrix.
_qtr_ind_dict: Dict[str, int] = {key: ind for ind, key in enumerate(_qtr_keys)}
def _to_mat(pairs: List[Tuple[str, int]]) -> np.ndarray:
    """Builds a 4x4 coefficient matrix from (quaternion-pair key, value)
    entries, leaving unlisted entries zero."""
    mat = np.zeros((4, 4))
    for key, value in pairs:
        flat = _qtr_ind_dict[key]
        mat[flat // 4, flat % 4] = value
    return mat
# _QTR_MAT[..., i, j] holds the 4x4 coefficient matrix that maps the
# quaternion outer product q (x) q to entry (i, j) of the rotation matrix,
# with quaternion components ordered (a, b, c, d) = (w, x, y, z).
_QTR_MAT = np.zeros((4, 4, 3, 3))
_QTR_MAT[..., 0, 0] = _to_mat([("aa", 1), ("bb", 1), ("cc", -1), ("dd", -1)])
_QTR_MAT[..., 0, 1] = _to_mat([("bc", 2), ("ad", -2)])
_QTR_MAT[..., 0, 2] = _to_mat([("bd", 2), ("ac", 2)])
_QTR_MAT[..., 1, 0] = _to_mat([("bc", 2), ("ad", 2)])
_QTR_MAT[..., 1, 1] = _to_mat([("aa", 1), ("bb", -1), ("cc", 1), ("dd", -1)])
_QTR_MAT[..., 1, 2] = _to_mat([("cd", 2), ("ab", -2)])
_QTR_MAT[..., 2, 0] = _to_mat([("bd", 2), ("ac", -2)])
_QTR_MAT[..., 2, 1] = _to_mat([("cd", 2), ("ab", 2)])
_QTR_MAT[..., 2, 2] = _to_mat([("aa", 1), ("bb", -1), ("cc", -1), ("dd", 1)])
def quat_to_rot(quat: torch.Tensor) -> torch.Tensor:
    """
    Converts a quaternion to a rotation matrix.

    Args:
        quat: [*, 4] quaternions
    Returns:
        [*, 3, 3] rotation matrices
    """
    # Outer product q (x) q: [*, 4, 4]
    outer = quat[..., None] * quat[..., None, :]

    # Coefficient table mapping the outer product to matrix entries: [4, 4, 3, 3]
    coeffs = _get_quat("_QTR_MAT", dtype=quat.dtype, device=quat.device)
    coeffs = coeffs.view((1,) * (outer.ndim - 2) + coeffs.shape)

    # Weighted sum over both quaternion axes: [*, 3, 3]
    return torch.sum(outer[..., None, None] * coeffs, dim=(-3, -4))
def rot_to_quat(rot: torch.Tensor) -> torch.Tensor:
    """
    Converts [*, 3, 3] rotation matrices to [*, 4] quaternions via the
    eigendecomposition of a symmetric 4x4 matrix built from the rotation; the
    eigenvector of the largest eigenvalue is the quaternion (sign-ambiguous).

    Raises:
        ValueError: if the trailing dims are not (3, 3).
    """
    if rot.shape[-2:] != (3, 3):
        raise ValueError("Input rotation is incorrectly shaped")

    xx, xy, xz = rot[..., 0, 0], rot[..., 0, 1], rot[..., 0, 2]
    yx, yy, yz = rot[..., 1, 0], rot[..., 1, 1], rot[..., 1, 2]
    zx, zy, zz = rot[..., 2, 0], rot[..., 2, 1], rot[..., 2, 2]

    rows = [
        [xx + yy + zz, zy - yz, xz - zx, yx - xy],
        [zy - yz, xx - yy - zz, xy + yx, xz + zx],
        [xz - zx, xy + yx, yy - xx - zz, yz + zy],
        [yx - xy, xz + zx, yz + zy, zz - xx - yy],
    ]
    k = (1.0 / 3.0) * torch.stack([torch.stack(row, dim=-1) for row in rows], dim=-2)

    # eigh returns eigenvalues in ascending order; take the top eigenvector.
    _, vectors = torch.linalg.eigh(k)
    return vectors[..., -1]
# Hamilton-product coefficient tensor:
# (q1 * q2)[k] = sum_ij _QUAT_MULTIPLY[i, j, k] * q1[i] * q2[j].
_QUAT_MULTIPLY = np.zeros((4, 4, 4))
_QUAT_MULTIPLY[:, :, 0] = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]]

_QUAT_MULTIPLY[:, :, 1] = [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 0, -1, 0]]

_QUAT_MULTIPLY[:, :, 2] = [[0, 0, 1, 0], [0, 0, 0, -1], [1, 0, 0, 0], [0, 1, 0, 0]]

_QUAT_MULTIPLY[:, :, 3] = [[0, 0, 0, 1], [0, 0, 1, 0], [0, -1, 0, 0], [1, 0, 0, 0]]

# Same table with the real component of the second factor dropped, for
# multiplying by a pure-vector quaternion (x, y, z).
_QUAT_MULTIPLY_BY_VEC = _QUAT_MULTIPLY[:, 1:, :]

# Registry of coefficient tables served (as tensors) by _get_quat.
_CACHED_QUATS: Dict[str, np.ndarray] = {
    "_QTR_MAT": _QTR_MAT,
    "_QUAT_MULTIPLY": _QUAT_MULTIPLY,
    "_QUAT_MULTIPLY_BY_VEC": _QUAT_MULTIPLY_BY_VEC,
}
@lru_cache(maxsize=None)
def _get_quat(quat_key: str, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
    """Returns one of the cached quaternion coefficient tables as a tensor,
    memoized per (table, dtype, device)."""
    source = _CACHED_QUATS[quat_key]
    return torch.tensor(source, dtype=dtype, device=device)
def quat_multiply(quat1: torch.Tensor, quat2: torch.Tensor) -> torch.Tensor:
    """Multiply a quaternion by another quaternion."""
    table = _get_quat("_QUAT_MULTIPLY", dtype=quat1.dtype, device=quat1.device)
    table = table.view((1,) * (quat1.ndim - 1) + table.shape)
    return torch.sum(table * quat1[..., :, None, None] * quat2[..., None, :, None], dim=(-3, -2))
def quat_multiply_by_vec(quat: torch.Tensor, vec: torch.Tensor) -> torch.Tensor:
    """Multiply a quaternion by a pure-vector quaternion."""
    table = _get_quat("_QUAT_MULTIPLY_BY_VEC", dtype=quat.dtype, device=quat.device)
    table = table.view((1,) * (quat.ndim - 1) + table.shape)
    return torch.sum(table * quat[..., :, None, None] * vec[..., None, :, None], dim=(-3, -2))
def invert_rot_mat(rot_mat: torch.Tensor) -> torch.Tensor:
    """Inverts rotation matrices by swapping the trailing two dims
    (R^-1 == R^T for rotation matrices)."""
    return rot_mat.transpose(-1, -2)
def invert_quat(quat: torch.Tensor) -> torch.Tensor:
    """Inverts quaternions: the conjugate divided by the squared norm."""
    conjugate = quat.clone()
    conjugate[..., 1:] = conjugate[..., 1:] * -1
    return conjugate / torch.sum(quat**2, dim=-1, keepdim=True)
class Rotation:
"""
A 3D rotation. Depending on how the object is initialized, the rotation is represented by either a rotation matrix
or a quaternion, though both formats are made available by helper functions. To simplify gradient computation, the
underlying format of the rotation cannot be changed in-place. Like Rigid, the class is designed to mimic the
behavior of a torch Tensor, almost as if each Rotation object were a tensor of rotations, in one format or another.
"""
def __init__(
self,
rot_mats: Optional[torch.Tensor] = None,
quats: Optional[torch.Tensor] = None,
normalize_quats: bool = True,
):
"""
Args:
rot_mats:
A [*, 3, 3] rotation matrix tensor. Mutually exclusive with quats
quats:
A [*, 4] quaternion. Mutually exclusive with rot_mats. If normalize_quats is not True, must be a unit
quaternion
normalize_quats:
If quats is specified, whether to normalize quats
"""
if (rot_mats is None and quats is None) or (rot_mats is not None and quats is not None):
raise ValueError("Exactly one input argument must be specified")
if (rot_mats is not None and rot_mats.shape[-2:] != (3, 3)) or (quats is not None and quats.shape[-1] != 4):
raise ValueError("Incorrectly shaped rotation matrix or quaternion")
# Force full-precision
if quats is not None:
quats = quats.to(dtype=torch.float32)
if rot_mats is not None:
rot_mats = rot_mats.to(dtype=torch.float32)
if quats is not None and normalize_quats:
quats = quats / torch.linalg.norm(quats, dim=-1, keepdim=True)
self._rot_mats = rot_mats
self._quats = quats
@staticmethod
def identity(
    shape,
    dtype: Optional[torch.dtype] = None,
    device: Optional[torch.device] = None,
    requires_grad: bool = True,
    fmt: str = "quat",
) -> Rotation:
    """
    Returns an identity Rotation.

    Args:
        shape:
            The "shape" of the resulting Rotation object. See documentation for the shape property
        dtype:
            The torch dtype for the rotation
        device:
            The torch device for the new rotation
        requires_grad:
            Whether the underlying tensors in the new rotation object should require gradient computation
        fmt:
            One of "quat" or "rot_mat". Determines the underlying format of the new object's rotation
    Returns:
        A new identity rotation

    Raises:
        ValueError: if fmt is neither "quat" nor "rot_mat".
    """
    if fmt == "rot_mat":
        rot_mats = identity_rot_mats(
            shape,
            dtype,
            device,
            requires_grad,
        )
        return Rotation(rot_mats=rot_mats, quats=None)
    elif fmt == "quat":
        quats = identity_quats(shape, dtype, device, requires_grad)
        return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
    else:
        # Fixed: message previously read f"Invalid format: f{fmt}", which
        # rendered a stray literal "f" (e.g. "Invalid format: fquat").
        raise ValueError(f"Invalid format: {fmt}")
# Magic methods
def __getitem__(self, index: Any) -> Rotation:
"""
Allows torch-style indexing over the virtual shape of the rotation object. See documentation for the shape
property.
Args:
index:
A torch index. E.g. (1, 3, 2), or (slice(None,))
Returns:
The indexed rotation
"""
if type(index) != tuple:
index = (index,)
if self._rot_mats is not None:
rot_mats = self._rot_mats[index + (slice(None), slice(None))]
return Rotation(rot_mats=rot_mats)
elif self._quats is not None:
quats = self._quats[index + (slice(None),)]
return Rotation(quats=quats, normalize_quats=False)
else:
raise ValueError("Both rotations are None")
def __mul__(self, right: torch.Tensor) -> Rotation:
"""
Pointwise left multiplication of the rotation with a tensor. Can be used to e.g. mask the Rotation.
Args:
right:
The tensor multiplicand
Returns:
The product
"""
if not (isinstance(right, torch.Tensor)):
raise TypeError("The other multiplicand must be a Tensor")
if self._rot_mats is not None:
rot_mats = self._rot_mats * right[..., None, None]
return Rotation(rot_mats=rot_mats, quats=None)
elif self._quats is not None:
quats = self._quats * right[..., None]
return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
else:
raise ValueError("Both rotations are None")
def __rmul__(self, left: torch.Tensor) -> Rotation:
"""
Reverse pointwise multiplication of the rotation with a tensor.
Args:
left:
The left multiplicand
Returns:
The product
"""
return self.__mul__(left)
# Properties
@property
def shape(self) -> torch.Size:
"""
Returns the virtual shape of the rotation object. This shape is defined as the batch dimensions of the
underlying rotation matrix or quaternion. If the Rotation was initialized with a [10, 3, 3] rotation matrix
tensor, for example, the resulting shape would be [10].
Returns:
The virtual shape of the rotation object
"""
if self._rot_mats is not None:
return self._rot_mats.shape[:-2]
elif self._quats is not None:
return self._quats.shape[:-1]
else:
raise ValueError("Both rotations are None")
@property
def dtype(self) -> torch.dtype:
"""
Returns the dtype of the underlying rotation.
Returns:
The dtype of the underlying rotation
"""
if self._rot_mats is not None:
return self._rot_mats.dtype
elif self._quats is not None:
return self._quats.dtype
else:
raise ValueError("Both rotations are None")
@property
def device(self) -> torch.device:
"""
The device of the underlying rotation
Returns:
The device of the underlying rotation
"""
if self._rot_mats is not None:
return self._rot_mats.device
elif self._quats is not None:
return self._quats.device
else:
raise ValueError("Both rotations are None")
@property
def requires_grad(self) -> bool:
"""
Returns the requires_grad property of the underlying rotation
Returns:
The requires_grad property of the underlying tensor
"""
if self._rot_mats is not None:
return self._rot_mats.requires_grad
elif self._quats is not None:
return self._quats.requires_grad
else:
raise ValueError("Both rotations are None")
def get_rot_mats(self) -> torch.Tensor:
"""
Returns the underlying rotation as a rotation matrix tensor.
Returns:
The rotation as a rotation matrix tensor
"""
if self._rot_mats is not None:
return self._rot_mats
elif self._quats is not None:
return quat_to_rot(self._quats)
else:
raise ValueError("Both rotations are None")
def get_quats(self) -> torch.Tensor:
"""
Returns the underlying rotation as a quaternion tensor.
Depending on whether the Rotation was initialized with a quaternion, this function may call torch.linalg.eigh.
Returns:
The rotation as a quaternion tensor.
"""
if self._rot_mats is not None:
return rot_to_quat(self._rot_mats)
elif self._quats is not None:
return self._quats
else:
raise ValueError("Both rotations are None")
def get_cur_rot(self) -> torch.Tensor:
"""
Return the underlying rotation in its current form
Returns:
The stored rotation
"""
if self._rot_mats is not None:
return self._rot_mats
elif self._quats is not None:
return self._quats
else:
raise ValueError("Both rotations are None")
# Rotation functions
def compose_q_update_vec(self, q_update_vec: torch.Tensor, normalize_quats: bool = True) -> Rotation:
    """
    Applies a [*, 3] quaternion update vector: the columns are x, y, z such
    that (1, x, y, z) is the (not necessarily unit) update quaternion.

    Args:
        q_update_vec:
            A [*, 3] quaternion update tensor
        normalize_quats:
            Whether to normalize the output quaternion
    Returns:
        An updated Rotation, stored as a quaternion
    """
    current = self.get_quats()
    updated = current + quat_multiply_by_vec(current, q_update_vec)
    return Rotation(
        rot_mats=None,
        quats=updated,
        normalize_quats=normalize_quats,
    )
def compose_r(self, r: Rotation) -> Rotation:
    """
    Composes this rotation with `r` in rotation-matrix format.

    Args:
        r:
            An update rotation object
    Returns:
        An updated rotation object, stored as matrices
    """
    product = rot_matmul(self.get_rot_mats(), r.get_rot_mats())
    return Rotation(rot_mats=product, quats=None)
def compose_q(self, r: Rotation, normalize_quats: bool = True) -> Rotation:
    """
    Composes this rotation with `r` in quaternion format. May call
    torch.linalg.eigh if either rotation is stored as matrices.

    Args:
        r:
            An update rotation object
    Returns:
        An updated rotation object, stored as a quaternion
    """
    product = quat_multiply(self.get_quats(), r.get_quats())
    return Rotation(rot_mats=None, quats=product, normalize_quats=normalize_quats)
def apply(self, pts: torch.Tensor) -> torch.Tensor:
    """
    Rotates a [*, 3] set of points by this rotation (matrix form).

    Args:
        pts:
            A [*, 3] set of points
    Returns:
        [*, 3] rotated points
    """
    return rot_vec_mul(self.get_rot_mats(), pts)
def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:
    """
    Applies the inverse of this rotation to a [*, 3] set of points.

    Args:
        pts:
            A [*, 3] set of points
    Returns:
        [*, 3] inverse-rotated points
    """
    return rot_vec_mul(invert_rot_mat(self.get_rot_mats()), pts)
def invert(self) -> Rotation:
    """Returns the inverse of this Rotation, in its current storage format."""
    if self._rot_mats is not None:
        return Rotation(rot_mats=invert_rot_mat(self._rot_mats), quats=None)
    if self._quats is not None:
        return Rotation(
            rot_mats=None,
            quats=invert_quat(self._quats),
            normalize_quats=False,
        )
    raise ValueError("Both rotations are None")
# "Tensor" stuff
def unsqueeze(self, dim: int) -> Rotation:
"""
Analogous to torch.unsqueeze. The dimension is relative to the shape of the Rotation object.
Args:
dim: A positive or negative dimension index.
Returns:
The unsqueezed Rotation.
"""
if dim >= len(self.shape):
raise ValueError("Invalid dimension")
if self._rot_mats is not None:
rot_mats = self._rot_mats.unsqueeze(dim if dim >= 0 else dim - 2)
return Rotation(rot_mats=rot_mats, quats=None)
elif self._quats is not None:
quats = self._quats.unsqueeze(dim if dim >= 0 else dim - 1)
return Rotation(rot_mats=None, quats=quats, normalize_quats=False)
else:
raise ValueError("Both rotations are None")
@staticmethod
def cat(rs: Sequence[Rotation], dim: int) -> Rotation:
"""
Concatenates rotations along one of the batch dimensions. Analogous to torch.cat().
Note that the output of this operation is always a rotation matrix, regardless of the format of input
rotations.
Args:
rs:
A list of rotation objects
dim:
The dimension along which the rotations should be concatenated
Returns:
A concatenated Rotation object in rotation matrix format
"""
rot_mats = torch.cat(
[r.get_rot_mats() for r in rs],
dim=dim if dim >= 0 else dim - 2,
)
return Rotation(rot_mats=rot_mats, quats=None)
def map_tensor_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rotation:
    """
    Apply a Tensor -> Tensor function to underlying rotation tensors, mapping over the rotation dimension(s).
    Can be used e.g. to sum out a one-hot batch dimension.

    Args:
        fn: A Tensor -> Tensor function to be mapped over the Rotation.

    Returns:
        The transformed Rotation object.

    Raises:
        ValueError: If neither representation is populated.
    """
    if self._rot_mats is not None:
        # Flatten the (3, 3) matrix dims so fn sees one component at a time.
        flat = self._rot_mats.view(self._rot_mats.shape[:-2] + (9,))
        mapped = torch.stack([fn(component) for component in torch.unbind(flat, dim=-1)], dim=-1)
        return Rotation(rot_mats=mapped.view(mapped.shape[:-1] + (3, 3)), quats=None)
    if self._quats is not None:
        mapped = torch.stack([fn(component) for component in torch.unbind(self._quats, dim=-1)], dim=-1)
        return Rotation(rot_mats=None, quats=mapped, normalize_quats=False)
    raise ValueError("Both rotations are None")
def cuda(self) -> Rotation:
    """
    Analogous to the cuda() method of torch Tensors.

    Returns:
        A copy of the Rotation in CUDA memory.

    Raises:
        ValueError: If neither representation is populated.
    """
    if self._rot_mats is not None:
        return Rotation(rot_mats=self._rot_mats.cuda(), quats=None)
    if self._quats is not None:
        return Rotation(rot_mats=None, quats=self._quats.cuda(), normalize_quats=False)
    raise ValueError("Both rotations are None")
def to(self, device: Optional[torch.device], dtype: Optional[torch.dtype]) -> Rotation:
    """
    Analogous to the to() method of torch Tensors.

    Args:
        device: A torch device.
        dtype: A torch dtype.

    Returns:
        A copy of the Rotation using the new device and dtype.

    Raises:
        ValueError: If neither representation is populated.
    """
    if self._rot_mats is not None:
        moved = self._rot_mats.to(device=device, dtype=dtype)
        return Rotation(rot_mats=moved, quats=None)
    if self._quats is not None:
        moved = self._quats.to(device=device, dtype=dtype)
        return Rotation(rot_mats=None, quats=moved, normalize_quats=False)
    raise ValueError("Both rotations are None")
def detach(self) -> Rotation:
    """
    Returns a copy of the Rotation whose underlying Tensor has been detached from its torch graph.

    Returns:
        A copy of the Rotation whose underlying Tensor has been detached from its torch graph.

    Raises:
        ValueError: If neither representation is populated.
    """
    if self._rot_mats is not None:
        return Rotation(rot_mats=self._rot_mats.detach(), quats=None)
    if self._quats is not None:
        return Rotation(rot_mats=None, quats=self._quats.detach(), normalize_quats=False)
    raise ValueError("Both rotations are None")
class Rigid:
"""
A class representing a rigid transformation. Little more than a wrapper around two objects: a Rotation object and a
[*, 3] translation Designed to behave approximately like a single torch tensor with the shape of the shared batch
dimensions of its component parts.
"""
def __init__(self, rots: Optional[Rotation], trans: Optional[torch.Tensor]):
    """
    Args:
        rots: A [*, 3, 3] rotation tensor
        trans: A corresponding [*, 3] translation tensor
    """
    # (we need device, dtype, etc. from at least one input)
    batch_dims, dtype, device, requires_grad = None, None, None, None
    if trans is not None:
        # Everything before the trailing 3-vector dim is batch shape.
        batch_dims = trans.shape[:-1]
        dtype = trans.dtype
        device = trans.device
        requires_grad = trans.requires_grad
    elif rots is not None:
        # Rotation.shape already excludes the matrix/quaternion dims.
        batch_dims = rots.shape
        dtype = rots.dtype
        device = rots.device
        requires_grad = rots.requires_grad
    else:
        raise ValueError("At least one input argument must be specified")
    # Fill in whichever component was omitted with an identity of matching
    # batch shape / dtype / device.
    if rots is None:
        rots = Rotation.identity(
            batch_dims,
            dtype,
            device,
            requires_grad,
        )
    elif trans is None:
        trans = identity_trans(
            batch_dims,
            dtype,
            device,
            requires_grad,
        )
    assert rots is not None
    assert trans is not None
    # The two components must share batch shape and device.
    if (rots.shape != trans.shape[:-1]) or (rots.device != trans.device):
        raise ValueError("Rots and trans incompatible")
    # Force full precision. Happens to the rotations automatically.
    trans = trans.to(dtype=torch.float32)
    self._rots = rots
    self._trans = trans
@staticmethod
def identity(
    shape: Tuple[int, ...],
    dtype: Optional[torch.dtype] = None,
    device: Optional[torch.device] = None,
    requires_grad: bool = True,
    fmt: str = "quat",
) -> Rigid:
    """
    Constructs an identity transformation.

    Args:
        shape: The desired shape.
        dtype: The dtype of both internal tensors.
        device: The device of both internal tensors.
        requires_grad: Whether grad should be enabled for the internal tensors.
        fmt: Internal rotation format, forwarded to Rotation.identity.

    Returns:
        The identity transformation.
    """
    identity_rots = Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt)
    identity_translation = identity_trans(shape, dtype, device, requires_grad)
    return Rigid(identity_rots, identity_translation)
def __getitem__(self, index: Any) -> Rigid:
    """
    Indexes the affine transformation with PyTorch-style indices. The index is applied to the shared dimensions
    of both the rotation and the translation.

    E.g.::

        r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)
        t = Rigid(r, torch.rand(10, 10, 3))
        indexed = t[3, 4:6]
        assert(indexed.shape == (2,))

    Args:
        index: A standard torch tensor index. E.g. 8, (10, None, 3), or (3, slice(0, 1, None)).

    Returns:
        The indexed tensor.
    """
    if type(index) != tuple:
        index = (index,)
    # The translation carries a trailing coordinate dim the index must not touch.
    return Rigid(self._rots[index], self._trans[index + (slice(None),)])
def __mul__(self, right: torch.Tensor) -> Rigid:
    """
    Pointwise left multiplication of the transformation with a tensor. Can be used to e.g. mask the Rigid.

    Args:
        right: The tensor multiplicand.

    Returns:
        The product.

    Raises:
        TypeError: If right is not a torch.Tensor.
    """
    if not isinstance(right, torch.Tensor):
        raise TypeError("The other multiplicand must be a Tensor")
    # Broadcast the multiplier over the trailing coordinate dim of the translation.
    return Rigid(self._rots * right, self._trans * right[..., None])
def __rmul__(self, left: torch.Tensor) -> Rigid:
    """
    Reverse pointwise multiplication of the transformation with a tensor.

    Args:
        left: The left multiplicand.

    Returns:
        The product.
    """
    # Pointwise multiplication commutes, so delegate to __mul__.
    return self.__mul__(left)
@property
def shape(self) -> torch.Size:
"""
Returns the shape of the shared dimensions of the rotation and the translation.
Returns:
The shape of the transformation
"""
return self._trans.shape[:-1]
@property
def device(self) -> torch.device:
"""
Returns the device on which the Rigid's tensors are located.
Returns:
The device on which the Rigid's tensors are located
"""
return self._trans.device
def get_rots(self) -> Rotation:
"""
Getter for the rotation.
Returns:
The rotation object
"""
return self._rots
def get_trans(self) -> torch.Tensor:
"""
Getter for the translation.
Returns:
The stored translation
"""
return self._trans
def compose_q_update_vec(self, q_update_vec: torch.Tensor) -> Rigid:
    """
    Composes the transformation with a quaternion update vector of shape [*, 6], where the first 3 columns
    hold the x, y, and z values of a quaternion of form (1, x, y, z) and the final 3 columns hold a 3D
    translation update.

    Args:
        q_update_vec: The [*, 6] quaternion-and-translation update vector.
    Returns:
        The composed transformation.
    """
    # Split the update into its quaternion and translation halves.
    q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]
    new_rots = self._rots.compose_q_update_vec(q_vec)
    # The translation update is rotated by the current rotation before being added.
    trans_update = self._rots.apply(t_vec)
    new_translation = self._trans + trans_update
    return Rigid(new_rots, new_translation)
def compose(self, r: Rigid) -> Rigid:
    """
    Composes the current rigid object with another.

    Args:
        r: Another Rigid object.

    Returns:
        The composition of the two transformations.
    """
    composed_rot = self._rots.compose_r(r._rots)
    # Rotate the other translation into this frame, then offset by our own.
    composed_trans = self._rots.apply(r._trans) + self._trans
    return Rigid(composed_rot, composed_trans)
def apply(self, pts: torch.Tensor) -> torch.Tensor:
"""
Applies the transformation to a coordinate tensor.
Args:
pts: A [*, 3] coordinate tensor.
Returns:
The transformed points.
"""
rotated = self._rots.apply(pts)
return rotated + self._trans
def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:
"""
Applies the inverse of the transformation to a coordinate tensor.
Args:
pts: A [*, 3] coordinate tensor
Returns:
The transformed points.
"""
pts = pts - self._trans
return self._rots.invert_apply(pts)
def invert(self) -> Rigid:
    """
    Inverts the transformation.

    Returns:
        The inverse transformation.
    """
    inv_rot = self._rots.invert()
    # The inverse translation is -(R^-1 @ t).
    inv_trans = inv_rot.apply(self._trans)
    return Rigid(inv_rot, -1 * inv_trans)
def map_tensor_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rigid:
    """
    Apply a Tensor -> Tensor function to underlying translation and rotation tensors, mapping over the
    translation/rotation dimensions respectively.

    Args:
        fn: A Tensor -> Tensor function to be mapped over the Rigid.

    Returns:
        The transformed Rigid object.
    """
    mapped_rots = self._rots.map_tensor_fn(fn)
    # Map fn over each of the three translation components independently.
    mapped_components = [fn(c) for c in torch.unbind(self._trans, dim=-1)]
    return Rigid(mapped_rots, torch.stack(mapped_components, dim=-1))
def to_tensor_4x4(self) -> torch.Tensor:
"""
Converts a transformation to a homogenous transformation tensor.
Returns:
A [*, 4, 4] homogenous transformation tensor
"""
tensor = self._trans.new_zeros((*self.shape, 4, 4))
tensor[..., :3, :3] = self._rots.get_rot_mats()
tensor[..., :3, 3] = self._trans
tensor[..., 3, 3] = 1
return tensor
@staticmethod
def from_tensor_4x4(t: torch.Tensor) -> Rigid:
    """
    Constructs a transformation from a homogenous transformation tensor.

    Args:
        t: A [*, 4, 4] homogenous transformation tensor.

    Returns:
        T object with shape [*].

    Raises:
        ValueError: If the trailing dims of t are not (4, 4).
    """
    if t.shape[-2:] != (4, 4):
        raise ValueError("Incorrectly shaped input tensor")
    rotation = Rotation(rot_mats=t[..., :3, :3], quats=None)
    translation = t[..., :3, 3]
    return Rigid(rotation, translation)
def to_tensor_7(self) -> torch.Tensor:
"""
Converts a transformation to a tensor with 7 final columns, four for the quaternion followed by three for the
translation.
Returns:
A [*, 7] tensor representation of the transformation
"""
tensor = self._trans.new_zeros((*self.shape, 7))
tensor[..., :4] = self._rots.get_quats()
tensor[..., 4:] = self._trans
return tensor
@staticmethod
def from_tensor_7(t: torch.Tensor, normalize_quats: bool = False) -> Rigid:
    """
    Constructs a transformation from a [*, 7] tensor whose final dimension holds a quaternion (4 values)
    followed by a translation (3 values); the inverse of to_tensor_7().

    Args:
        t: A [*, 7] tensor.
        normalize_quats: Whether to normalize the quaternion component.
    Returns:
        A transformation object of shape [*].
    """
    if t.shape[-1] != 7:
        raise ValueError("Incorrectly shaped input tensor")
    quats, trans = t[..., :4], t[..., 4:]
    rots = Rotation(rot_mats=None, quats=quats, normalize_quats=normalize_quats)
    return Rigid(rots, trans)
@staticmethod
def from_3_points(
    p_neg_x_axis: torch.Tensor, origin: torch.Tensor, p_xy_plane: torch.Tensor, eps: float = 1e-8
) -> Rigid:
    """
    Implements algorithm 21. Constructs transformations from sets of 3 points using the Gram-Schmidt algorithm.

    Args:
        p_neg_x_axis: [*, 3] coordinates
        origin: [*, 3] coordinates used as frame origins
        p_xy_plane: [*, 3] coordinates
        eps: Small epsilon value
    Returns:
        A transformation object of shape [*]
    """
    # Work on per-coordinate component lists so the math below stays elementwise.
    p_neg_x_axis_unbound = torch.unbind(p_neg_x_axis, dim=-1)
    origin_unbound = torch.unbind(origin, dim=-1)
    p_xy_plane_unbound = torch.unbind(p_xy_plane, dim=-1)
    # e0: from p_neg_x_axis toward the origin; e1: from the origin toward p_xy_plane.
    e0 = [c1 - c2 for c1, c2 in zip(origin_unbound, p_neg_x_axis_unbound)]
    e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane_unbound, origin_unbound)]
    # Normalize e0; eps keeps the sqrt away from zero for degenerate inputs.
    denom = torch.sqrt(sum(c * c for c in e0) + eps * torch.ones_like(e0[0]))
    e0 = [c / denom for c in e0]
    # Gram-Schmidt: remove e0's component from e1, then normalize.
    dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))
    e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]
    denom = torch.sqrt(sum((c * c for c in e1)) + eps * torch.ones_like(e1[0]))
    e1 = [c / denom for c in e1]
    # e2 = e0 x e1 completes the orthonormal basis.
    e2 = [
        e0[1] * e1[2] - e0[2] * e1[1],
        e0[2] * e1[0] - e0[0] * e1[2],
        e0[0] * e1[1] - e0[1] * e1[0],
    ]
    # Interleave so e0/e1/e2 become the columns of the [*, 3, 3] matrix.
    rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)
    rots = rots.reshape(rots.shape[:-1] + (3, 3))
    rot_obj = Rotation(rot_mats=rots, quats=None)
    return Rigid(rot_obj, torch.stack(origin_unbound, dim=-1))
def unsqueeze(self, dim: int) -> Rigid:
    """
    Analogous to torch.unsqueeze. The dimension is relative to the shared dimensions of the
    rotation/translation.

    Args:
        dim: A positive or negative dimension index.

    Returns:
        The unsqueezed transformation.

    Raises:
        ValueError: If dim is out of range.
    """
    if dim >= len(self.shape):
        raise ValueError("Invalid dimension")
    expanded_rots = self._rots.unsqueeze(dim)
    # Negative dims must be shifted past the trailing 3-vector dim.
    expanded_trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)
    return Rigid(expanded_rots, expanded_trans)
@staticmethod
def cat(ts: Sequence[Rigid], dim: int) -> Rigid:
    """
    Concatenates transformations along one of the batch dimensions.

    Args:
        ts: A list of T objects.
        dim: The dimension along which the transformations should be concatenated.

    Returns:
        A concatenated transformation object.
    """
    rot_cat = Rotation.cat([t._rots for t in ts], dim)
    # Negative dims must be shifted past the trailing 3-vector dim.
    trans_dim = dim if dim >= 0 else dim - 1
    trans_cat = torch.cat([t._trans for t in ts], dim=trans_dim)
    return Rigid(rot_cat, trans_cat)
def apply_rot_fn(self, fn: Callable[[Rotation], Rotation]) -> Rigid:
    """
    Applies a Rotation -> Rotation function to the stored rotation object.

    Args:
        fn: A function of type Rotation -> Rotation.

    Returns:
        A transformation object with a transformed rotation.
    """
    transformed_rot = fn(self._rots)
    return Rigid(transformed_rot, self._trans)
def apply_trans_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rigid:
    """
    Applies a Tensor -> Tensor function to the stored translation.

    Args:
        fn: A function of type Tensor -> Tensor to be applied to the translation.

    Returns:
        A transformation object with a transformed translation.
    """
    transformed_trans = fn(self._trans)
    return Rigid(self._rots, transformed_trans)
def scale_translation(self, trans_scale_factor: float) -> Rigid:
    """
    Scales the translation by a constant factor.

    Args:
        trans_scale_factor: The constant factor.

    Returns:
        A transformation object with a scaled translation.
    """
    def scale(t: torch.Tensor) -> torch.Tensor:
        return t * trans_scale_factor
    return self.apply_trans_fn(scale)
def stop_rot_gradient(self) -> Rigid:
    """
    Detaches the underlying rotation object from the autograd graph.

    Returns:
        A transformation object with detached rotations.
    """
    return self.apply_rot_fn(lambda rot: rot.detach())
@staticmethod
def make_transform_from_reference(
    n_xyz: torch.Tensor, ca_xyz: torch.Tensor, c_xyz: torch.Tensor, eps: float = 1e-20
) -> Rigid:
    """
    Returns a transformation object from reference coordinates.

    Note that this method does not take care of symmetries. If you provide the atom positions in the
    non-standard way, the N atom will end up not at [-0.527250, 1.359329, 0.0] but instead at
    [-0.527250, -1.359329, 0.0]. You need to take care of such cases in your code.

    Args:
        n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.
        ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.
        c_xyz: A [*, 3] tensor of carbon xyz coordinates.
    Returns:
        A transformation object. After applying the translation and rotation to the reference backbone, the
        coordinates will approximately equal to the input coordinates.
    """
    # Shift all atoms so CA sits at the origin.
    translation = -1 * ca_xyz
    n_xyz = n_xyz + translation
    c_xyz = c_xyz + translation
    c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]
    # First rotation (about z): bring C into the xz half-plane (kill its y).
    norm = torch.sqrt(eps + c_x**2 + c_y**2)
    sin_c1 = -c_y / norm
    cos_c1 = c_x / norm
    c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))
    c1_rots[..., 0, 0] = cos_c1
    c1_rots[..., 0, 1] = -1 * sin_c1
    c1_rots[..., 1, 0] = sin_c1
    c1_rots[..., 1, 1] = cos_c1
    c1_rots[..., 2, 2] = 1
    # Second rotation (about y): bring C onto the x axis (kill its z).
    norm = torch.sqrt(eps + c_x**2 + c_y**2 + c_z**2)
    sin_c2 = c_z / norm
    cos_c2 = torch.sqrt(c_x**2 + c_y**2) / norm
    c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))
    c2_rots[..., 0, 0] = cos_c2
    c2_rots[..., 0, 2] = sin_c2
    c2_rots[..., 1, 1] = 1
    c2_rots[..., 2, 0] = -1 * sin_c2
    c2_rots[..., 2, 2] = cos_c2
    c_rots = rot_matmul(c2_rots, c1_rots)
    # Rotate N with the combined C rotation, then rotate about x to put N in
    # the xy plane (kill its z).
    n_xyz = rot_vec_mul(c_rots, n_xyz)
    _, n_y, n_z = [n_xyz[..., i] for i in range(3)]
    norm = torch.sqrt(eps + n_y**2 + n_z**2)
    sin_n = -n_z / norm
    cos_n = n_y / norm
    n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))
    n_rots[..., 0, 0] = 1
    n_rots[..., 1, 1] = cos_n
    n_rots[..., 1, 2] = -1 * sin_n
    n_rots[..., 2, 1] = sin_n
    n_rots[..., 2, 2] = cos_n
    rots = rot_matmul(n_rots, c_rots)
    # Invert: the returned transform maps the reference frame back onto the
    # input coordinates (transpose of a rotation matrix is its inverse).
    rots = rots.transpose(-1, -2)
    translation = -1 * translation
    rot_obj = Rotation(rot_mats=rots, quats=None)
    return Rigid(rot_obj, translation)
def cuda(self) -> Rigid:
    """
    Moves the transformation object to GPU memory.

    Returns:
        A version of the transformation on GPU.
    """
    gpu_rots = self._rots.cuda()
    gpu_trans = self._trans.cuda()
    return Rigid(gpu_rots, gpu_trans)
|
2881099/dotnetGen_sqlserver | 1,413 | GenMs/FastExcel/FastExcel.Worksheets.cs | using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Xml.Linq;
namespace FastExcel {
public partial class FastExcel {
    private Worksheet[] _worksheets;

    /// <summary>
    /// List of worksheets, loaded on first access of the property and cached afterwards.
    /// </summary>
    public Worksheet[] Worksheets {
        get {
            if (_worksheets == null) {
                _worksheets = GetWorksheetProperties();
            }
            return _worksheets;
        }
    }

    /// <summary>
    /// Reads xl/workbook.xml and builds a Worksheet stub (index + name) for every sheet element.
    /// </summary>
    private Worksheet[] GetWorksheetProperties() {
        CheckFiles();
        PrepareArchive(false);
        var worksheets = new List<Worksheet>();
        using (Stream stream = this.Archive.GetEntry("xl/workbook.xml").Open()) {
            // Note: XDocument.Load throws on invalid input rather than returning null,
            // so the previous "document == null" branch was dead code.
            XDocument document = XDocument.Load(stream);
            List<XElement> sheetsElements = document.Descendants().Where(d => d.Name.LocalName == "sheet").ToList();
            // Indexed loop instead of List.IndexOf inside the loop (which was O(n^2)).
            for (int i = 0; i < sheetsElements.Count; i++) {
                XElement sheetElement = sheetsElements[i];
                var worksheet = new Worksheet(this);
                // Worksheet indexes are 1 based.
                worksheet.Index = i + 1;
                worksheet.Name = (from attribute in sheetElement.Attributes()
                                  where attribute.Name == "name"
                                  select attribute.Value).FirstOrDefault();
                worksheets.Add(worksheet);
            }
        }
        return worksheets.ToArray();
    }
}
}
|
2881099/dotnetGen_sqlserver | 1,325 | GenMs/FastExcel/FastExcel.Delete.cs | using System;
using System.Collections.Generic;
using System.IO;
using System.IO.Compression;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Xml.Linq;
namespace FastExcel {
public partial class FastExcel {
    /// <summary>
    /// Deletes the selected sheet. Note: the delete happens on Dispose.
    /// </summary>
    /// <param name="sheetNumber">Sheet number, starts at 1.</param>
    public void Delete(int sheetNumber) {
        this.Delete(sheetNumber, null);
    }

    /// <summary>
    /// Deletes the selected sheet. Note: the delete happens on Dispose.
    /// </summary>
    /// <param name="sheetName">Worksheet name.</param>
    public void Delete(string sheetName) {
        // BUG FIX: this previously called this.Update(null, sheetName), which tried to
        // merge a null worksheet into the sheet instead of deleting it.
        this.Delete(sheetNumber: null, sheetName: sheetName);
    }

    private void Delete(int? sheetNumber = null, string sheetName = null) {
        CheckFiles();
        PrepareArchive(false);
        // Resolve the worksheet (index + part file name) from either identifier.
        Worksheet worksheet = new Worksheet();
        worksheet.GetWorksheetProperties(this, sheetNumber, sheetName);
        if (!string.IsNullOrEmpty(worksheet.FileName)) {
            // Remove the sheet's XML part from the archive immediately...
            ZipArchiveEntry entry = this.Archive.GetEntry(worksheet.FileName);
            if (entry != null) {
                entry.Delete();
            }
            if (this.DeleteWorksheets == null) {
                this.DeleteWorksheets = new List<int>();
            }
            // ...and queue the workbook-level cleanup, which runs on Dispose.
            this.DeleteWorksheets.Add(worksheet.Index);
        }
    }
}
}
|
2881099/dotnetGen_sqlserver | 1,022 | GenMs/FastExcel/FastExcel.Update.cs | using System;
using System.Collections.Generic;
using System.IO;
using System.IO.Compression;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Xml.Linq;
namespace FastExcel {
public partial class FastExcel {
    /// <summary>
    /// Updates a worksheet by merging the supplied data into the existing sheet.
    /// </summary>
    /// <param name="data">The worksheet data to merge in.</param>
    /// <param name="sheetNumber">Sheet number, eg 1, 2, 4.</param>
    public void Update(Worksheet data, int sheetNumber) {
        this.Update(data, sheetNumber, null);
    }

    /// <summary>
    /// Updates a worksheet by merging the supplied data into the existing sheet.
    /// </summary>
    /// <param name="data">The worksheet data to merge in.</param>
    /// <param name="sheetName">Sheet name, eg. Sheet1, Sheet2.</param>
    public void Update(Worksheet data, string sheetName) {
        this.Update(data, null, sheetName);
    }

    private void Update(Worksheet data, int? sheetNumber = null, string sheetName = null) {
        CheckFiles();
        PrepareArchive();
        // Read the current sheet contents, overlay the new data, then write the result back.
        Worksheet merged = this.Read(sheetNumber, sheetName);
        merged.Merge(data);
        this.Write(merged);
    }
}
}
|
2881099/dotnetGen_sqlserver | 5,412 | GenMs/FastExcel/SharedStrings.cs | using System;
using System.Collections.Generic;
using System.IO;
using System.IO.Compression;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Xml.Linq;
namespace FastExcel
{
/// <summary>
/// Read and update xl/sharedStrings.xml file
/// </summary>
/// <summary>
/// Read and update xl/sharedStrings.xml file
/// </summary>
public class SharedStrings
{
    // A dictionary is a lot faster than a list for string -> index lookups.
    private Dictionary<string, int> StringDictionary { get; set; }

    // Reverse lookup (index -> string), built lazily by GetString.
    private Dictionary<int, string> StringArray { get; set; }

    private bool SharedStringsExists { get; set; }

    private ZipArchive ZipArchive { get; set; }

    /// <summary>True when strings were added and the part must be rewritten.</summary>
    public bool PendingChanges { get; private set; }

    /// <summary>When true, the reverse lookup table is kept up to date as strings are added.</summary>
    public bool ReadWriteMode { get; set; }

    internal SharedStrings(ZipArchive archive)
    {
        this.ZipArchive = archive;
        this.SharedStringsExists = true;
        if (!this.ZipArchive.Entries.Where(entry => entry.FullName == "xl/sharedStrings.xml").Any())
        {
            this.StringDictionary = new Dictionary<string, int>();
            this.SharedStringsExists = false;
            return;
        }
        using (Stream stream = this.ZipArchive.GetEntry("xl/sharedStrings.xml").Open())
        {
            // Note: XDocument.Load throws on invalid XML rather than returning null.
            XDocument document = XDocument.Load(stream);
            int i = 0;
            // XDocument decodes XML entities, so the dictionary holds the raw
            // (unescaped) string values keyed to their zero-based index.
            this.StringDictionary = document.Descendants().Where(d => d.Name.LocalName == "t").Select(e => e.Value).ToDictionary(k => k, v => i++);
        }
    }

    /// <summary>
    /// Adds a string to the shared string table if not already present.
    /// </summary>
    /// <param name="stringValue">The raw (unescaped) string value.</param>
    /// <returns>The zero-based shared string index.</returns>
    internal int AddString(string stringValue)
    {
        // Single lookup instead of ContainsKey + indexer.
        int existingIndex;
        if (this.StringDictionary.TryGetValue(stringValue, out existingIndex))
        {
            return existingIndex;
        }
        this.PendingChanges = true;
        int newIndex = this.StringDictionary.Count;
        this.StringDictionary.Add(stringValue, newIndex);
        if (this.ReadWriteMode && this.StringArray != null)
        {
            // Keep the reverse table in sync so reads stay valid.
            this.StringArray.Add(newIndex, stringValue);
        }
        else
        {
            // Invalidate the reverse table; GetString rebuilds it on demand.
            this.StringArray = null;
        }
        return newIndex;
    }

    /// <summary>
    /// Writes the shared string table back to xl/sharedStrings.xml if it changed.
    /// </summary>
    internal void Write()
    {
        // Only update if changes were made
        if (!this.PendingChanges)
        {
            return;
        }
        StreamWriter streamWriter = null;
        try
        {
            // NOTE(review): writing into an existing entry's stream does not truncate it;
            // if the new payload were shorter than the old one, stale bytes could remain.
            // Verify against how FastExcel recreates entries on save.
            ZipArchiveEntry entry = this.SharedStringsExists
                ? this.ZipArchive.GetEntry("xl/sharedStrings.xml")
                : this.ZipArchive.CreateEntry("xl/sharedStrings.xml");
            streamWriter = new StreamWriter(entry.Open());
            /* Note: the count attribute value is wrong, it is the number of times strings are used thoughout the workbook it is different to the unique count
             * but because this library is about speed and Excel does not seem to care I am not going to fix it because I would need to read the whole workbook
             */
            streamWriter.Write(string.Format("<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>" +
                "<sst uniqueCount=\"{0}\" count=\"{0}\" xmlns=\"http://schemas.openxmlformats.org/spreadsheetml/2006/main\">", this.StringDictionary.Count));
            // Add Rows
            foreach (var stringValue in this.StringDictionary)
            {
                // BUG FIX: values must be XML-escaped, otherwise strings containing
                // '&', '<' or '>' produce a corrupt workbook.
                streamWriter.Write(string.Format("<si><t>{0}</t></si>", EscapeXmlText(stringValue.Key)));
            }
            // Add Footers
            streamWriter.Write("</sst>");
            streamWriter.Flush();
        }
        finally
        {
            // BUG FIX: guard against a failure before the writer was created.
            if (streamWriter != null)
            {
                streamWriter.Dispose();
            }
            this.PendingChanges = false;
        }
    }

    /// <summary>
    /// Escapes the characters that are illegal inside an XML text node.
    /// </summary>
    private static string EscapeXmlText(string value)
    {
        // '&' must be replaced first so freshly produced entities are not double-escaped.
        return value.Replace("&", "&amp;").Replace("<", "&lt;").Replace(">", "&gt;");
    }

    /// <summary>
    /// Resolves a cell's shared string reference (a zero-based index as text) to its value.
    /// </summary>
    internal string GetString(string position)
    {
        int pos = 0;
        if (int.TryParse(position, out pos))
        {
            return GetString(pos + 1);
        }
        else
        {
            // TODO: should I throw an error? this is a corrupted excel document
            return string.Empty;
        }
    }

    /// <summary>
    /// Resolves a one-based shared string position to its value, building the
    /// reverse lookup table on first use.
    /// </summary>
    internal string GetString(int position)
    {
        if (this.StringArray == null)
        {
            this.StringArray = this.StringDictionary.ToDictionary(kv => kv.Value, kv => kv.Key);
        }
        return this.StringArray[position - 1];
    }
}
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 3,764 | src/transformers/models/esm/openfold_utils/data_transforms.py | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37).

    Adds four keys to ``protein`` (which is mutated in place and returned):
    "atom14_atom_exists", "residx_atom14_to_atom37", "residx_atom37_to_atom14"
    and "atom37_atom_exists", all indexed by the per-residue "aatype".
    """
    # Per-restype lookup tables built from the residue constants:
    # atom14 slot -> atom37 index, atom37 index -> atom14 slot, and a mask of
    # which of the 14 slots exist for each restype.
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        # Empty atom names pad the 14 slots; they map to index 0 with mask 0.
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein
def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    """Numpy wrapper around make_atom14_masks: converts ndarray leaves to torch
    tensors, builds the atom14/atom37 masks, then converts every output leaf
    back to numpy.
    """
    # NOTE(review): the lambda reads batch["aatype"].device while `batch` is
    # still the caller's dict (the rebinding happens after tree_map returns),
    # so this assumes batch["aatype"] is already a torch tensor — numpy arrays
    # have no .device. TODO confirm against callers.
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
|
2881099/dotnetGen_sqlserver | 13,345 | GenMs/WinFormClass/Socket/ServerSocket.cs | using System;
using System.Collections.Generic;
using System.Net;
using System.Net.Sockets;
using System.Text;
using System.Threading;
/// <summary>
/// TCP server that accepts AcceptSocket clients, dispatches received messages through
/// internal work queues, and raises Accepted/Closed/Receive/Error events.
/// </summary>
public class ServerSocket : IDisposable {
    private TcpListener _tcpListener;
    // Background thread running the accept loop until _running is cleared.
    private Thread _tcpListenerThread;
    // Connected clients keyed by their id. NOTE(review): reads happen without
    // _clients_lock and rely on a catch/retry pattern below — confirm WorkQueue
    // serializes the mutating paths.
    private Dictionary<int, AcceptSocket> _clients = new Dictionary<int, AcceptSocket>();
    private object _clients_lock = new object();
    // Next client id; incremented when a client is registered.
    private int _id = 1;
    private int _port;
    private bool _running;
    // Signalled by the accept thread once shutdown cleanup has finished.
    private ManualResetEvent _stopWait;
    public event ServerSocketAcceptedEventHandler Accepted;
    public event ServerSocketClosedEventHandler Closed;
    public event ServerSocketReceiveEventHandler Receive;
    public event ServerSocketErrorEventHandler Error;
    // Work queues decoupling accept / receive / write processing from the socket threads.
    private WorkQueue _acceptWQ;
    internal WorkQueue _receiveWQ;
    internal WorkQueue _receiveSyncWQ;
    private WorkQueue _writeWQ;

    /// <summary>Creates a server that will listen on the given TCP port when started.</summary>
    public ServerSocket(int port) {
        this._port = port;
    }

    /// <summary>
    /// Starts listening and spawns the accept loop. No-op if already running.
    /// Errors are reported through the Error event rather than thrown.
    /// </summary>
    public void Start() {
        if (this._running == false) {
            this._running = true;
            try {
                this._tcpListener = new TcpListener(IPAddress.Any, this._port);
                this._tcpListener.Start();
                this._acceptWQ = new WorkQueue();
                this._receiveWQ = new WorkQueue();
                this._receiveSyncWQ = new WorkQueue();
                this._writeWQ = new WorkQueue();
            } catch (Exception ex) {
                this._running = false;
                this.OnError(ex);
                return;
            }
            this._tcpListenerThread = new Thread(delegate() {
                // Accept loop: blocks on the listener, hands each new client to the accept queue.
                while (this._running) {
                    try {
                        TcpClient tcpClient = this._tcpListener.AcceptTcpClientAsync().Result;
                        this._acceptWQ.Enqueue(delegate() {
                            try {
                                AcceptSocket acceptSocket = new AcceptSocket(this, tcpClient, this._id);
                                this.OnAccepted(acceptSocket);
                            } catch (Exception ex) {
                                this.OnError(ex);
                            }
                        });
                    } catch (Exception ex) {
                        this.OnError(ex);
                    }
                }
                // Shutdown path: snapshot the client keys; the bare CopyTo can race with
                // concurrent mutation, so retry under the lock on failure.
                int[] keys = new int[this._clients.Count];
                try {
                    this._clients.Keys.CopyTo(keys, 0);
                } catch {
                    lock (this._clients_lock) {
                        keys = new int[this._clients.Count];
                        this._clients.Keys.CopyTo(keys, 0);
                    }
                }
                foreach (int key in keys) {
                    AcceptSocket client = null;
                    if (this._clients.TryGetValue(key, out client)) {
                        client.Close();
                    }
                }
                if (this._acceptWQ != null) {
                    this._acceptWQ.Dispose();
                }
                if (this._receiveWQ != null) {
                    this._receiveWQ.Dispose();
                }
                if (this._receiveSyncWQ != null) {
                    this._receiveSyncWQ.Dispose();
                }
                if (this._writeWQ != null) {
                    this._writeWQ.Dispose();
                }
                this._clients.Clear();
                // Release the thread blocked in Stop().
                this._stopWait.Set();
            });
            this._tcpListenerThread.Start();
        }
    }

    /// <summary>
    /// Stops the listener and blocks until the accept thread has finished its cleanup.
    /// </summary>
    public void Stop() {
        if (this._tcpListener != null) {
            this._tcpListener.Stop();
        }
        if (this._running == true) {
            this._stopWait = new ManualResetEvent(false);
            this._stopWait.Reset();
            this._running = false;
            this._stopWait.WaitOne();
        }
    }

    /// <summary>
    /// Notifies a rejected client (best effort, 1s timeout) and closes it.
    /// </summary>
    internal void AccessDenied(AcceptSocket client) {
        client.Write(SocketMessager.SYS_ACCESS_DENIED, delegate(object sender2, ServerSocketReceiveEventArgs e2) {
        }, TimeSpan.FromSeconds(1));
        client.Close();
    }

    /// <summary>
    /// Broadcasts a message to every connected client via the write queue.
    /// </summary>
    public void Write(SocketMessager messager) {
        // Same snapshot-with-retry pattern as the shutdown path above.
        int[] keys = new int[this._clients.Count];
        try {
            this._clients.Keys.CopyTo(keys, 0);
        } catch {
            lock (this._clients_lock) {
                keys = new int[this._clients.Count];
                this._clients.Keys.CopyTo(keys, 0);
            }
        }
        foreach (int key in keys) {
            AcceptSocket client = null;
            if (this._clients.TryGetValue(key, out client)) {
                this._writeWQ.Enqueue(delegate() {
                    client.Write(messager);
                });
            }
        }
    }

    /// <summary>Returns the client with the given id, or null if not connected.</summary>
    public AcceptSocket GetAcceptSocket(int id) {
        AcceptSocket socket = null;
        this._clients.TryGetValue(id, out socket);
        return socket;
    }

    /// <summary>
    /// Unregisters a client. NOTE(review): removes without taking _clients_lock,
    /// while registration locks — confirm both run on the same work queue.
    /// </summary>
    internal void CloseClient(AcceptSocket client) {
        this._clients.Remove(client.Id);
    }

    /// <summary>
    /// Performs the hello handshake (2s timeout); raises Accepted on success,
    /// otherwise denies access.
    /// </summary>
    protected virtual void OnAccepted(ServerSocketAcceptedEventArgs e) {
        SocketMessager helloMessager = new SocketMessager(SocketMessager.SYS_HELLO_WELCOME.Action);
        e.AcceptSocket.Write(helloMessager, delegate(object sender2, ServerSocketReceiveEventArgs e2) {
            // The client must echo the hello message (same id and action) to be accepted.
            if (e2.Messager.Id == helloMessager.Id &&
                string.Compare(e2.Messager.Action, helloMessager.Action) == 0) {
                e.AcceptSocket._accepted = true;
            }
        }, TimeSpan.FromSeconds(2));
        if (e.AcceptSocket._accepted) {
            if (this.Accepted != null) {
                try {
                    this.Accepted(this, e);
                } catch (Exception ex) {
                    this.OnError(ex);
                }
            }
        } else {
            e.AcceptSocket.AccessDenied();
        }
    }

    private void OnAccepted(AcceptSocket client) {
        // Register under the same id the client was constructed with, then advance it.
        lock (_clients_lock) {
            _clients.Add(this._id++, client);
        }
        ServerSocketAcceptedEventArgs e = new ServerSocketAcceptedEventArgs(this._clients.Count, client);
        this.OnAccepted(e);
    }

    protected virtual void OnClosed(ServerSocketClosedEventArgs e) {
        if (this.Closed != null) {
            this.Closed(this, e);
        }
    }

    internal void OnClosed(AcceptSocket client) {
        ServerSocketClosedEventArgs e = new ServerSocketClosedEventArgs(this._clients.Count, client.Id);
        this.OnClosed(e);
    }

    protected virtual void OnReceive(ServerSocketReceiveEventArgs e) {
        if (this.Receive != null) {
            this.Receive(this, e);
        }
    }

    // Internal bridge so AcceptSocket can raise the protected event methods.
    internal void OnReceive2(ServerSocketReceiveEventArgs e) {
        this.OnReceive(e);
    }

    protected virtual void OnError(ServerSocketErrorEventArgs e) {
        if (this.Error != null) {
            this.Error(this, e);
        }
    }

    protected void OnError(Exception ex) {
        ServerSocketErrorEventArgs e = new ServerSocketErrorEventArgs(-1, ex, null);
        this.OnError(e);
    }

    internal void OnError2(ServerSocketErrorEventArgs e) {
        this.OnError(e);
    }

    #region IDisposable members
    public void Dispose() {
        this.Stop();
    }
    #endregion
}
/// <summary>
/// Server-side endpoint for one accepted TCP connection. A background thread
/// reads framed <see cref="SocketMessager"/> messages, answers idle links with
/// keep-alive probes, and dispatches real messages to the owning
/// <see cref="ServerSocket"/>'s work queues.
/// </summary>
public class AcceptSocket : BaseSocket, IDisposable {
    private ServerSocket _server;      // owning server (event sinks + work queues)
    private TcpClient _tcpClient;      // the accepted connection; null once Close() runs
    private Thread _thread;            // background read / keep-alive loop
    private bool _running;             // loop flag; cleared by Close() or a fatal error
    private int _id;                   // id assigned by the server at accept time
    private int _receives;             // running count of received messages
    private int _errors;               // error counter, guarded by _errors_lock
    private object _errors_lock = new object();
    private object _write_lock = new object();   // serializes frame writes on the stream
    // Pending request/response handlers, keyed by SocketMessager.Id.
    private Dictionary<int, SyncReceive> _receiveHandlers = new Dictionary<int, SyncReceive>();
    private object _receiveHandlers_lock = new object();
    private DateTime _lastActive;      // last read/write time; drives the keep-alive probe
    internal bool _accepted;           // set by the server once the hello handshake succeeds
    /// <summary>
    /// Starts the background reader immediately; the socket is live as soon as
    /// the constructor returns.
    /// </summary>
    public AcceptSocket(ServerSocket server, TcpClient tcpClient, int id) {
        this._running = true;
        this._id = id;
        this._server = server;
        this._tcpClient = tcpClient;
        this._lastActive = DateTime.Now;
        this._thread = new Thread(delegate() {
            while (this._running) {
                try {
                    NetworkStream ns = this._tcpClient.GetStream();
                    ns.ReadTimeout = 1000 * 20;
                    if (ns.DataAvailable) {
                        SocketMessager messager = base.Read(ns);
                        // Keep-alive probes are swallowed; everything else is dispatched.
                        if (string.Compare(messager.Action, SocketMessager.SYS_TEST_LINK.Action) != 0) {
                            ServerSocketReceiveEventArgs e = new ServerSocketReceiveEventArgs(this._receives++, messager, this);
                            SyncReceive receive = null;
                            if (this._receiveHandlers.TryGetValue(messager.Id, out receive)) {
                                // A caller is blocked in Write(...) waiting for this reply:
                                // run its handler on the sync queue, then wake the caller.
                                this._server._receiveSyncWQ.Enqueue(delegate() {
                                    try {
                                        receive.ReceiveHandler(this, e);
                                    } catch (Exception ex) {
                                        this.OnError(ex);
                                    } finally {
                                        receive.Wait.Set();
                                    }
                                });
                            } else {
                                // Unsolicited message: raise the server's Receive event.
                                this._server._receiveWQ.Enqueue(delegate() {
                                    this.OnReceive(e);
                                });
                            }
                        }
                        this._lastActive = DateTime.Now;
                    } else if (_accepted) {
                        // Idle link: probe it after 5 quiet seconds so a dead peer
                        // is detected by the resulting write failure.
                        TimeSpan ts = DateTime.Now - _lastActive;
                        if (ts.TotalSeconds > 5) {
                            this.Write(SocketMessager.SYS_TEST_LINK);
                        }
                    }
                    if (!ns.DataAvailable) Thread.CurrentThread.Join(1);   // yield instead of spinning
                } catch (Exception ex) {
                    this._running = false;
                    this.OnError(ex);
                }
            }
            this.Close();
            this.OnClosed();
        });
        this._thread.Start();
    }
    /// <summary>
    /// Stops the reader loop, disposes the connection, unregisters from the
    /// server, and releases every caller still blocked on a pending reply.
    /// </summary>
    public void Close() {
        this._running = false;
        if (this._tcpClient != null) {
            this._tcpClient.Dispose();
            this._tcpClient = null;
        }
        this._server.CloseClient(this);
        int[] keys = new int[this._receiveHandlers.Count];
        try {
            // Optimistic lock-free snapshot; falls back to the locked path if a
            // concurrent mutation changes the dictionary mid-copy.
            this._receiveHandlers.Keys.CopyTo(keys, 0);
        } catch {
            lock (this._receiveHandlers_lock) {
                keys = new int[this._receiveHandlers.Count];
                this._receiveHandlers.Keys.CopyTo(keys, 0);
            }
        }
        foreach (int key in keys) {
            SyncReceive receiveHandler = null;
            if (this._receiveHandlers.TryGetValue(key, out receiveHandler)) {
                receiveHandler.Wait.Set();   // release the waiting Write(...) caller
            }
        }
        lock (this._receiveHandlers_lock) {
            this._receiveHandlers.Clear();
        }
    }
    /// <summary>Sends a message without waiting for a reply.</summary>
    public void Write(SocketMessager messager) {
        this.Write(messager, null, TimeSpan.Zero);
    }
    /// <summary>Sends a message and waits up to 20 seconds for the matching reply.</summary>
    public void Write(SocketMessager messager, ServerSocketReceiveEventHandler receiveHandler) {
        this.Write(messager, receiveHandler, TimeSpan.FromSeconds(20));
    }
    /// <summary>
    /// Sends a message. When <paramref name="receiveHandler"/> is non-null the
    /// call blocks until the peer answers with the same messager id or
    /// <paramref name="timeout"/> elapses; the handler runs on the server's
    /// sync receive queue.
    /// </summary>
    public void Write(SocketMessager messager, ServerSocketReceiveEventHandler receiveHandler, TimeSpan timeout) {
        SyncReceive syncReceive = null;
        try {
            if (receiveHandler != null) {
                // Register the handler before writing so the reply cannot race us.
                syncReceive = new SyncReceive(receiveHandler);
                lock (this._receiveHandlers_lock) {
                    if (!this._receiveHandlers.ContainsKey(messager.Id)) {
                        this._receiveHandlers.Add(messager.Id, syncReceive);
                    } else {
                        this._receiveHandlers[messager.Id] = syncReceive;
                    }
                }
            }
            lock (_write_lock) {
                NetworkStream ns = this._tcpClient.GetStream();
                base.Write(ns, messager);
            }
            this._lastActive = DateTime.Now;
            if (syncReceive != null) {
                // Block until the reader thread signals the reply (or timeout).
                syncReceive.Wait.Reset();
                syncReceive.Wait.WaitOne(timeout);
                syncReceive.Wait.Set();
                lock (this._receiveHandlers_lock) {
                    this._receiveHandlers.Remove(messager.Id);
                }
            }
        } catch (Exception ex) {
            this._running = false;
            this.OnError(ex);
            if (syncReceive != null) {
                syncReceive.Wait.Set();
                lock (this._receiveHandlers_lock) {
                    this._receiveHandlers.Remove(messager.Id);
                }
            }
        }
    }
    /// <summary>
    /// Denies access and closes the connection.
    /// </summary>
    public void AccessDenied() {
        this._server.AccessDenied(this);
    }
    // Forwards the close notification to the owning server.
    protected virtual void OnClosed() {
        try {
            this._server.OnClosed(this);
        } catch (Exception ex) {
            this.OnError(ex);
        }
    }
    // Forwards a received message to the owning server's Receive event.
    protected virtual void OnReceive(ServerSocketReceiveEventArgs e) {
        try {
            this._server.OnReceive2(e);
        } catch (Exception ex) {
            this.OnError(ex);
        }
    }
    // Counts the error and forwards it to the owning server's Error event.
    protected virtual void OnError(Exception ex) {
        int errors = 0;
        lock (this._errors_lock) {
            errors = ++this._errors;
        }
        ServerSocketErrorEventArgs e = new ServerSocketErrorEventArgs(errors, ex, this);
        this._server.OnError2(e);
    }
    /// <summary>Server-assigned id of this accepted connection.</summary>
    public int Id {
        get { return _id; }
    }
    // Pairs a pending reply handler with the event the writing thread blocks on.
    class SyncReceive : IDisposable {
        private ServerSocketReceiveEventHandler _receiveHandler;
        private ManualResetEvent _wait;
        public SyncReceive(ServerSocketReceiveEventHandler onReceive) {
            this._receiveHandler = onReceive;
            this._wait = new ManualResetEvent(false);
        }
        public ManualResetEvent Wait {
            get { return _wait; }
        }
        public ServerSocketReceiveEventHandler ReceiveHandler {
            get { return _receiveHandler; }
        }
        #region IDisposable 成员
        public void Dispose() {
            this._wait.Set();
        }
        #endregion
    }
    #region IDisposable 成员
    void IDisposable.Dispose() {
        this.Close();
    }
    #endregion
}
/// <summary>Subscriber signature for the server's Closed event.</summary>
public delegate void ServerSocketClosedEventHandler(object sender, ServerSocketClosedEventArgs e);
/// <summary>Subscriber signature for the server's Accepted event.</summary>
public delegate void ServerSocketAcceptedEventHandler(object sender, ServerSocketAcceptedEventArgs e);
/// <summary>Subscriber signature for the server's Error event.</summary>
public delegate void ServerSocketErrorEventHandler(object sender, ServerSocketErrorEventArgs e);
/// <summary>Subscriber signature for the server's Receive event.</summary>
public delegate void ServerSocketReceiveEventHandler(object sender, ServerSocketReceiveEventArgs e);
/// <summary>
/// Event data raised when an accepted client connection is closed.
/// </summary>
public class ServerSocketClosedEventArgs : EventArgs {
    private int _accepts;
    private int _acceptSocketId;

    /// <param name="accepts">Number of clients the server currently tracks.</param>
    /// <param name="acceptSocketId">Id of the socket that was closed.</param>
    public ServerSocketClosedEventArgs(int accepts, int acceptSocketId) {
        _accepts = accepts;
        _acceptSocketId = acceptSocketId;
    }

    /// <summary>Number of clients the server currently tracks.</summary>
    public int Accepts {
        get { return this._accepts; }
    }

    /// <summary>Id of the closed socket.</summary>
    public int AcceptSocketId {
        get { return this._acceptSocketId; }
    }
}
/// <summary>
/// Event data raised when a client has been accepted and passed the hello
/// handshake.
/// </summary>
public class ServerSocketAcceptedEventArgs : EventArgs {
    private int _accepts;
    private AcceptSocket _acceptSocket;

    /// <param name="accepts">Number of clients the server currently tracks.</param>
    /// <param name="acceptSocket">The socket that was just accepted.</param>
    public ServerSocketAcceptedEventArgs(int accepts, AcceptSocket acceptSocket) {
        _accepts = accepts;
        _acceptSocket = acceptSocket;
    }

    /// <summary>Number of clients the server currently tracks.</summary>
    public int Accepts {
        get { return this._accepts; }
    }

    /// <summary>The newly accepted socket.</summary>
    public AcceptSocket AcceptSocket {
        get { return this._acceptSocket; }
    }
}
/// <summary>
/// Event data describing a server-side socket failure.
/// </summary>
public class ServerSocketErrorEventArgs : EventArgs {
    private int _errors;
    private Exception _exception;
    private AcceptSocket _acceptSocket;

    /// <param name="errors">Error count for the client, or -1 when not client-specific.</param>
    /// <param name="exception">The exception that was caught.</param>
    /// <param name="acceptSocket">Client the error belongs to, or null.</param>
    public ServerSocketErrorEventArgs(int errors, Exception exception, AcceptSocket acceptSocket) {
        _errors = errors;
        _exception = exception;
        _acceptSocket = acceptSocket;
    }

    /// <summary>Error count for the client, or -1 when not client-specific.</summary>
    public int Errors {
        get { return this._errors; }
    }

    /// <summary>The exception that was caught.</summary>
    public Exception Exception {
        get { return this._exception; }
    }

    /// <summary>Client the error belongs to; may be null.</summary>
    public AcceptSocket AcceptSocket {
        get { return this._acceptSocket; }
    }
}
/// <summary>
/// Event data carrying a message received from an accepted client.
/// </summary>
public class ServerSocketReceiveEventArgs : EventArgs {
    private int _receives;
    private SocketMessager _messager;
    private AcceptSocket _acceptSocket;

    /// <param name="receives">Sequence number of this receive on the client.</param>
    /// <param name="messager">The decoded message.</param>
    /// <param name="acceptSocket">Client the message came from.</param>
    public ServerSocketReceiveEventArgs(int receives, SocketMessager messager, AcceptSocket acceptSocket) {
        _receives = receives;
        _messager = messager;
        _acceptSocket = acceptSocket;
    }

    /// <summary>Sequence number of this receive on the client.</summary>
    public int Receives {
        get { return this._receives; }
    }

    /// <summary>The decoded message.</summary>
    public SocketMessager Messager {
        get { return this._messager; }
    }

    /// <summary>Client the message came from.</summary>
    public AcceptSocket AcceptSocket {
        get { return this._acceptSocket; }
    }
}
2881099/dotnetGen_sqlserver | 7,283 | GenMs/WinFormClass/Socket/BaseSocket.cs | /**********************************************************************************
*
* 此文件代码由 NicPetShop.exe 自动生成,您没有必要修改它或删除它
* NicPetShop.exe 能将数据库的关系映射到 c#,让您使用更方便,您无需要担心它的性能
* NicPetShop.exe 将永久免费给大家使用
*
* Author: Nic
* QQ: 2881099
* Email: kellynic@163.com
* 帮助: http://www.kellynic.com/default.asp?tag=NicPetShop
*
**********************************************************************************/
using System;
using System.IO;
using System.Collections.Generic;
using System.Globalization;
using System.Net.Sockets;
using System.Text;
using System.Threading;
using System.Runtime.Serialization;
using System.Runtime.Serialization.Formatters.Binary;
using System.Reflection;
/// <summary>
/// Base class implementing the wire protocol shared by client and server
/// sockets: each frame is an 8-character hex length prefix (covering the whole
/// frame, prefix included) followed by a UTF-8 messager header and an optional
/// Deflate-compressed, BinaryFormatter-serialized argument payload.
/// </summary>
public class BaseSocket {
    /// <summary>
    /// Serializes <paramref name="messager"/> (header plus optional compressed
    /// argument) and writes it to <paramref name="stream"/> as one frame.
    /// </summary>
    protected void Write(Stream stream, SocketMessager messager) {
        MemoryStream ms = new MemoryStream();
        byte[] buff = Encoding.UTF8.GetBytes(messager.GetCanParseString());
        ms.Write(buff, 0, buff.Length);
        if (messager.Arg != null) {
            // The argument travels as Deflate-compressed BinaryFormatter output.
            buff = Deflate.Compress(BaseSocket.Serialize(messager.Arg));
            ms.Write(buff, 0, buff.Length);
        }
        this.Write(stream, ms.ToArray());
        ms.Close();
    }
    // Prepends the 8-character hex length (frame length = payload + 8-byte
    // prefix) and writes the whole frame in a single stream.Write call.
    private void Write(Stream stream, byte[] data) {
        MemoryStream ms = new MemoryStream();
        byte[] buff = Encoding.UTF8.GetBytes(Convert.ToString(data.Length + 8, 16).PadRight(8));
        ms.Write(buff, 0, buff.Length);
        ms.Write(data, 0, data.Length);
        buff = ms.ToArray();
        ms.Close();
        stream.Write(buff, 0, buff.Length);
    }
    /// <summary>
    /// Reads one complete frame from <paramref name="stream"/> and decodes it
    /// into a <see cref="SocketMessager"/>. Returns null when the 8-byte
    /// length prefix is not valid hex.
    /// </summary>
    protected SocketMessager Read(Stream stream) {
        byte[] data = new byte[8];
        int bytes = 0;
        int overs = data.Length;
        string size = string.Empty;
        // The length prefix is ASCII hex, so accumulating partial reads as
        // UTF-8 text is safe here.
        while (overs > 0) {
            bytes = stream.Read(data, 0, overs);
            overs -= bytes;
            size += Encoding.UTF8.GetString(data, 0, bytes);
        }
        if (int.TryParse(size, NumberStyles.HexNumber, null, out overs) == false) {
            return null;
        }
        // The advertised length includes the 8-byte prefix we already consumed.
        overs -= data.Length;
        MemoryStream ms = new MemoryStream();
        data = new byte[1024];
        while (overs > 0) {
            bytes = stream.Read(data, 0, overs < data.Length ? overs : data.Length);
            overs -= bytes;
            ms.Write(data, 0, bytes);
        }
        data = ms.ToArray();
        ms.Close();
        return SocketMessager.Parse(data);
    }
    /// <summary>
    /// Returns the index of the first occurrence of <paramref name="find"/>
    /// inside <paramref name="source"/> at or after <paramref name="startIndex"/>,
    /// or -1 when absent. Fixed: the previous implementation resumed scanning
    /// past partially matched bytes, so overlapping candidates were missed
    /// (e.g. searching for {13,10} in {13,13,10} returned -1 instead of 1).
    /// </summary>
    public static int findBytes(byte[] source, byte[] find, int startIndex) {
        if (find == null || find.Length == 0) return -1;
        if (source == null || source.Length == 0) return -1;
        if (startIndex < 0) startIndex = 0;
        for (int idx = startIndex; idx <= source.Length - find.Length; idx++) {
            int a = 0;
            while (a < find.Length && source[idx + a] == find[a]) a++;
            if (a == find.Length) return idx;
        }
        return -1;
    }
    /// <summary>
    /// Serializes <paramref name="obj"/> with BinaryFormatter.
    /// SECURITY NOTE: BinaryFormatter is inherently unsafe for untrusted data
    /// and is deprecated by Microsoft; both peers of this protocol must be
    /// trusted. Replacing the wire format would break compatibility, so it is
    /// flagged here rather than changed.
    /// </summary>
    public static byte[] Serialize(object obj) {
        IFormatter formatter = new BinaryFormatter();
        MemoryStream ms = new MemoryStream();
        formatter.Serialize(ms, obj);
        byte[] data = ms.ToArray();
        ms.Close();
        return data;
    }
    /// <summary>
    /// Deserializes a BinaryFormatter payload, remapping "Common"-assembly
    /// type names to this assembly via <see cref="TransmissionBinder"/>.
    /// SECURITY NOTE: see <see cref="Serialize"/> — only use with trusted peers.
    /// </summary>
    public static object Deserialize(byte[] stream) {
        IFormatter formatter = new BinaryFormatter();
        formatter.Binder = new TransmissionBinder();
        MemoryStream ms = new MemoryStream(stream);
        object obj = formatter.Deserialize(ms);
        ms.Close();
        return obj;
    }
}
/// <summary>
/// SerializationBinder applied during deserialization of remote payloads: it
/// remaps type names recorded under the "Common" assembly to the local "GenMs"
/// assembly so the shared contract types resolve on this side of the wire.
/// </summary>
internal class TransmissionBinder : SerializationBinder {
    public override Type BindToType(string assemblyName, string typeName) {
        // Rewrite the assembly-qualified portion, then resolve locally.
        // Type names without a "Common, " marker resolve unchanged.
        string localName = typeName.Replace("Common, ", "GenMs, ");
        return Type.GetType(localName);
    }
}
/// <summary>
/// One protocol message: an id (for request/response matching), an action
/// name, an optional permission string, the sender's timestamp, and an
/// optional serialized argument. Provides the tab-separated header encoding
/// used on the wire and its parser.
/// </summary>
public class SocketMessager {
    private static int _identity;   // shared id sequence for new messagers
    // Well-known system messages: keep-alive probe, hello handshake, rejection.
    public static readonly SocketMessager SYS_TEST_LINK = new SocketMessager("\0");
    public static readonly SocketMessager SYS_HELLO_WELCOME = new SocketMessager("Hello, Welcome!");
    public static readonly SocketMessager SYS_ACCESS_DENIED = new SocketMessager("Access Denied.");
    private int _id;
    public bool _isChangeId;        // true once Id has been reassigned after construction
    private string _action;
    private string _permission;
    private DateTime _remoteTime;
    private object _arg;
    private Exception _exception;
    public SocketMessager(string action)
        : this(action, null, null) {
    }
    public SocketMessager(string action, object arg)
        : this(action, null, arg) {
    }
    // Main constructor: assigns the next id from the shared sequence and
    // normalizes null action/permission to empty strings.
    public SocketMessager(string action, string permission, object arg) {
        this._id = Interlocked.Increment(ref _identity);
        this._action = action == null ? string.Empty : action;
        this._permission = permission == null ? string.Empty : permission;
        this._arg = arg;
        this._remoteTime = DateTime.Now;
    }
    // Human-readable dump (tabs in fields are escaped for display only).
    public override string ToString() {
        return
            this._remoteTime.ToString("yyyy-MM-dd HH:mm:ss") + "\t" +
            this._id + "\t" +
            this._action.Replace("\t", "\\t") + "\t" +
            this._permission.Replace("\t", "\\t") + "\t" +
            this._arg;
    }
    /// <summary>
    /// Encodes the header for the wire. System messages use short forms; all
    /// others are "id \t action \t permission \t timestamp \r\n" with backslash,
    /// tab and CRLF escaped inside the fields.
    /// </summary>
    public string GetCanParseString() {
        if (string.Compare(this._action, SocketMessager.SYS_TEST_LINK.Action) == 0) {
            return this.Action;
        } else if (
            string.Compare(this._action, SocketMessager.SYS_HELLO_WELCOME.Action) == 0 ||
            string.Compare(this._action, SocketMessager.SYS_ACCESS_DENIED.Action) == 0) {
            return
                this._id + "\t" +
                this.Action + "\r\n";
        } else {
            return
                this._id + "\t" +
                this._action.Replace("\\", "\\\\").Replace("\t", "\\t").Replace("\r\n", "\\n") + "\t" +
                this._permission.Replace("\\", "\\\\").Replace("\t", "\\t").Replace("\r\n", "\\n") + "\t" +
                this._remoteTime.ToString("yyyy-MM-dd HH:mm:ss") + "\r\n";
        }
    }
    /// <summary>
    /// Decodes a frame produced by <see cref="GetCanParseString"/> (plus an
    /// optional compressed argument payload after the first CRLF).
    /// NOTE(review): unescaping maps "\n" back to CRLF, so a lone LF inside a
    /// field would not round-trip exactly — confirm fields never contain bare LF.
    /// </summary>
    public static SocketMessager Parse(byte[] data) {
        if (data == null) return new SocketMessager("NULL");
        if (data.Length == 1 && data[0] == 0) return SocketMessager.SYS_TEST_LINK;
        int idx = BaseSocket.findBytes(data, new byte[] { 13, 10 }, 0);
        string text = Encoding.UTF8.GetString(data, 0, idx);
        string[] loc1 = text.Split(new string[] { "\t" }, 4, StringSplitOptions.None);
        string loc2 = loc1[0];
        string loc3 = loc1.Length > 1 ? loc1[1].Replace("\\\\", "\\").Replace("\\t", "\t").Replace("\\n", "\r\n") : null;
        string loc4 = loc1.Length > 2 ? loc1[2].Replace("\\\\", "\\").Replace("\\t", "\t").Replace("\\n", "\r\n") : null;
        string loc5 = loc1.Length > 3 ? loc1[3] : null;
        MemoryStream ms = new MemoryStream();
        ms.Write(data, idx + 2, data.Length - idx - 2);
        SocketMessager messager = new SocketMessager(loc3, loc4,
            ms.Length > 0 ? BaseSocket.Deserialize(Deflate.Decompress(ms.ToArray())) : null);
        if (int.TryParse(loc2, out idx)) messager._id = idx;
        if (!string.IsNullOrEmpty(loc5)) DateTime.TryParse(loc5, out messager._remoteTime);
        if (messager._arg is Exception) messager._exception = messager._arg as Exception;
        return messager;
    }
    /// <summary>
    /// Message id used to match replies to requests. Server side uses negative
    /// values, client side positive.
    /// </summary>
    public int Id {
        get { return _id; }
        set {
            if (_id != value) {
                _isChangeId = true;
            }
            _id = value;
        }
    }
    public string Action {
        get { return _action; }
    }
    public string Permission {
        get { return _permission; }
    }
    /// <summary>Sender-side timestamp carried in the header.</summary>
    public DateTime RemoteTime {
        get { return _remoteTime; }
    }
    /// <summary>Deserialized argument payload; may be null.</summary>
    public object Arg {
        get { return _arg; }
    }
    /// <summary>Set when the argument payload itself is an Exception.</summary>
    public Exception Exception {
        get { return _exception; }
    }
}
27182812/ChatGLM-LLaMA-chinese-insturct | 11,490 | src/transformers/models/esm/openfold_utils/protein.py | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Protein data type."""
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any] # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation.

    An immutable per-residue record of atom coordinates plus the metadata
    needed to render a PDB file (remark, template parents, chain assignment).
    """

    # Cartesian coordinates of atoms in angstroms. The atom types correspond to
    # residue_constants.atom_types, i.e. the first three are N, CA, CB.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    """Parses a ProteinNet-format record into a `Protein`.

    Args:
        proteinnet_str: Contents of a ProteinNet record, with [PRIMARY]
            (sequence), [TERTIARY] (N/CA/C coordinates, in picometers) and
            [MASK] (+/- per-residue mask) sections.

    Returns:
        A `Protein` with N, CA and C atom positions and masks populated
        (b_factors are not present in ProteinNet and are left as None).
    """
    tag_re = r"(\[[A-Z]+\]\n)"
    tags: List[str] = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    # Pair each [TAG] with the lines of its section body.
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            # Strings are immutable, so unknown residue symbols are replaced by
            # rebuilding the sequence (the previous `seq[i] = "X"` raised
            # TypeError whenever an unknown symbol was encountered).
            seq = "".join(
                res if res in residue_constants.restypes else "X"
                for res in g[1][0].strip()
            )
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            # Three lines (x, y, z), each a flat list of per-atom coordinates.
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            # ProteinNet coordinates are in picometers; convert to angstroms.
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            # Mark N/CA/C as present for every residue, then zero out the
            # residues masked with "-".
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    """Builds the REMARK/PARENT header lines for one chain of `prot`."""
    headers: List[str] = []

    if prot.remark is not None:
        headers.append(f"REMARK {prot.remark}")

    parents = prot.parents
    if parents is not None and prot.parents_chain_index is not None:
        # Keep only the parents that belong to the requested chain.
        parents = [p for i, p in zip(prot.parents_chain_index, parents) if i == chain_id]
    if not parents:
        parents = ["N/A"]
    headers.append(f"PARENT {' '.join(parents)}")

    return headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain
    recycling

    Any REMARK/PARENT lines already present in ``pdb_str`` are dropped and
    regenerated from ``prot``; a PARENT line is re-emitted after each chain
    terminator so every chain carries its own template list.
    """
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            # Group parents by chain index; chains with no recorded parents
            # fall back to "N/A".
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            # No per-chain mapping: every chain shares the same parent list.
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            # A chain just ended and another follows: emit that chain's PARENT line.
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string.

    Args:
      prot: The protein to convert to PDB.

    Returns:
      PDB string.
    """
    restypes = residue_constants.restypes + ["X"]

    # Maps an aatype index to its 3-letter residue name ("UNK" for unknowns).
    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            # Skip atoms that are absent according to the mask.
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            # Also terminate whenever the next residue starts a new chain.
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask.

    `Protein.atom_mask` typically is defined according to the atoms that are reported in the PDB. This function
    computes a mask according to heavy atoms that should be present in the given sequence of amino acids.

    Args:
      prot: `Protein` whose fields are `numpy.ndarray` objects.

    Returns:
      An ideal atom mask, indexed by the protein's aatype sequence.
    """
    # Look up the canonical heavy-atom mask for each residue type.
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assembles a protein from a prediction.

    Args:
        features: Dictionary holding model inputs.
        result: Dictionary holding model outputs.
        b_factors: (Optional) B-factors to use for the protein; zeros when omitted.
        chain_index: (Optional) Chain indices for multi-chain predictions.
        remark: (Optional) Remark about the prediction.
        parents: (Optional) List of template names.
        parents_chain_index: (Optional) Chain index of each parent template.

    Returns:
        A protein instance.
    """
    # Default to zero B-factors with the same shape as the predicted atom mask.
    if b_factors is None:
        b_factors = np.zeros_like(result["final_atom_mask"])

    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors,
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
|
2881099/dotnetGen_sqlserver | 7,754 | GenMs/WinFormClass/Socket/ClientSocket.cs | using System;
using System.IO;
using System.Collections.Generic;
using System.Net;
using System.Net.Sockets;
using System.Text;
using System.Threading;
/// <summary>
/// Client-side counterpart of <see cref="AcceptSocket"/>: connects to a
/// server, performs the hello handshake, keeps the link alive, and matches
/// replies to pending <see cref="Write(SocketMessager, ClientSocketReceiveEventHandler)"/>
/// calls by messager id. Events are raised on freshly spawned threads.
/// </summary>
public class ClientSocket : BaseSocket, IDisposable {
    private bool _isDisposed;          // set by Dispose(); blocks further Connect() calls
    private IPEndPoint _remotePoint;
    private TcpClient _tcpClient;
    private Thread _thread;            // background read / keep-alive loop
    private bool _running;             // loop flag; cleared by Close() or a fatal error
    private int _receives;             // count of received messages; 0 means handshake pending
    private int _errors;               // error counter, guarded by _errors_lock
    private object _errors_lock = new object();
    private object _write_lock = new object();   // serializes frame writes on the stream
    // Pending request/response handlers, keyed by SocketMessager.Id.
    private Dictionary<int, SyncReceive> _receiveHandlers = new Dictionary<int, SyncReceive>();
    private object _receiveHandlers_lock = new object();
    private DateTime _lastActive;      // last read/write time; drives the keep-alive probe
    public event ClientSocketClosedEventHandler Closed;
    public event ClientSocketReceiveEventHandler Receive;
    public event ClientSocketErrorEventHandler Error;
    /// <summary>
    /// Resolves <paramref name="hostname"/>, connects, and starts the reader
    /// thread. No-op when already running or disposed; failures are reported
    /// through the Error and Closed events rather than thrown.
    /// </summary>
    public void Connect(string hostname, int port) {
        if (this._isDisposed == false && this._running == false) {
            this._running = true;
            try {
                IPAddress[] ips = Dns.GetHostAddresses(hostname);
                if (ips.Length == 0) throw new Exception("无法解析“" + hostname + "”");
                this._remotePoint = new IPEndPoint(ips[0], port);
                this._tcpClient = new TcpClient();
                this._tcpClient.Connect(this._remotePoint);
            } catch (Exception ex) {
                this._running = false;
                this.OnError(ex);
                this.OnClosed();
                return;
            }
            this._receives = 0;
            this._errors = 0;
            this._lastActive = DateTime.Now;
            this._thread = new Thread(delegate() {
                while (this._running) {
                    try {
                        NetworkStream ns = this._tcpClient.GetStream();
                        ns.ReadTimeout = 1000 * 20;
                        if (ns.DataAvailable) {
                            SocketMessager messager = base.Read(ns);
                            if (string.Compare(messager.Action, SocketMessager.SYS_TEST_LINK.Action) == 0) {
                                // Keep-alive probe from the server: ignore.
                            } else if (this._receives == 0 &&
                                string.Compare(messager.Action, SocketMessager.SYS_HELLO_WELCOME.Action) == 0) {
                                // Handshake: echo the hello back so the server marks us accepted.
                                this._receives++;
                                this.Write(messager);
                            } else if (string.Compare(messager.Action, SocketMessager.SYS_ACCESS_DENIED.Action) == 0) {
                                throw new Exception(SocketMessager.SYS_ACCESS_DENIED.Action);
                            } else {
                                ClientSocketReceiveEventArgs e = new ClientSocketReceiveEventArgs(this._receives++, messager);
                                SyncReceive receive = null;
                                if (this._receiveHandlers.TryGetValue(messager.Id, out receive)) {
                                    // Reply to a pending Write(...): run its handler on a
                                    // new thread, then wake the blocked caller.
                                    new Thread(delegate() {
                                        try {
                                            receive.ReceiveHandler(this, e);
                                        } catch (Exception ex) {
                                            this.OnError(ex);
                                        } finally {
                                            receive.Wait.Set();
                                        }
                                    }).Start();
                                } else if (this.Receive != null) {
                                    // Unsolicited message: raise the Receive event.
                                    new Thread(delegate() {
                                        this.OnReceive(e);
                                    }).Start();
                                }
                            }
                            this._lastActive = DateTime.Now;
                        } else {
                            // Idle link: probe it after 3 quiet seconds so a dead
                            // server is detected by the resulting write failure.
                            TimeSpan ts = DateTime.Now - _lastActive;
                            if (ts.TotalSeconds > 3) {
                                this.Write(SocketMessager.SYS_TEST_LINK);
                            }
                        }
                        if (!ns.DataAvailable) Thread.CurrentThread.Join(1);   // yield instead of spinning
                    } catch (Exception ex) {
                        this._running = false;
                        this.OnError(ex);
                    }
                }
                this.Close();
                this.OnClosed();
            });
            this._thread.Start();
        }
    }
    /// <summary>
    /// Stops the reader loop, closes the connection, and releases every caller
    /// still blocked on a pending reply.
    /// </summary>
    public void Close() {
        this._running = false;
        if (this._tcpClient != null) {
            this._tcpClient.Close();
        }
        int[] keys = new int[this._receiveHandlers.Count];
        try {
            // Optimistic lock-free snapshot; falls back to the locked path if a
            // concurrent mutation changes the dictionary mid-copy.
            this._receiveHandlers.Keys.CopyTo(keys, 0);
        } catch {
            lock (this._receiveHandlers_lock) {
                keys = new int[this._receiveHandlers.Count];
                this._receiveHandlers.Keys.CopyTo(keys, 0);
            }
        }
        foreach (int key in keys) {
            SyncReceive receiveHandler = null;
            if (this._receiveHandlers.TryGetValue(key, out receiveHandler)) {
                receiveHandler.Wait.Set();   // release the waiting Write(...) caller
            }
        }
        lock (this._receiveHandlers_lock) {
            this._receiveHandlers.Clear();
        }
    }
    /// <summary>Sends a message without waiting for a reply.</summary>
    public void Write(SocketMessager messager) {
        this.Write(messager, null, TimeSpan.Zero);
    }
    /// <summary>
    /// Sends a message and waits for the matching reply.
    /// NOTE(review): the default here is 200 seconds while the server-side
    /// counterpart uses 20 — confirm whether this asymmetry is intentional.
    /// </summary>
    public void Write(SocketMessager messager, ClientSocketReceiveEventHandler receiveHandler) {
        this.Write(messager, receiveHandler, TimeSpan.FromSeconds(200));
    }
    /// <summary>
    /// Sends a message. When <paramref name="receiveHandler"/> is non-null the
    /// call blocks until the server answers with the same messager id or
    /// <paramref name="timeout"/> elapses.
    /// </summary>
    public void Write(SocketMessager messager, ClientSocketReceiveEventHandler receiveHandler, TimeSpan timeout) {
        SyncReceive syncReceive = null;
        try {
            if (receiveHandler != null) {
                // Register the handler before writing so the reply cannot race us.
                syncReceive = new SyncReceive(receiveHandler);
                lock (this._receiveHandlers_lock) {
                    if (!this._receiveHandlers.ContainsKey(messager.Id)) {
                        this._receiveHandlers.Add(messager.Id, syncReceive);
                    } else {
                        this._receiveHandlers[messager.Id] = syncReceive;
                    }
                }
            }
            lock (_write_lock) {
                NetworkStream ns = this._tcpClient.GetStream();
                base.Write(ns, messager);
            }
            this._lastActive = DateTime.Now;
            if (syncReceive != null) {
                // Block until the reader thread signals the reply (or timeout).
                syncReceive.Wait.Reset();
                syncReceive.Wait.WaitOne(timeout, false);
                syncReceive.Wait.Set();
                lock (this._receiveHandlers_lock) {
                    this._receiveHandlers.Remove(messager.Id);
                }
            }
        } catch (Exception ex) {
            this._running = false;
            this.OnError(ex);
            if (syncReceive != null) {
                syncReceive.Wait.Set();
                lock (this._receiveHandlers_lock) {
                    this._receiveHandlers.Remove(messager.Id);
                }
            }
        }
    }
    // Raises Closed on a new thread so the reader loop is never blocked.
    protected virtual void OnClosed(EventArgs e) {
        if (this.Closed != null) {
            new Thread(delegate() {
                try {
                    this.Closed(this, e);
                } catch (Exception ex) {
                    this.OnError(ex);
                }
            }).Start();
        }
    }
    protected void OnClosed() {
        this.OnClosed(new EventArgs());
    }
    // Raises Receive; handler exceptions are routed to the Error event.
    protected virtual void OnReceive(ClientSocketReceiveEventArgs e) {
        if (this.Receive != null) {
            try {
                this.Receive(this, e);
            } catch (Exception ex) {
                this.OnError(ex);
            }
        }
    }
    protected virtual void OnError(ClientSocketErrorEventArgs e) {
        if (this.Error != null) {
            this.Error(this, e);
        }
    }
    // Counts the error and raises the Error event with the running total.
    protected void OnError(Exception ex) {
        int errors = 0;
        lock (this._errors_lock) {
            errors = ++this._errors;
        }
        ClientSocketErrorEventArgs e = new ClientSocketErrorEventArgs(ex, errors);
        this.OnError(e);
    }
    /// <summary>True while the reader loop is active.</summary>
    public bool Running {
        get { return this._running; }
    }
    // Pairs a pending reply handler with the event the writing thread blocks on.
    class SyncReceive : IDisposable {
        private ClientSocketReceiveEventHandler _receiveHandler;
        private ManualResetEvent _wait;
        public SyncReceive(ClientSocketReceiveEventHandler receiveHandler) {
            this._receiveHandler = receiveHandler;
            this._wait = new ManualResetEvent(false);
        }
        public ClientSocketReceiveEventHandler ReceiveHandler {
            get { return _receiveHandler; }
        }
        public ManualResetEvent Wait {
            get { return _wait; }
        }
        #region IDisposable 成员
        public void Dispose() {
            this._wait.Set();
            this._wait.Close();
        }
        #endregion
    }
    #region IDisposable 成员
    public void Dispose() {
        this._isDisposed = true;
        this.Close();
    }
    #endregion
}
/// <summary>Subscriber signature for the client's Closed event.</summary>
public delegate void ClientSocketClosedEventHandler(object sender, EventArgs e);
/// <summary>Subscriber signature for the client's Error event.</summary>
public delegate void ClientSocketErrorEventHandler(object sender, ClientSocketErrorEventArgs e);
/// <summary>Subscriber signature for the client's Receive event.</summary>
public delegate void ClientSocketReceiveEventHandler(object sender, ClientSocketReceiveEventArgs e);
/// <summary>
/// Event data describing a client-side socket failure.
/// </summary>
public class ClientSocketErrorEventArgs : EventArgs {
    private int _errors;
    private Exception _exception;

    /// <param name="exception">The exception that was caught.</param>
    /// <param name="errors">Running error count for this client.</param>
    public ClientSocketErrorEventArgs(Exception exception, int errors) {
        _exception = exception;
        _errors = errors;
    }

    /// <summary>Running error count for this client.</summary>
    public int Errors {
        get { return this._errors; }
    }

    /// <summary>The exception that was caught.</summary>
    public Exception Exception {
        get { return this._exception; }
    }
}
/// <summary>
/// Event data carrying a message received from the server.
/// </summary>
public class ClientSocketReceiveEventArgs : EventArgs {
    private int _receives;
    private SocketMessager _messager;

    /// <param name="receives">Sequence number of this receive on the client.</param>
    /// <param name="messager">The decoded message.</param>
    public ClientSocketReceiveEventArgs(int receives, SocketMessager messager) {
        _receives = receives;
        _messager = messager;
    }

    /// <summary>Sequence number of this receive on the client.</summary>
    public int Receives {
        get { return this._receives; }
    }

    /// <summary>The decoded message.</summary>
    public SocketMessager Messager {
        get { return this._messager; }
    }
}
2881099/dotnetGen_sqlserver | 857 | ServerWinForm/Properties/AssemblyInfo.cs | using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
// 有关程序集的常规信息通过下列属性集
// 控制。更改这些属性值可修改
// 与程序集关联的信息。
[assembly: AssemblyTitle("ServerWinForm")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("")]
[assembly: AssemblyProduct("ServerWinForm")]
[assembly: AssemblyCopyright("版权所有 (C) 2016")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
// 将 ComVisible 设置为 false 使此程序集中的类型
// 对 COM 组件不可见。如果需要从 COM 访问此程序集中的类型,
// 则将该类型上的 ComVisible 属性设置为 true。
[assembly: ComVisible(false)]
// 如果此项目向 COM 公开,则下列 GUID 用于类型库的 ID
[assembly: Guid("919d7765-864f-4c8c-9d5c-c18c7bd11e38")]
// 程序集的版本信息由下面四个值组成:
//
// 主版本
// 次版本
// 内部版本号
// 修订号
//
[assembly: AssemblyVersion("1.0.0.0")]
[assembly: AssemblyFileVersion("1.0.0.0")]
|
2881099/dotnetGen_sqlserver | 979 | ServerWinForm/Properties/Settings.Designer.cs | //------------------------------------------------------------------------------
// <auto-generated>
// 此代码由工具生成。
// 运行时版本:4.0.30319.42000
//
// 对此文件的更改可能会导致不正确的行为,并且如果
// 重新生成代码,这些更改将会丢失。
// </auto-generated>
//------------------------------------------------------------------------------
namespace ServerWinForm.Properties {
/// <summary>
/// Application settings singleton for ServerWinForm.
/// NOTE: designer-generated file — manual edits are lost on regeneration.
/// </summary>
[global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()]
[global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.Editors.SettingsDesigner.SettingsSingleFileGenerator", "14.0.0.0")]
internal sealed partial class Settings : global::System.Configuration.ApplicationSettingsBase {
    // Single thread-safe instance shared by the whole application.
    private static Settings defaultInstance = ((Settings)(global::System.Configuration.ApplicationSettingsBase.Synchronized(new Settings())));
    /// <summary>Gets the shared settings instance.</summary>
    public static Settings Default {
        get {
            return defaultInstance;
        }
    }
}
}
|
2881099/dotnetGen_sqlserver | 2,453 | ServerWinForm/Properties/Resources.Designer.cs | //------------------------------------------------------------------------------
// <auto-generated>
// 此代码由工具生成。
// 运行时版本:4.0.30319.42000
//
// 对此文件的更改可能会导致不正确的行为,并且如果
// 重新生成代码,这些更改将会丢失。
// </auto-generated>
//------------------------------------------------------------------------------
namespace ServerWinForm.Properties {
    using System;

    /// <summary>
    ///   A strongly-typed resource class, for looking up localized strings, etc.
    /// </summary>
    // This class was auto-generated by the StronglyTypedResourceBuilder class
    // via a tool like ResGen or Visual Studio. To add or remove a member, edit
    // the .ResX file and rerun ResGen (with the /str option), or rebuild the
    // VS project.
    [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")]
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
    [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()]
    internal class Resources {

        // Lazily created; see the ResourceManager property below.
        private static global::System.Resources.ResourceManager resourceMan;

        // Culture override applied to resource lookups via the Culture property.
        private static global::System.Globalization.CultureInfo resourceCulture;

        [global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")]
        internal Resources() {
        }

        /// <summary>
        ///   Returns the cached ResourceManager instance used by this class.
        /// </summary>
        [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)]
        internal static global::System.Resources.ResourceManager ResourceManager {
            get {
                if (object.ReferenceEquals(resourceMan, null)) {
                    global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("ServerWinForm.Properties.Resources", typeof(Resources).Assembly);
                    resourceMan = temp;
                }
                return resourceMan;
            }
        }

        /// <summary>
        ///   Overrides the current thread's CurrentUICulture property for all
        ///   resource lookups made through this strongly typed resource class.
        /// </summary>
        [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)]
        internal static global::System.Globalization.CultureInfo Culture {
            get {
                return resourceCulture;
            }
            set {
                resourceCulture = value;
            }
        }
    }
}
|
2881099/dotnetGen_sqlserver | 851 | MakeCode/Properties/AssemblyInfo.cs | using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
// 有关程序集的常规信息通过下列属性集
// 控制。更改这些属性值可修改
// 与程序集关联的信息。
[assembly: AssemblyTitle("NicPetShop")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("")]
[assembly: AssemblyProduct("NicPetShop")]
[assembly: AssemblyCopyright("版权所有 (C) 2008")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
// 将 ComVisible 设置为 false 使此程序集中的类型
// 对 COM 组件不可见。如果需要从 COM 访问此程序集中的类型,
// 则将该类型上的 ComVisible 属性设置为 true。
[assembly: ComVisible(false)]
// 如果此项目向 COM 公开,则下列 GUID 用于类型库的 ID
[assembly: Guid("d26d6e70-6297-4f9c-992d-02c5478ca63b")]
// 程序集的版本信息由下面四个值组成:
//
// 主版本
// 次版本
// 内部版本号
// 修订号
//
[assembly: AssemblyVersion("1.0.0.0")]
[assembly: AssemblyFileVersion("1.0.0.0")]
|
2881099/dotnetGen_sqlserver | 5,016 | MakeCode/Properties/Settings.Designer.cs | //------------------------------------------------------------------------------
// <auto-generated>
// 此代码由工具生成。
// 运行时版本:4.0.30319.42000
//
// 对此文件的更改可能会导致不正确的行为,并且如果
// 重新生成代码,这些更改将会丢失。
// </auto-generated>
//------------------------------------------------------------------------------
namespace MakeCode.Properties {

    // Auto-generated user-settings singleton. Each property persists the last
    // value of a form control (see Settings.settings); manual edits here are
    // overwritten when the settings designer regenerates this file.
    [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()]
    [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.Editors.SettingsDesigner.SettingsSingleFileGenerator", "14.0.0.0")]
    internal sealed partial class Settings : global::System.Configuration.ApplicationSettingsBase {

        // Single synchronized (thread-safe) settings instance for the process.
        private static Settings defaultInstance = ((Settings)(global::System.Configuration.ApplicationSettingsBase.Synchronized(new Settings())));

        /// <summary>
        /// Gets the process-wide default settings instance.
        /// </summary>
        public static Settings Default {
            get {
                return defaultInstance;
            }
        }

        // Persisted text of the txtServer control (presumably the DB server address).
        [global::System.Configuration.UserScopedSettingAttribute()]
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
        [global::System.Configuration.DefaultSettingValueAttribute("")]
        public string txtServer_text {
            get {
                return ((string)(this["txtServer_text"]));
            }
            set {
                this["txtServer_text"] = value;
            }
        }

        // Persisted text of the txtUsername control.
        [global::System.Configuration.UserScopedSettingAttribute()]
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
        [global::System.Configuration.DefaultSettingValueAttribute("")]
        public string txtUsername_text {
            get {
                return ((string)(this["txtUsername_text"]));
            }
            set {
                this["txtUsername_text"] = value;
            }
        }

        // Persisted text of the txtPassword control. NOTE(review): stored via
        // user-scoped settings, i.e. in plain text on disk — confirm acceptable.
        [global::System.Configuration.UserScopedSettingAttribute()]
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
        [global::System.Configuration.DefaultSettingValueAttribute("")]
        public string txtPassword_text {
            get {
                return ((string)(this["txtPassword_text"]));
            }
            set {
                this["txtPassword_text"] = value;
            }
        }

        // Persisted text of the txtSolution control.
        [global::System.Configuration.UserScopedSettingAttribute()]
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
        [global::System.Configuration.DefaultSettingValueAttribute("")]
        public string txtSolution_text {
            get {
                return ((string)(this["txtSolution_text"]));
            }
            set {
                this["txtSolution_text"] = value;
            }
        }

        // Persisted checked state of the chkSolution checkbox (default: false).
        [global::System.Configuration.UserScopedSettingAttribute()]
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
        [global::System.Configuration.DefaultSettingValueAttribute("False")]
        public bool chkSolution_checked {
            get {
                return ((bool)(this["chkSolution_checked"]));
            }
            set {
                this["chkSolution_checked"] = value;
            }
        }

        // Persisted checked state of the chkIntegrated checkbox (default: true).
        [global::System.Configuration.UserScopedSettingAttribute()]
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
        [global::System.Configuration.DefaultSettingValueAttribute("True")]
        public bool chkIntegrated_Checked {
            get {
                return ((bool)(this["chkIntegrated_Checked"]));
            }
            set {
                this["chkIntegrated_Checked"] = value;
            }
        }

        // Persisted checked state of the chkMultiDB checkbox (default: false).
        [global::System.Configuration.UserScopedSettingAttribute()]
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
        [global::System.Configuration.DefaultSettingValueAttribute("False")]
        public bool chkMultiDB_checked {
            get {
                return ((bool)(this["chkMultiDB_checked"]));
            }
            set {
                this["chkMultiDB_checked"] = value;
            }
        }

        // Persisted checked state of the chkWebAdmin checkbox (default: false).
        [global::System.Configuration.UserScopedSettingAttribute()]
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
        [global::System.Configuration.DefaultSettingValueAttribute("False")]
        public bool chkWebAdmin_checked {
            get {
                return ((bool)(this["chkWebAdmin_checked"]));
            }
            set {
                this["chkWebAdmin_checked"] = value;
            }
        }

        // Persisted checked state of the chkDownloadRes checkbox (default: false).
        [global::System.Configuration.UserScopedSettingAttribute()]
        [global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
        [global::System.Configuration.DefaultSettingValueAttribute("False")]
        public bool chkDownloadRes_checked {
            get {
                return ((bool)(this["chkDownloadRes_checked"]));
            }
            set {
                this["chkDownloadRes_checked"] = value;
            }
        }
    }
}
|
2881099/dotnetGen_sqlserver | 2,443 | MakeCode/Properties/Resources.Designer.cs | //------------------------------------------------------------------------------
// <auto-generated>
// 此代码由工具生成。
// 运行时版本:4.0.30319.42000
//
// 对此文件的更改可能会导致不正确的行为,并且如果
// 重新生成代码,这些更改将会丢失。
// </auto-generated>
//------------------------------------------------------------------------------
namespace MakeCode.Properties {
using System;
/// <summary>
/// 一个强类型的资源类,用于查找本地化的字符串等。
/// </summary>
// 此类是由 StronglyTypedResourceBuilder
// 类通过类似于 ResGen 或 Visual Studio 的工具自动生成的。
// 若要添加或移除成员,请编辑 .ResX 文件,然后重新运行 ResGen
// (以 /str 作为命令选项),或重新生成 VS 项目。
[global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")]
[global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
[global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()]
internal class Resources {
private static global::System.Resources.ResourceManager resourceMan;
private static global::System.Globalization.CultureInfo resourceCulture;
[global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")]
internal Resources() {
}
/// <summary>
/// 返回此类使用的缓存的 ResourceManager 实例。
/// </summary>
[global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)]
internal static global::System.Resources.ResourceManager ResourceManager {
get {
if (object.ReferenceEquals(resourceMan, null)) {
global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("MakeCode.Properties.Resources", typeof(Resources).Assembly);
resourceMan = temp;
}
return resourceMan;
}
}
/// <summary>
/// 使用此强类型资源类,为所有资源查找
/// 重写当前线程的 CurrentUICulture 属性。
/// </summary>
[global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)]
internal static global::System.Globalization.CultureInfo Culture {
get {
return resourceCulture;
}
set {
resourceCulture = value;
}
}
}
}
|
2881099/dotnetGen_sqlserver | 1,465 | MakeCode/Properties/Settings.settings | <?xml version='1.0' encoding='utf-8'?>
<SettingsFile xmlns="http://schemas.microsoft.com/VisualStudio/2004/01/settings" CurrentProfile="(Default)" GeneratedClassNamespace="MakeCode.Properties" GeneratedClassName="Settings">
<Profiles />
<Settings>
<Setting Name="txtServer_text" Type="System.String" Scope="User">
<Value Profile="(Default)" />
</Setting>
<Setting Name="txtUsername_text" Type="System.String" Scope="User">
<Value Profile="(Default)" />
</Setting>
<Setting Name="txtPassword_text" Type="System.String" Scope="User">
<Value Profile="(Default)" />
</Setting>
<Setting Name="txtSolution_text" Type="System.String" Scope="User">
<Value Profile="(Default)" />
</Setting>
<Setting Name="chkSolution_checked" Type="System.Boolean" Scope="User">
<Value Profile="(Default)">False</Value>
</Setting>
<Setting Name="chkIntegrated_Checked" Type="System.Boolean" Scope="User">
<Value Profile="(Default)">True</Value>
</Setting>
<Setting Name="chkMultiDB_checked" Type="System.Boolean" Scope="User">
<Value Profile="(Default)">False</Value>
</Setting>
<Setting Name="chkWebAdmin_checked" Type="System.Boolean" Scope="User">
<Value Profile="(Default)">False</Value>
</Setting>
<Setting Name="chkDownloadRes_checked" Type="System.Boolean" Scope="User">
<Value Profile="(Default)">False</Value>
</Setting>
</Settings>
</SettingsFile> |
27182812/ChatGLM-LLaMA-chinese-insturct | 3,705 | src/transformers/models/esm/openfold_utils/loss.py | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional, Tuple
import torch
def _calculate_bin_centers(boundaries: torch.Tensor) -> torch.Tensor:
step = boundaries[1] - boundaries[0]
bin_centers = boundaries + step / 2
bin_centers = torch.cat([bin_centers, (bin_centers[-1] + step).unsqueeze(-1)], dim=0)
return bin_centers
def _calculate_expected_aligned_error(
    alignment_confidence_breaks: torch.Tensor,
    aligned_distance_error_probs: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Expected aligned error per residue pair, plus the maximum possible error.

    The expectation is taken over the per-bin probabilities using the bin
    centers; the last bin center bounds the attainable error.
    """
    centers = _calculate_bin_centers(alignment_confidence_breaks)
    expected_error = (aligned_distance_error_probs * centers).sum(dim=-1)
    return expected_error, centers[-1]
def compute_predicted_aligned_error(
    logits: torch.Tensor,
    max_bin: int = 31,
    no_bins: int = 64,
    **kwargs,
) -> Dict[str, torch.Tensor]:
    """Computes aligned confidence metrics from logits.

    Args:
        logits: [*, num_res, num_res, num_bins] logits output from
            PredictedAlignedErrorHead.
        max_bin: Maximum bin value.
        no_bins: Number of bins.

    Returns:
        Dict with:
            aligned_confidence_probs: [*, num_res, num_res, num_bins]
                per-pair probabilities over error bins.
            predicted_aligned_error: [*, num_res, num_res] expected aligned
                distance error per residue pair.
            max_predicted_aligned_error: [*] maximum predictable error.
    """
    bin_breaks = torch.linspace(0, max_bin, steps=(no_bins - 1), device=logits.device)
    probs = torch.nn.functional.softmax(logits, dim=-1)
    expected_error, max_error = _calculate_expected_aligned_error(
        alignment_confidence_breaks=bin_breaks,
        aligned_distance_error_probs=probs,
    )
    return {
        "aligned_confidence_probs": probs,
        "predicted_aligned_error": expected_error,
        "max_predicted_aligned_error": max_error,
    }
def compute_tm(
    logits: torch.Tensor,
    residue_weights: Optional[torch.Tensor] = None,
    max_bin: int = 31,
    no_bins: int = 64,
    eps: float = 1e-8,
    **kwargs,
) -> torch.Tensor:
    """Computes a predicted TM-score from pairwise aligned-error logits.

    Args:
        logits: [*, num_res, num_res, num_bins] aligned-error logits.
        residue_weights: [num_res] optional per-residue weights; defaults to ones.
        max_bin: Maximum bin value.
        no_bins: Number of bins.
        eps: Small constant guarding the weight normalization.

    Returns:
        Predicted TM-score taken at the best-scoring alignment anchor residue.
    """
    if residue_weights is None:
        residue_weights = logits.new_ones(logits.shape[-2])

    boundaries = torch.linspace(0, max_bin, steps=(no_bins - 1), device=logits.device)
    bin_centers = _calculate_bin_centers(boundaries)

    # TM-score length-normalization constant d0; clipping keeps the cube-root
    # term positive for short sequences.
    # (A no-op `torch.sum(residue_weights)` statement was removed here.)
    n = logits.shape[-2]
    clipped_n = max(n, 19)
    d0 = 1.24 * (clipped_n - 15) ** (1.0 / 3) - 1.8

    probs = torch.nn.functional.softmax(logits, dim=-1)

    # Per-bin TM term f(d) = 1 / (1 + (d/d0)^2), then its expectation over bins.
    tm_per_bin = 1.0 / (1 + (bin_centers**2) / (d0**2))
    predicted_tm_term = torch.sum(probs * tm_per_bin, dim=-1)

    normed_residue_mask = residue_weights / (eps + residue_weights.sum())
    per_alignment = torch.sum(predicted_tm_term * normed_residue_mask, dim=-1)

    # Report the score at the best (weighted) alignment anchor.
    weighted = per_alignment * residue_weights
    argmax = (weighted == torch.max(weighted)).nonzero()[0]
    return per_alignment[tuple(argmax)]
|
27182812/ChatGLM-LLaMA-chinese-insturct | 8,376 | src/transformers/models/esm/openfold_utils/feats.py | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Tuple, overload
import torch
import torch.types
from torch import nn
from . import residue_constants as rc
from .rigid_utils import Rigid, Rotation
from .tensor_utils import batched_gather
# Typing-only overloads: when a mask tensor is supplied, pseudo_beta_fn returns
# a (pseudo_beta, pseudo_beta_mask) pair instead of just the coordinates.
@overload
def pseudo_beta_fn(aatype: torch.Tensor, all_atom_positions: torch.Tensor, all_atom_masks: None) -> torch.Tensor:
    ...


@overload
def pseudo_beta_fn(
    aatype: torch.Tensor, all_atom_positions: torch.Tensor, all_atom_masks: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
    ...
def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks):
    """Select pseudo-beta coordinates: CA for glycine residues, CB otherwise.

    When `all_atom_masks` is given, the matching per-residue mask is returned
    alongside the coordinates (see the typing overloads above this function).
    """
    gly_mask = aatype == rc.restype_order["G"]
    ca_idx = rc.atom_order["CA"]
    cb_idx = rc.atom_order["CB"]

    # Broadcast the glycine mask over the xyz dimension when picking coords.
    coord_selector = gly_mask[..., None].expand(*((-1,) * len(gly_mask.shape)), 3)
    pseudo_beta = torch.where(
        coord_selector,
        all_atom_positions[..., ca_idx, :],
        all_atom_positions[..., cb_idx, :],
    )

    if all_atom_masks is None:
        return pseudo_beta

    pseudo_beta_mask = torch.where(
        gly_mask,
        all_atom_masks[..., ca_idx],
        all_atom_masks[..., cb_idx],
    )
    return pseudo_beta, pseudo_beta_mask
def atom14_to_atom37(atom14: torch.Tensor, batch: Dict[str, torch.Tensor]) -> torch.Tensor:
    """Expand atom14 coordinates into the atom37 layout via the batch's index map."""
    gathered = batched_gather(
        atom14,
        batch["residx_atom37_to_atom14"],
        dim=-2,
        no_batch_dims=len(atom14.shape[:-2]),
    )
    # Zero out slots for atoms that do not exist in the residue.
    return gathered * batch["atom37_atom_exists"][..., None]
def build_template_angle_feat(template_feats: Dict[str, torch.Tensor]) -> torch.Tensor:
template_aatype = template_feats["template_aatype"]
torsion_angles_sin_cos = template_feats["template_torsion_angles_sin_cos"]
alt_torsion_angles_sin_cos = template_feats["template_alt_torsion_angles_sin_cos"]
torsion_angles_mask = template_feats["template_torsion_angles_mask"]
template_angle_feat = torch.cat(
[
nn.functional.one_hot(template_aatype, 22),
torsion_angles_sin_cos.reshape(*torsion_angles_sin_cos.shape[:-2], 14),
alt_torsion_angles_sin_cos.reshape(*alt_torsion_angles_sin_cos.shape[:-2], 14),
torsion_angles_mask,
],
dim=-1,
)
return template_angle_feat
def build_template_pair_feat(
    batch: Dict[str, torch.Tensor],
    min_bin: torch.types.Number,
    max_bin: torch.types.Number,
    no_bins: int,
    use_unit_vector: bool = False,
    eps: float = 1e-20,
    inf: float = 1e8,
) -> torch.Tensor:
    """Build pairwise template features: a distogram, masks, row/column aatype
    one-hots, and (optionally) inter-residue unit vectors in backbone frames.

    Args:
        batch: template feature dict (pseudo-beta coords/mask, aatype,
            all-atom positions and mask).
        min_bin: smallest distogram bin edge.
        max_bin: largest distogram bin edge.
        no_bins: number of distogram bins.
        use_unit_vector: if False, the unit-vector channels are zeroed out.
        eps: numerical guard for the frame construction / inverse distance.
        inf: upper bound used for the last (open-ended) distogram bin.

    Returns:
        [*, N_res, N_res, C] pairwise feature tensor, masked by the 2D
        backbone-derived template mask.
    """
    template_mask = batch["template_pseudo_beta_mask"]
    template_mask_2d = template_mask[..., None] * template_mask[..., None, :]

    # Compute distogram (this seems to differ slightly from Alg. 5)
    tpb = batch["template_pseudo_beta"]
    dgram = torch.sum((tpb[..., None, :] - tpb[..., None, :, :]) ** 2, dim=-1, keepdim=True)
    # Bin edges are squared so the squared distances need never be rooted;
    # one-hot via "inside (lower, upper)" interval tests.
    lower = torch.linspace(min_bin, max_bin, no_bins, device=tpb.device) ** 2
    upper = torch.cat([lower[1:], lower.new_tensor([inf])], dim=-1)
    dgram = ((dgram > lower) * (dgram < upper)).type(dgram.dtype)

    to_concat = [dgram, template_mask_2d[..., None]]

    # Residue-type one-hot, broadcast along both rows and columns of the map.
    aatype_one_hot: torch.LongTensor = nn.functional.one_hot(
        batch["template_aatype"],
        rc.restype_num + 2,
    )

    n_res = batch["template_aatype"].shape[-1]
    to_concat.append(aatype_one_hot[..., None, :, :].expand(*aatype_one_hot.shape[:-2], n_res, -1, -1))
    to_concat.append(aatype_one_hot[..., None, :].expand(*aatype_one_hot.shape[:-2], -1, n_res, -1))

    # Backbone frames from N/CA/C atoms; pairwise vectors between residue
    # origins, expressed in each residue's local frame.
    n, ca, c = [rc.atom_order[a] for a in ["N", "CA", "C"]]
    rigids = Rigid.make_transform_from_reference(
        n_xyz=batch["template_all_atom_positions"][..., n, :],
        ca_xyz=batch["template_all_atom_positions"][..., ca, :],
        c_xyz=batch["template_all_atom_positions"][..., c, :],
        eps=eps,
    )
    points = rigids.get_trans()[..., None, :, :]
    rigid_vec = rigids[..., None].invert_apply(points)

    inv_distance_scalar = torch.rsqrt(eps + torch.sum(rigid_vec**2, dim=-1))

    # Re-derive the 2D mask from backbone-atom existence for the vector
    # features and the final masking (note: shadows the pseudo-beta mask above).
    t_aa_masks = batch["template_all_atom_mask"]
    template_mask = t_aa_masks[..., n] * t_aa_masks[..., ca] * t_aa_masks[..., c]
    template_mask_2d = template_mask[..., None] * template_mask[..., None, :]

    inv_distance_scalar = inv_distance_scalar * template_mask_2d
    unit_vector = rigid_vec * inv_distance_scalar[..., None]

    if not use_unit_vector:
        unit_vector = unit_vector * 0.0

    to_concat.extend(torch.unbind(unit_vector[..., None, :], dim=-1))
    to_concat.append(template_mask_2d[..., None])

    act = torch.cat(to_concat, dim=-1)
    act = act * template_mask_2d[..., None]

    return act
def build_extra_msa_feat(batch: Dict[str, torch.Tensor]) -> torch.Tensor:
    """Assemble extra-MSA features: a 23-way one-hot plus two deletion channels."""
    one_hot_msa = nn.functional.one_hot(batch["extra_msa"], 23)
    has_deletion = batch["extra_has_deletion"].unsqueeze(-1)
    deletion_value = batch["extra_deletion_value"].unsqueeze(-1)
    return torch.cat([one_hot_msa, has_deletion, deletion_value], dim=-1)
def torsion_angles_to_frames(
    r: Rigid,
    alpha: torch.Tensor,
    aatype: torch.Tensor,
    rrgdf: torch.Tensor,
) -> Rigid:
    """Compose backbone frames with torsion-angle rotations to obtain all 8
    rigid group frames per residue in global coordinates.

    Args:
        r: [*, N] backbone rigid transforms.
        alpha: [*, N, 7, 2] torsion angles as (sin, cos) pairs.
        aatype: [*, N] residue-type indices used to select default frames.
        rrgdf: per-restype default group frames as 4x4 matrices.

    Returns:
        [*, N, 8] rigid transforms mapping each group frame to global space.
    """
    # [*, N, 8, 4, 4]
    default_4x4 = rrgdf[aatype, ...]

    # [*, N, 8] transformations, i.e.
    # One [*, N, 8, 3, 3] rotation matrix and
    # One [*, N, 8, 3] translation matrix
    default_r = r.from_tensor_4x4(default_4x4)

    # Identity angle (sin=0, cos=1) prepended for the backbone group.
    bb_rot = alpha.new_zeros((*((1,) * len(alpha.shape[:-1])), 2))
    bb_rot[..., 1] = 1

    # [*, N, 8, 2]
    alpha = torch.cat([bb_rot.expand(*alpha.shape[:-2], -1, -1), alpha], dim=-2)

    # [*, N, 8, 3, 3]
    # Produces rotation matrices of the form:
    # [
    #   [1, 0 , 0 ],
    #   [0, a_2,-a_1],
    #   [0, a_1, a_2]
    # ]
    # This follows the original code rather than the supplement, which uses
    # different indices.
    all_rots = alpha.new_zeros(default_r.get_rots().get_rot_mats().shape)
    all_rots[..., 0, 0] = 1
    all_rots[..., 1, 1] = alpha[..., 1]
    all_rots[..., 1, 2] = -alpha[..., 0]
    all_rots[..., 2, 1:] = alpha

    all_frames = default_r.compose(Rigid(Rotation(rot_mats=all_rots), None))

    # Chain the chi frames: each chi_k frame is defined relative to chi_{k-1}.
    chi2_frame_to_frame = all_frames[..., 5]
    chi3_frame_to_frame = all_frames[..., 6]
    chi4_frame_to_frame = all_frames[..., 7]

    chi1_frame_to_bb = all_frames[..., 4]
    chi2_frame_to_bb = chi1_frame_to_bb.compose(chi2_frame_to_frame)
    chi3_frame_to_bb = chi2_frame_to_bb.compose(chi3_frame_to_frame)
    chi4_frame_to_bb = chi3_frame_to_bb.compose(chi4_frame_to_frame)

    all_frames_to_bb = Rigid.cat(
        [
            all_frames[..., :5],
            chi2_frame_to_bb.unsqueeze(-1),
            chi3_frame_to_bb.unsqueeze(-1),
            chi4_frame_to_bb.unsqueeze(-1),
        ],
        dim=-1,
    )

    # Finally lift every frame from backbone-local to global coordinates.
    all_frames_to_global = r[..., None].compose(all_frames_to_bb)

    return all_frames_to_global
def frames_and_literature_positions_to_atom14_pos(
    r: Rigid,
    aatype: torch.Tensor,
    default_frames: torch.Tensor,
    group_idx: torch.Tensor,
    atom_mask: torch.Tensor,
    lit_positions: torch.Tensor,
) -> torch.Tensor:
    """Map idealized ("literature") atom positions through each atom's rigid
    group frame to produce predicted atom14 coordinates.

    Args:
        r: [*, N, 8] per-residue rigid group frames.
        aatype: [*, N] residue-type indices.
        default_frames: per-restype frames (used here only for the group count).
        group_idx: per-restype map from atom14 slot to its rigid group.
        atom_mask: per-restype atom14 existence mask.
        lit_positions: per-restype idealized atom14 positions.

    Returns:
        [*, N, 14, 3] predicted atom positions, zeroed for absent atoms.
    """
    # [*, N, 14]
    group_mask = group_idx[aatype, ...]

    # [*, N, 14, 8]
    group_mask_one_hot: torch.LongTensor = nn.functional.one_hot(
        group_mask,
        num_classes=default_frames.shape[-3],
    )

    # [*, N, 14, 8] — select each atom's frame by masking with the one-hot...
    t_atoms_to_global = r[..., None, :] * group_mask_one_hot

    # [*, N, 14] — ...then sum out the group dimension.
    t_atoms_to_global = t_atoms_to_global.map_tensor_fn(lambda x: torch.sum(x, dim=-1))

    # [*, N, 14, 1]
    atom_mask = atom_mask[aatype, ...].unsqueeze(-1)

    # [*, N, 14, 3]
    lit_positions = lit_positions[aatype, ...]
    pred_positions = t_atoms_to_global.apply(lit_positions)
    pred_positions = pred_positions * atom_mask

    return pred_positions
|
2881099/dotnetGen_sqlserver | 863 | ServerWinService/Properties/AssemblyInfo.cs | using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
// 有关程序集的常规信息通过下列属性集
// 控制。更改这些属性值可修改
// 与程序集关联的信息。
[assembly: AssemblyTitle("ServerWinService")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("")]
[assembly: AssemblyProduct("ServerWinService")]
[assembly: AssemblyCopyright("版权所有 (C) 2007")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
// 将 ComVisible 设置为 false 使此程序集中的类型
// 对 COM 组件不可见。如果需要从 COM 访问此程序集中的类型,
// 则将该类型上的 ComVisible 属性设置为 true。
[assembly: ComVisible(false)]
// 如果此项目向 COM 公开,则下列 GUID 用于类型库的 ID
[assembly: Guid("3d782f35-0953-5580-8273-e45d913562b7")]
// 程序集的版本信息由下面四个值组成:
//
// 主版本
// 次版本
// 内部版本号
// 修订号
//
[assembly: AssemblyVersion("1.0.0.0")]
[assembly: AssemblyFileVersion("1.0.0.0")]
|
27182812/ChatGLM-LLaMA-chinese-insturct | 4,798 | src/transformers/models/esm/openfold_utils/tensor_utils.py | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Any, Callable, Dict, List, Type, TypeVar, Union, overload
import torch
import torch.nn as nn
import torch.types
def add(m1: torch.Tensor, m2: torch.Tensor, inplace: bool) -> torch.Tensor:
    """Sum two tensors, optionally in place.

    The first operation inside a checkpoint can't be in-place, but in-place
    addition is nice to have during inference — hence the flag.
    """
    if inplace:
        m1 += m2
    else:
        m1 = m1 + m2
    return m1
def permute_final_dims(tensor: torch.Tensor, inds: List[int]) -> torch.Tensor:
    """Permute the trailing `len(inds)` dimensions of `tensor` according to `inds`."""
    n_trailing = len(inds)
    leading = list(range(tensor.dim() - n_trailing))
    trailing = [tensor.dim() - n_trailing + i for i in inds]
    return tensor.permute(leading + trailing)
def flatten_final_dims(t: torch.Tensor, no_dims: int) -> torch.Tensor:
    """Collapse the last `no_dims` dimensions of `t` into a single dimension."""
    kept = t.shape[:-no_dims]
    return t.reshape(kept + (-1,))
def masked_mean(mask: torch.Tensor, value: torch.Tensor, dim: int, eps: float = 1e-4) -> torch.Tensor:
    """Mean of `value` over `dim`, weighted by `mask`; `eps` guards empty masks."""
    broadcast_mask = mask.expand(*value.shape)
    weighted_sum = torch.sum(broadcast_mask * value, dim=dim)
    return weighted_sum / (eps + torch.sum(broadcast_mask, dim=dim))
def pts_to_distogram(
    pts: torch.Tensor, min_bin: torch.types.Number = 2.3125, max_bin: torch.types.Number = 21.6875, no_bins: int = 64
) -> torch.Tensor:
    """Bucketize pairwise point distances into `no_bins` distance bins."""
    boundaries = torch.linspace(min_bin, max_bin, no_bins - 1, device=pts.device)
    deltas = pts.unsqueeze(-2) - pts.unsqueeze(-3)
    dists = torch.sqrt((deltas**2).sum(dim=-1))
    return torch.bucketize(dists, boundaries)
def dict_multimap(fn: Callable[[list], Any], dicts: List[dict]) -> dict:
    """Apply `fn` across the values of parallel dicts, recursing into nested dicts.

    All dicts are assumed to share the key structure of the first one.
    """
    template = dicts[0]
    result = {}
    for key, value in template.items():
        stacked = [d[key] for d in dicts]
        result[key] = dict_multimap(fn, stacked) if isinstance(value, dict) else fn(stacked)
    return result
def one_hot(x: torch.Tensor, v_bins: torch.Tensor) -> torch.Tensor:
    """One-hot encode each element of `x` against its nearest value in `v_bins`."""
    bins = v_bins.view(((1,) * x.dim()) + (len(v_bins),))
    nearest = torch.argmin(torch.abs(x[..., None] - bins), dim=-1)
    return nn.functional.one_hot(nearest, num_classes=len(v_bins)).float()
def batched_gather(data: torch.Tensor, inds: torch.Tensor, dim: int = 0, no_batch_dims: int = 0) -> torch.Tensor:
    """Gather along `dim` with per-batch indices, keeping the first
    `no_batch_dims` dimensions of `data` and `inds` aligned.
    """
    index_tuple: List[Union[slice, torch.Tensor]] = []
    # One aligned arange per batch dim, shaped to broadcast against `inds`.
    for axis, size in enumerate(data.shape[:no_batch_dims]):
        shape = [1] * len(inds.shape)
        shape[axis] = -1
        index_tuple.append(torch.arange(size).view(*shape))

    trailing: List[Union[slice, torch.Tensor]] = [slice(None) for _ in range(len(data.shape) - no_batch_dims)]
    trailing[dim - no_batch_dims if dim >= 0 else dim] = inds
    index_tuple.extend(trailing)

    # Index with a tuple: indexing with a bare list changed meaning in recent
    # NumPy versions (note carried over from the original).
    return data[tuple(index_tuple)]
T = TypeVar("T")
# With tree_map, a poor man's JAX tree_map
def dict_map(
    fn: Callable[[T], Any], dic: Dict[Any, Union[dict, list, tuple, T]], leaf_type: Type[T]
) -> Dict[Any, Union[dict, list, tuple, Any]]:
    """Recursively map `fn` over a nested dict, delegating non-dict values to tree_map."""
    return {
        key: dict_map(fn, value, leaf_type) if isinstance(value, dict) else tree_map(fn, value, leaf_type)
        for key, value in dic.items()
    }
# Typing-only overloads: tree_map preserves the container type of its input
# (dict -> dict, list -> list, tuple -> tuple, leaf -> mapped leaf).
@overload
def tree_map(fn: Callable[[T], Any], tree: T, leaf_type: Type[T]) -> Any:
    ...


@overload
def tree_map(fn: Callable[[T], Any], tree: dict, leaf_type: Type[T]) -> dict:
    ...


@overload
def tree_map(fn: Callable[[T], Any], tree: list, leaf_type: Type[T]) -> list:
    ...


@overload
def tree_map(fn: Callable[[T], Any], tree: tuple, leaf_type: Type[T]) -> tuple:
    ...
def tree_map(fn, tree, leaf_type):
    """Recursively apply `fn` to every `leaf_type` leaf of a nested
    dict/list/tuple structure, preserving the container types.

    Raises:
        ValueError: if a node is neither a supported container nor a
            `leaf_type` instance.
    """
    if isinstance(tree, dict):
        return dict_map(fn, tree, leaf_type)
    elif isinstance(tree, list):
        return [tree_map(fn, x, leaf_type) for x in tree]
    elif isinstance(tree, tuple):
        return tuple(tree_map(fn, x, leaf_type) for x in tree)
    elif isinstance(tree, leaf_type):
        return fn(tree)
    else:
        # Surface the offending type in the exception instead of printing it
        # to stdout (the previous debug print polluted library output).
        raise ValueError(f"Not supported: {type(tree)}")


# A poor man's JAX tree_map, specialized to torch.Tensor leaves.
tensor_tree_map = partial(tree_map, leaf_type=torch.Tensor)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 1,837 | src/transformers/models/deformable_detr/custom_kernel/ms_deform_attn.h | /*!
**************************************************************************************************
* Deformable DETR
* Copyright (c) 2020 SenseTime. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
**************************************************************************************************
* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
**************************************************************************************************
*/
#pragma once
#include "cpu/ms_deform_attn_cpu.h"
#ifdef WITH_CUDA
#include "cuda/ms_deform_attn_cuda.h"
#endif
// Dispatch the multi-scale deformable attention forward pass.
// Only a CUDA implementation exists; CPU inputs always raise.
at::Tensor
ms_deform_attn_forward(
    const at::Tensor &value,
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const int im2col_step)
{
    // Tensor::type() is deprecated in the ATen C++ API; query the device
    // placement directly instead.
    if (value.is_cuda())
    {
#ifdef WITH_CUDA
        return ms_deform_attn_cuda_forward(
            value, spatial_shapes, level_start_index, sampling_loc, attn_weight, im2col_step);
#else
        AT_ERROR("Not compiled with GPU support");
#endif
    }
    AT_ERROR("Not implemented on the CPU");
}
// Dispatch the multi-scale deformable attention backward pass.
// Returns the gradients produced by the CUDA kernel; CPU inputs always raise.
std::vector<at::Tensor>
ms_deform_attn_backward(
    const at::Tensor &value,
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const at::Tensor &grad_output,
    const int im2col_step)
{
    // Tensor::type() is deprecated in the ATen C++ API; query the device
    // placement directly instead.
    if (value.is_cuda())
    {
#ifdef WITH_CUDA
        return ms_deform_attn_cuda_backward(
            value, spatial_shapes, level_start_index, sampling_loc, attn_weight, grad_output, im2col_step);
#else
        AT_ERROR("Not compiled with GPU support");
#endif
    }
    AT_ERROR("Not implemented on the CPU");
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 1,255 | src/transformers/models/deformable_detr/custom_kernel/cpu/ms_deform_attn_cpu.cpp | /*!
**************************************************************************************************
* Deformable DETR
* Copyright (c) 2020 SenseTime. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
**************************************************************************************************
* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
**************************************************************************************************
*/
#include <vector>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
// CPU forward stub for multi-scale deformable attention.
// The operator is implemented only for CUDA, so this always raises.
at::Tensor
ms_deform_attn_cpu_forward(
    const at::Tensor &value,
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const int im2col_step)
{
    AT_ERROR("Not implement on cpu");
}
// CPU backward stub for multi-scale deformable attention.
// The operator is implemented only for CUDA, so this always raises.
std::vector<at::Tensor>
ms_deform_attn_cpu_backward(
    const at::Tensor &value,
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const at::Tensor &grad_output,
    const int im2col_step)
{
    AT_ERROR("Not implement on cpu");
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 1,138 | src/transformers/models/deformable_detr/custom_kernel/cpu/ms_deform_attn_cpu.h | /*!
**************************************************************************************************
* Deformable DETR
* Copyright (c) 2020 SenseTime. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
**************************************************************************************************
* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
**************************************************************************************************
*/
#pragma once
#include <torch/extension.h>
// CPU entry points for multi-scale deformable attention. Both are stubs that
// always raise (the operator is CUDA-only); they are declared so that
// CPU-only builds of the extension still compile.
at::Tensor
ms_deform_attn_cpu_forward(
    const at::Tensor &value,
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const int im2col_step);

std::vector<at::Tensor>
ms_deform_attn_cpu_backward(
    const at::Tensor &value,
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const at::Tensor &grad_output,
    const int im2col_step);
|
27182812/ChatGLM-LLaMA-chinese-insturct | 54,695 | src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_im2col_cuda.cuh | /*!
**************************************************************************
* Deformable DETR
* Copyright (c) 2020 SenseTime. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
**************************************************************************
* Modified from DCN (https://github.com/msracver/Deformable-ConvNets)
* Copyright (c) 2018 Microsoft
**************************************************************************
*/
#include <cstdio>
#include <algorithm>
#include <cstring>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCAtomics.cuh>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
// Number of CUDA blocks needed to cover N work items with num_threads threads
// per block (ceiling division).
inline int GET_BLOCKS(const int N, const int num_threads)
{
  const int blocks = (N + num_threads - 1) / num_threads;
  return blocks;
}
// Bilinear interpolation of `bottom_data` at fractional location (h, w) for
// attention head `m`, channel `c`. The stride computation implies a
// (height, width, nheads, channels) row-major layout; samples outside the
// feature map contribute 0 (zero padding).
template <typename scalar_t>
__device__ scalar_t ms_deform_attn_im2col_bilinear(const scalar_t* &bottom_data,
                                                   const int &height, const int &width, const int &nheads, const int &channels,
                                                   const scalar_t &h, const scalar_t &w, const int &m, const int &c)
{
  // Integer corner coordinates surrounding (h, w).
  const int h_low = floor(h);
  const int w_low = floor(w);
  const int h_high = h_low + 1;
  const int w_high = w_low + 1;

  // Fractional offsets and their complements (bilinear weight factors).
  const scalar_t lh = h - h_low;
  const scalar_t lw = w - w_low;
  const scalar_t hh = 1 - lh, hw = 1 - lw;

  // Flat-index strides for the (H, W, nheads, channels) layout.
  const int w_stride = nheads * channels;
  const int h_stride = width * w_stride;
  const int h_low_ptr_offset = h_low * h_stride;
  const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
  const int w_low_ptr_offset = w_low * w_stride;
  const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
  const int base_ptr = m * channels + c;

  // Fetch the four corner values, treating out-of-bounds corners as 0.
  scalar_t v1 = 0;
  if (h_low >= 0 && w_low >= 0)
  {
    const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
    v1 = bottom_data[ptr1];
  }
  scalar_t v2 = 0;
  if (h_low >= 0 && w_high <= width - 1)
  {
    const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
    v2 = bottom_data[ptr2];
  }
  scalar_t v3 = 0;
  if (h_high <= height - 1 && w_low >= 0)
  {
    const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
    v3 = bottom_data[ptr3];
  }
  scalar_t v4 = 0;
  if (h_high <= height - 1 && w_high <= width - 1)
  {
    const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
    v4 = bottom_data[ptr4];
  }

  // Standard bilinear combination of the four corner values.
  const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;

  const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  return val;
}
// Backward counterpart of the bilinear sampler for ONE (level, point) sample.
// Accumulates three gradients:
//   * grad_value        -- scattered with atomicAdd (several samples can hit
//                          the same feature entry);
//   * grad_sampling_loc -- d(output)/d(loc_w) and d(loc_h), written with
//                          plain stores (the _gm variant in this file uses
//                          atomicAdd instead);
//   * grad_attn_weight  -- d(output)/d(attention weight), plain store.
// The width/height factors on the location gradients account for the
// un-normalization done by the calling kernels (h_im = loc_h * spatial_h - 0.5).
template <typename scalar_t>
__device__ void ms_deform_attn_col2im_bilinear(const scalar_t* &bottom_data,
                                               const int &height, const int &width, const int &nheads, const int &channels,
                                               const scalar_t &h, const scalar_t &w, const int &m, const int &c,
                                               const scalar_t &top_grad,
                                               const scalar_t &attn_weight,
                                               scalar_t* &grad_value,
                                               scalar_t* grad_sampling_loc,
                                               scalar_t* grad_attn_weight)
{
  // Corner coordinates and fractional parts, exactly as in the forward sampler.
  const int h_low = floor(h);
  const int w_low = floor(w);
  const int h_high = h_low + 1;
  const int w_high = w_low + 1;
  const scalar_t lh = h - h_low;
  const scalar_t lw = w - w_low;
  const scalar_t hh = 1 - lh, hw = 1 - lw;
  // Flat strides for the [height, width, nheads, channels] layout.
  const int w_stride = nheads * channels;
  const int h_stride = width * w_stride;
  const int h_low_ptr_offset = h_low * h_stride;
  const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
  const int w_low_ptr_offset = w_low * w_stride;
  const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
  const int base_ptr = m * channels + c;
  const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
  const scalar_t top_grad_value = top_grad * attn_weight;  // dL/d(sampled value)
  scalar_t grad_h_weight = 0, grad_w_weight = 0;  // accumulators for dval/dh, dval/dw
  // For each in-bounds corner: read its value, fold its contribution into the
  // location-gradient accumulators (signs follow d(w_i)/dh, d(w_i)/dw), and
  // scatter the value gradient atomically.
  scalar_t v1 = 0;
  if (h_low >= 0 && w_low >= 0)
  {
    const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
    v1 = bottom_data[ptr1];
    grad_h_weight -= hw * v1;
    grad_w_weight -= hh * v1;
    atomicAdd(grad_value+ptr1, w1*top_grad_value);
  }
  scalar_t v2 = 0;
  if (h_low >= 0 && w_high <= width - 1)
  {
    const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
    v2 = bottom_data[ptr2];
    grad_h_weight -= lw * v2;
    grad_w_weight += hh * v2;
    atomicAdd(grad_value+ptr2, w2*top_grad_value);
  }
  scalar_t v3 = 0;
  if (h_high <= height - 1 && w_low >= 0)
  {
    const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
    v3 = bottom_data[ptr3];
    grad_h_weight += hw * v3;
    grad_w_weight -= lh * v3;
    atomicAdd(grad_value+ptr3, w3*top_grad_value);
  }
  scalar_t v4 = 0;
  if (h_high <= height - 1 && w_high <= width - 1)
  {
    const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
    v4 = bottom_data[ptr4];
    grad_h_weight += lw * v4;
    grad_w_weight += lh * v4;
    atomicAdd(grad_value+ptr4, w4*top_grad_value);
  }
  // Recompute the forward sample to get dL/d(attn_weight) = top_grad * val.
  const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  *grad_attn_weight = top_grad * val;
  *grad_sampling_loc = width * grad_w_weight * top_grad_value;        // dL/d(loc_w)
  *(grad_sampling_loc + 1) = height * grad_h_weight * top_grad_value; // dL/d(loc_h)
}
// Identical to ms_deform_attn_col2im_bilinear except that the sampling-location
// and attention-weight gradients are accumulated with atomicAdd directly in
// global memory instead of plain stores -- used by the _gm kernel, which has no
// per-block shared-memory reduction, so concurrent writers are possible.
template <typename scalar_t>
__device__ void ms_deform_attn_col2im_bilinear_gm(const scalar_t* &bottom_data,
                                                  const int &height, const int &width, const int &nheads, const int &channels,
                                                  const scalar_t &h, const scalar_t &w, const int &m, const int &c,
                                                  const scalar_t &top_grad,
                                                  const scalar_t &attn_weight,
                                                  scalar_t* &grad_value,
                                                  scalar_t* grad_sampling_loc,
                                                  scalar_t* grad_attn_weight)
{
  // Corner coordinates and fractional parts, as in the forward sampler.
  const int h_low = floor(h);
  const int w_low = floor(w);
  const int h_high = h_low + 1;
  const int w_high = w_low + 1;
  const scalar_t lh = h - h_low;
  const scalar_t lw = w - w_low;
  const scalar_t hh = 1 - lh, hw = 1 - lw;
  // Flat strides for the [height, width, nheads, channels] layout.
  const int w_stride = nheads * channels;
  const int h_stride = width * w_stride;
  const int h_low_ptr_offset = h_low * h_stride;
  const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
  const int w_low_ptr_offset = w_low * w_stride;
  const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
  const int base_ptr = m * channels + c;
  const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
  const scalar_t top_grad_value = top_grad * attn_weight;  // dL/d(sampled value)
  scalar_t grad_h_weight = 0, grad_w_weight = 0;  // accumulators for dval/dh, dval/dw
  // Per-corner value reads, location-gradient accumulation, and atomic
  // scatter of the value gradient (signs follow d(w_i)/dh, d(w_i)/dw).
  scalar_t v1 = 0;
  if (h_low >= 0 && w_low >= 0)
  {
    const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
    v1 = bottom_data[ptr1];
    grad_h_weight -= hw * v1;
    grad_w_weight -= hh * v1;
    atomicAdd(grad_value+ptr1, w1*top_grad_value);
  }
  scalar_t v2 = 0;
  if (h_low >= 0 && w_high <= width - 1)
  {
    const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
    v2 = bottom_data[ptr2];
    grad_h_weight -= lw * v2;
    grad_w_weight += hh * v2;
    atomicAdd(grad_value+ptr2, w2*top_grad_value);
  }
  scalar_t v3 = 0;
  if (h_high <= height - 1 && w_low >= 0)
  {
    const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
    v3 = bottom_data[ptr3];
    grad_h_weight += hw * v3;
    grad_w_weight -= lh * v3;
    atomicAdd(grad_value+ptr3, w3*top_grad_value);
  }
  scalar_t v4 = 0;
  if (h_high <= height - 1 && w_high <= width - 1)
  {
    const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
    v4 = bottom_data[ptr4];
    grad_h_weight += lw * v4;
    grad_w_weight += lh * v4;
    atomicAdd(grad_value+ptr4, w4*top_grad_value);
  }
  // Atomic accumulation (vs. plain stores in the non-_gm variant).
  const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  atomicAdd(grad_attn_weight, top_grad * val);
  atomicAdd(grad_sampling_loc, width * grad_w_weight * top_grad_value);
  atomicAdd(grad_sampling_loc + 1, height * grad_h_weight * top_grad_value);
}
// Forward kernel: one thread per output element of data_col. Each thread
// accumulates, over all levels and sampling points, the attention-weighted
// bilinear samples for its (batch, query, head, channel) coordinate.
template <typename scalar_t>
__global__ void ms_deformable_im2col_gpu_kernel(const int n,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size,
                                                const int spatial_size,
                                                const int num_heads,
                                                const int channels,
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *data_col)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Decompose the flat index (fastest to slowest): channel, head, query, batch.
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;  // (batch, query, head) triple
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    const int q_col = _temp % num_query;  // decomposed but not used directly below
    _temp /= num_query;
    const int b_col = _temp;
    scalar_t *data_col_ptr = data_col + index;
    // Cursors into the per-sample weight/location arrays (2 floats per location).
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    scalar_t col = 0;
    for (int l_col=0; l_col < num_levels; ++l_col)
    {
      // Locate this level's features via its start index and (H, W) shape.
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const scalar_t *data_value_ptr = data_value + (data_value_ptr_init_offset + level_start_id * qid_stride);
      for (int p_col=0; p_col < num_point; ++p_col)
      {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];
        // Map the location to pixel space; the -0.5 aligns the sample with
        // pixel centers (loc appears to be normalized to [0, 1] -- confirm
        // against the Python caller).
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Open interval (-1, spatial) keeps partially-overlapping samples.
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          col += ms_deform_attn_im2col_bilinear(data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col) * weight;
        }
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
      }
    }
    *data_col_ptr = col;
  }
}
// Backward kernel, variant v1: static shared memory + serial reduction.
// blockSize is a compile-time constant matching blockDim.x (the launcher
// instantiates it per channel count). Each thread computes the gradient of
// its (batch, query, head, channel) element; per-point location/weight
// gradients are staged in shared memory and summed serially by thread 0.
template <typename scalar_t, unsigned int blockSize>
__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1(const int n,
                                                const scalar_t *grad_col,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size,
                                                const int spatial_size,
                                                const int num_heads,
                                                const int channels,
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *grad_value,
                                                scalar_t *grad_sampling_loc,
                                                scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Staging buffers: 2 slots/thread for (w, h) location gradients,
    // 1 slot/thread for the attention-weight gradient.
    __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2];
    __shared__ scalar_t cache_grad_attn_weight[blockSize];
    unsigned int tid = threadIdx.x;
    // Decompose the flat index: channel fastest, then head, query, batch.
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;  // (batch, query, head) triple
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    const int q_col = _temp % num_query;  // decomposed but not used directly below
    _temp /= num_query;
    const int b_col = _temp;
    const scalar_t top_grad = grad_col[index];
    // Cursors into the per-sample location/weight arrays (2 floats per location).
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    // Advance output pointers to this sample's first gradient slot.
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    for (int l_col=0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
      for (int p_col=0; p_col < num_point; ++p_col)
      {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Zero this thread's cache slots so out-of-bounds samples contribute 0.
        *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight+threadIdx.x)=0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          ms_deform_attn_col2im_bilinear(
            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
            top_grad, weight, grad_value_ptr,
            cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
        }
        // All threads must have written their cache slots before reducing.
        __syncthreads();
        if (tid == 0)
        {
          scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0];
          int sid=2;
          // Serial sum over all blockSize entries. NOTE: this loop's `tid`
          // shadows the outer `tid`; harmless, since only thread 0 runs it.
          for (unsigned int tid = 1; tid < blockSize; ++tid)
          {
            _grad_w += cache_grad_sampling_loc[sid];
            _grad_h += cache_grad_sampling_loc[sid + 1];
            _grad_a += cache_grad_attn_weight[tid];
            sid += 2;
          }
          *grad_sampling_loc = _grad_w;
          *(grad_sampling_loc + 1) = _grad_h;
          *grad_attn_weight = _grad_a;
        }
        // Keep the cache intact until thread 0 finishes, then move on.
        __syncthreads();
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
// Backward kernel, variant v2: same staging as v1, but the per-point shared-
// memory gradients are summed with a parallel tree reduction (halving stride
// s = blockSize/2, /4, ...). The visible launcher only instantiates power-of-
// two blockSize values, which the halving loop requires for full coverage.
template <typename scalar_t, unsigned int blockSize>
__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2(const int n,
                                                const scalar_t *grad_col,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size,
                                                const int spatial_size,
                                                const int num_heads,
                                                const int channels,
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *grad_value,
                                                scalar_t *grad_sampling_loc,
                                                scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Staging buffers: 2 slots/thread for (w, h) location gradients,
    // 1 slot/thread for the attention-weight gradient.
    __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2];
    __shared__ scalar_t cache_grad_attn_weight[blockSize];
    unsigned int tid = threadIdx.x;
    // Decompose the flat index: channel fastest, then head, query, batch.
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;  // (batch, query, head) triple
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    const int q_col = _temp % num_query;  // decomposed but not used directly below
    _temp /= num_query;
    const int b_col = _temp;
    const scalar_t top_grad = grad_col[index];
    // Cursors into the per-sample location/weight arrays (2 floats per location).
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    // Advance output pointers to this sample's first gradient slot.
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    for (int l_col=0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
      for (int p_col=0; p_col < num_point; ++p_col)
      {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Zero this thread's cache slots so out-of-bounds samples contribute 0.
        *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight+threadIdx.x)=0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          ms_deform_attn_col2im_bilinear(
            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
            top_grad, weight, grad_value_ptr,
            cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
        }
        // All threads must have written their cache slots before reducing.
        __syncthreads();
        // Tree reduction: each step folds the upper half into the lower half.
        for (unsigned int s=blockSize/2; s>0; s>>=1)
        {
          if (tid < s) {
            const unsigned int xid1 = tid << 1;
            const unsigned int xid2 = (tid + s) << 1;
            cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
            cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
            cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
          }
          __syncthreads();
        }
        if (tid == 0)
        {
          *grad_sampling_loc = cache_grad_sampling_loc[0];
          *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1];
          *grad_attn_weight = cache_grad_attn_weight[0];
        }
        // Keep the cache intact until the writeback completes, then move on.
        __syncthreads();
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
// Backward kernel: dynamic shared memory + serial reduction by thread 0.
// Like the blocksize-aware v1 variant, but block size is a runtime value, so
// the caches live in the launcher-provided `extern __shared__` buffer
// (3 * blockDim.x scalars: 2 per thread for location, 1 for weight).
template <typename scalar_t>
__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v1(const int n,
                                                const scalar_t *grad_col,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size,
                                                const int spatial_size,
                                                const int num_heads,
                                                const int channels,
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *grad_value,
                                                scalar_t *grad_sampling_loc,
                                                scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Partition the dynamic shared buffer: first 2*blockDim.x scalars cache
    // location gradients, the next blockDim.x cache weight gradients.
    extern __shared__ int _s[];
    scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;
    scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
    unsigned int tid = threadIdx.x;
    // Decompose the flat index: channel fastest, then head, query, batch.
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;  // (batch, query, head) triple
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    const int q_col = _temp % num_query;  // decomposed but not used directly below
    _temp /= num_query;
    const int b_col = _temp;
    const scalar_t top_grad = grad_col[index];
    // Cursors into the per-sample location/weight arrays (2 floats per location).
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    // Advance output pointers to this sample's first gradient slot.
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    for (int l_col=0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
      for (int p_col=0; p_col < num_point; ++p_col)
      {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Zero this thread's cache slots so out-of-bounds samples contribute 0.
        *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight+threadIdx.x)=0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          ms_deform_attn_col2im_bilinear(
            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
            top_grad, weight, grad_value_ptr,
            cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
        }
        // All threads must have written their cache slots before reducing.
        __syncthreads();
        if (tid == 0)
        {
          scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0];
          int sid=2;
          // Serial sum over all blockDim.x entries. NOTE: this loop's `tid`
          // shadows the outer `tid`; harmless, since only thread 0 runs it.
          for (unsigned int tid = 1; tid < blockDim.x; ++tid)
          {
            _grad_w += cache_grad_sampling_loc[sid];
            _grad_h += cache_grad_sampling_loc[sid + 1];
            _grad_a += cache_grad_attn_weight[tid];
            sid += 2;
          }
          *grad_sampling_loc = _grad_w;
          *(grad_sampling_loc + 1) = _grad_h;
          *grad_attn_weight = _grad_a;
        }
        // Keep the cache intact until thread 0 finishes, then move on.
        __syncthreads();
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
// Backward kernel: dynamic shared memory + tree reduction.
// Unlike the blocksize-aware v2 variant, this reduction also handles
// non-power-of-two blockDim.x: when the previous round's size `spre` is odd,
// the leftover tail element at index tid + 2s is folded in as well.
template <typename scalar_t>
__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2(const int n,
                                                const scalar_t *grad_col,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size,
                                                const int spatial_size,
                                                const int num_heads,
                                                const int channels,
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *grad_value,
                                                scalar_t *grad_sampling_loc,
                                                scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Partition the dynamic shared buffer: first 2*blockDim.x scalars cache
    // location gradients, the next blockDim.x cache weight gradients.
    extern __shared__ int _s[];
    scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;
    scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
    unsigned int tid = threadIdx.x;
    // Decompose the flat index: channel fastest, then head, query, batch.
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;  // (batch, query, head) triple
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    const int q_col = _temp % num_query;  // decomposed but not used directly below
    _temp /= num_query;
    const int b_col = _temp;
    const scalar_t top_grad = grad_col[index];
    // Cursors into the per-sample location/weight arrays (2 floats per location).
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    // Advance output pointers to this sample's first gradient slot.
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    for (int l_col=0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
      for (int p_col=0; p_col < num_point; ++p_col)
      {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Zero this thread's cache slots so out-of-bounds samples contribute 0.
        *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight+threadIdx.x)=0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          ms_deform_attn_col2im_bilinear(
            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
            top_grad, weight, grad_value_ptr,
            cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
        }
        // All threads must have written their cache slots before reducing.
        __syncthreads();
        // Tree reduction; `spre` tracks the previous round's element count so
        // odd-sized rounds fold their tail element in too.
        for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1)
        {
          if (tid < s) {
            const unsigned int xid1 = tid << 1;
            const unsigned int xid2 = (tid + s) << 1;
            cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
            cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
            cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
            if (tid + (s << 1) < spre)
            {
              cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)];
              cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)];
              cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)];
            }
          }
          __syncthreads();
        }
        if (tid == 0)
        {
          *grad_sampling_loc = cache_grad_sampling_loc[0];
          *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1];
          *grad_attn_weight = cache_grad_attn_weight[0];
        }
        // Keep the cache intact until the writeback completes, then move on.
        __syncthreads();
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
// Backward kernel: identical staging and tree reduction to shm_reduce_v2, but
// the final writeback uses atomicAdd. The launcher selects this variant when
// channels > blockDim.x, so the threads of one sample span multiple blocks
// and several blocks may accumulate into the same gradient slot.
template <typename scalar_t>
__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks(const int n,
                                                const scalar_t *grad_col,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size,
                                                const int spatial_size,
                                                const int num_heads,
                                                const int channels,
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *grad_value,
                                                scalar_t *grad_sampling_loc,
                                                scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Partition the dynamic shared buffer: first 2*blockDim.x scalars cache
    // location gradients, the next blockDim.x cache weight gradients.
    extern __shared__ int _s[];
    scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;
    scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
    unsigned int tid = threadIdx.x;
    // Decompose the flat index: channel fastest, then head, query, batch.
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;  // (batch, query, head) triple
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    const int q_col = _temp % num_query;  // decomposed but not used directly below
    _temp /= num_query;
    const int b_col = _temp;
    const scalar_t top_grad = grad_col[index];
    // Cursors into the per-sample location/weight arrays (2 floats per location).
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    // Advance output pointers to this sample's first gradient slot.
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    for (int l_col=0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
      for (int p_col=0; p_col < num_point; ++p_col)
      {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Zero this thread's cache slots so out-of-bounds samples contribute 0.
        *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight+threadIdx.x)=0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          ms_deform_attn_col2im_bilinear(
            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
            top_grad, weight, grad_value_ptr,
            cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
        }
        // All threads must have written their cache slots before reducing.
        __syncthreads();
        // Tree reduction; `spre` tracks the previous round's element count so
        // odd-sized rounds fold their tail element in too.
        for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1)
        {
          if (tid < s) {
            const unsigned int xid1 = tid << 1;
            const unsigned int xid2 = (tid + s) << 1;
            cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
            cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
            cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
            if (tid + (s << 1) < spre)
            {
              cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)];
              cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)];
              cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)];
            }
          }
          __syncthreads();
        }
        if (tid == 0)
        {
          // Atomic: other blocks may be accumulating into the same slots.
          atomicAdd(grad_sampling_loc, cache_grad_sampling_loc[0]);
          atomicAdd(grad_sampling_loc + 1, cache_grad_sampling_loc[1]);
          atomicAdd(grad_attn_weight, cache_grad_attn_weight[0]);
        }
        // Keep the cache intact until the writeback completes, then move on.
        __syncthreads();
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
// Backward kernel, global-memory fallback: no shared-memory staging at all.
// Every gradient contribution is atomically added straight to global memory
// via ms_deform_attn_col2im_bilinear_gm. The launcher uses this variant when
// channels exceed the block size and are not evenly divisible by it.
template <typename scalar_t>
__global__ void ms_deformable_col2im_gpu_kernel_gm(const int n,
                                                const scalar_t *grad_col,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size,
                                                const int spatial_size,
                                                const int num_heads,
                                                const int channels,
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *grad_value,
                                                scalar_t *grad_sampling_loc,
                                                scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Decompose the flat index: channel fastest, then head, query, batch.
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;  // (batch, query, head) triple
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    const int q_col = _temp % num_query;  // decomposed but not used directly below
    _temp /= num_query;
    const int b_col = _temp;
    const scalar_t top_grad = grad_col[index];
    // Cursors into the per-sample location/weight arrays (2 floats per location).
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    // Advance output pointers to this sample's first gradient slot.
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    for (int l_col=0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
      for (int p_col=0; p_col < num_point; ++p_col)
      {
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          // All three gradients are accumulated atomically inside the helper.
          ms_deform_attn_col2im_bilinear_gm(
            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
            top_grad, weight, grad_value_ptr,
            grad_sampling_loc, grad_attn_weight);
        }
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
// Host-side launcher for the forward im2col kernel: one CUDA thread per
// output element of data_col (batch * query * head * channel elements),
// CUDA_NUM_THREADS threads per block, enqueued on `stream`.
// Launch errors are reported via printf on the host.
template <typename scalar_t>
void ms_deformable_im2col_cuda(cudaStream_t stream,
                              const scalar_t* data_value,
                              const int64_t* data_spatial_shapes,
                              const int64_t* data_level_start_index,
                              const scalar_t* data_sampling_loc,
                              const scalar_t* data_attn_weight,
                              const int batch_size,
                              const int spatial_size,
                              const int num_heads,
                              const int channels,
                              const int num_levels,
                              const int num_query,
                              const int num_point,
                              scalar_t* data_col)
{
  // Total output elements: one per (batch, query, head, channel).
  const int output_elements = batch_size * num_query * num_heads * channels;
  const int threads_per_block = CUDA_NUM_THREADS;
  const int blocks = GET_BLOCKS(output_elements, threads_per_block);
  ms_deformable_im2col_gpu_kernel<scalar_t>
      <<<blocks, threads_per_block, 0, stream>>>(
          output_elements, data_value, data_spatial_shapes, data_level_start_index,
          data_sampling_loc, data_attn_weight,
          batch_size, spatial_size, num_heads, channels, num_levels, num_query,
          num_point, data_col);
  const cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
  {
    printf("error in ms_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
  }
}
// Host-side launcher for the multi-scale deformable attention backward pass.
// Picks a kernel variant from the channel count: blockSize-templated
// shared-memory reductions for power-of-two channels up to 1024, generic
// shared-memory reductions otherwise, and a global-memory-atomics fallback
// for large irregular channel counts.
template <typename scalar_t>
void ms_deformable_col2im_cuda(cudaStream_t stream,
                              const scalar_t* grad_col,
                              const scalar_t* data_value,
                              const int64_t * data_spatial_shapes,
                              const int64_t * data_level_start_index,
                              const scalar_t * data_sampling_loc,
                              const scalar_t * data_attn_weight,
                              const int batch_size,
                              const int spatial_size,
                              const int num_heads,
                              const int channels,
                              const int num_levels,
                              const int num_query,
                              const int num_point,
                              scalar_t* grad_value,
                              scalar_t* grad_sampling_loc,
                              scalar_t* grad_attn_weight)
{
    // Block size tracks the channel count (capped at CUDA_NUM_THREADS) so the
    // shared-memory variants reduce across exactly one channel group per block.
    const int num_threads = (channels > CUDA_NUM_THREADS) ? CUDA_NUM_THREADS : channels;
    const int num_kernels = batch_size * num_query * num_heads * channels;
    const int num_blocks = GET_BLOCKS(num_kernels, num_threads);

// Every kernel variant takes exactly the same argument list; spell it once.
#define MSDA_COL2IM_KERNEL_ARGS \
    num_kernels, grad_col, data_value, data_spatial_shapes, \
    data_level_start_index, data_sampling_loc, data_attn_weight, \
    batch_size, spatial_size, num_heads, channels, num_levels, num_query, \
    num_point, grad_value, grad_sampling_loc, grad_attn_weight

    if (channels > 1024)
    {
        if ((channels & 1023) == 0)
        {
            // channels is a multiple of 1024: multi-block shared-memory reduction.
            ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks<scalar_t>
                <<<num_blocks, num_threads,
                   num_threads * 3 * sizeof(scalar_t), stream>>>(
                    MSDA_COL2IM_KERNEL_ARGS);
        }
        else
        {
            // Irregular large channel count: fall back to global-memory atomics.
            ms_deformable_col2im_gpu_kernel_gm<scalar_t>
                <<<num_blocks, num_threads, 0, stream>>>(MSDA_COL2IM_KERNEL_ARGS);
        }
    }
    else
    {
// Power-of-two channel counts get a blockSize-templated kernel: the v1
// variants reduce serially in thread 0, the v2 variants use a tree reduction.
#define MSDA_COL2IM_CASE(N, KERNEL) \
    case N: \
        KERNEL<scalar_t, N> \
            <<<num_blocks, num_threads, 0, stream>>>(MSDA_COL2IM_KERNEL_ARGS); \
        break;
        switch (channels)
        {
            MSDA_COL2IM_CASE(1,    ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1)
            MSDA_COL2IM_CASE(2,    ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1)
            MSDA_COL2IM_CASE(4,    ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1)
            MSDA_COL2IM_CASE(8,    ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1)
            MSDA_COL2IM_CASE(16,   ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1)
            MSDA_COL2IM_CASE(32,   ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1)
            MSDA_COL2IM_CASE(64,   ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2)
            MSDA_COL2IM_CASE(128,  ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2)
            MSDA_COL2IM_CASE(256,  ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2)
            MSDA_COL2IM_CASE(512,  ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2)
            MSDA_COL2IM_CASE(1024, ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2)
            default:
                // Non-power-of-two channels <= 1024: generic shared-memory reduction.
                if (channels < 64)
                {
                    ms_deformable_col2im_gpu_kernel_shm_reduce_v1<scalar_t>
                        <<<num_blocks, num_threads,
                           num_threads * 3 * sizeof(scalar_t), stream>>>(
                            MSDA_COL2IM_KERNEL_ARGS);
                }
                else
                {
                    ms_deformable_col2im_gpu_kernel_shm_reduce_v2<scalar_t>
                        <<<num_blocks, num_threads,
                           num_threads * 3 * sizeof(scalar_t), stream>>>(
                            MSDA_COL2IM_KERNEL_ARGS);
                }
        }
#undef MSDA_COL2IM_CASE
    }
#undef MSDA_COL2IM_KERNEL_ARGS

    // Launches are asynchronous; surface any launch-time error immediately.
    const cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        printf("error in ms_deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
    }
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 1,139 | src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_attn_cuda.h | /*!
**************************************************************************************************
* Deformable DETR
* Copyright (c) 2020 SenseTime. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
**************************************************************************************************
* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
**************************************************************************************************
*/
#pragma once
#include <torch/extension.h>
// Forward pass of multi-scale deformable attention on CUDA.
// All tensors must be contiguous CUDA tensors (checked in the .cu); returns
// the attention output of shape (batch, num_query, num_heads * channels).
// im2col_step caps how many batch samples one kernel launch processes.
at::Tensor ms_deform_attn_cuda_forward(
    const at::Tensor &value, 
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const int im2col_step);

// Backward pass: returns {grad_value, grad_sampling_loc, grad_attn_weight},
// each shaped like the corresponding input.
std::vector<at::Tensor> ms_deform_attn_cuda_backward(
    const at::Tensor &value, 
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const at::Tensor &grad_output,
    const int im2col_step);
|
27182812/ChatGLM-LLaMA-chinese-insturct | 7,360 | src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_attn_cuda.cu | /*!
**************************************************************************************************
* Deformable DETR
* Copyright (c) 2020 SenseTime. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
**************************************************************************************************
* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
**************************************************************************************************
*/
#include <vector>
#include "cuda/ms_deform_im2col_cuda.cuh"
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
#pragma once
#include <torch/extension.h>
// Forward pass of multi-scale deformable attention (CUDA).
// Splits the batch into chunks of at most `im2col_step` samples and launches
// one im2col pass per chunk, offsetting the raw data pointers per chunk.
// value is indexed as (batch, spatial_size, num_heads, channels);
// sampling_loc's dim 1 is num_query and dim 4 is num_point (so it is at
// least 5-D). Returns (batch, num_query, num_heads * channels).
at::Tensor ms_deform_attn_cuda_forward(
    const at::Tensor &value, 
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const int im2col_step)
{
    // Kernels index raw pointers, so inputs must be contiguous CUDA tensors.
    AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous");
    AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous");
    AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous");
    AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous");
    AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous");
    AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor");
    AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor");
    AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor");
    AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor");
    AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor");
    const int batch = value.size(0);
    const int spatial_size = value.size(1);
    const int num_heads = value.size(2);
    const int channels = value.size(3);
    const int num_levels = spatial_shapes.size(0);
    const int num_query = sampling_loc.size(1);
    const int num_point = sampling_loc.size(4);
    // Chunk size: at most im2col_step samples per launch, and it must divide
    // the batch exactly so every chunk is full.
    const int im2col_step_ = std::min(batch, im2col_step);
    AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);
    auto output = at::zeros({batch, num_query, num_heads, channels}, value.options());
    const int batch_n = im2col_step_;
    // View the output with the chunk index split out so select(0, n) yields
    // one chunk's worth of output.
    auto output_n = output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});
    // Per-sample element counts, used to advance the raw pointers per chunk.
    auto per_value_size = spatial_size * num_heads * channels;
    auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
    auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
    for (int n = 0; n < batch/im2col_step_; ++n)
    {
        auto columns = output_n.select(0, n);
        AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_forward_cuda", ([&] {
            ms_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(),
                value.data<scalar_t>() + n * im2col_step_ * per_value_size,
                spatial_shapes.data<int64_t>(),
                level_start_index.data<int64_t>(),
                sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
                attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,
                batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,
                columns.data<scalar_t>());
        }));
    }
    // Merge heads and channels into the final embedding dimension.
    output = output.view({batch, num_query, num_heads*channels});
    return output;
}
// Backward pass of multi-scale deformable attention (CUDA).
// Mirrors the forward chunking: the batch is processed im2col_step_ samples
// at a time, and gradients are scattered into zero-initialized tensors shaped
// like value, sampling_loc and attn_weight.
// Returns {grad_value, grad_sampling_loc, grad_attn_weight}.
std::vector<at::Tensor> ms_deform_attn_cuda_backward(
    const at::Tensor &value, 
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const at::Tensor &grad_output,
    const int im2col_step)
{
    // Kernels index raw pointers, so inputs must be contiguous CUDA tensors.
    AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous");
    AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous");
    AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous");
    AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous");
    AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous");
    AT_ASSERTM(grad_output.is_contiguous(), "grad_output tensor has to be contiguous");
    AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor");
    AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor");
    AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor");
    AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor");
    AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor");
    AT_ASSERTM(grad_output.type().is_cuda(), "grad_output must be a CUDA tensor");
    const int batch = value.size(0);
    const int spatial_size = value.size(1);
    const int num_heads = value.size(2);
    const int channels = value.size(3);
    const int num_levels = spatial_shapes.size(0);
    const int num_query = sampling_loc.size(1);
    const int num_point = sampling_loc.size(4);
    // Chunk size, same constraint as in the forward pass.
    const int im2col_step_ = std::min(batch, im2col_step);
    AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);
    // Gradient accumulators, zeroed because the kernels add into them.
    auto grad_value = at::zeros_like(value);
    auto grad_sampling_loc = at::zeros_like(sampling_loc);
    auto grad_attn_weight = at::zeros_like(attn_weight);
    const int batch_n = im2col_step_;
    // Per-sample element counts, used to advance the raw pointers per chunk.
    auto per_value_size = spatial_size * num_heads * channels;
    auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
    auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
    auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});
    for (int n = 0; n < batch/im2col_step_; ++n)
    {
        auto grad_output_g = grad_output_n.select(0, n);
        AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_backward_cuda", ([&] {
            ms_deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(),
                grad_output_g.data<scalar_t>(),
                value.data<scalar_t>() + n * im2col_step_ * per_value_size,
                spatial_shapes.data<int64_t>(),
                level_start_index.data<int64_t>(),
                sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
                attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,
                batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,
                grad_value.data<scalar_t>() +  n * im2col_step_ * per_value_size,
                grad_sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
                grad_attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size);
        }));
    }
    return {
        grad_value, grad_sampling_loc, grad_attn_weight
    };
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 61,433 | src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_attn_cuda.cuh | /*!
**************************************************************************************************
* Deformable DETR
* Copyright (c) 2020 SenseTime. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
**************************************************************************************************
* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
**************************************************************************************************
*/
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cstdio>
#include <algorithm>
#include <cstring>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCAtomics.cuh>
// Grid-stride loop: iterate `i` over [0, n). Each thread starts at its global
// index and strides by the total number of launched threads, so any grid size
// covers all n elements.
#define CUDA_KERNEL_LOOP(i, n)                          \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x;   \
      i < (n);                                          \
      i += blockDim.x * gridDim.x)
// Forward pass of multi-scale deformable attention (CUDA).
// NOTE(review): this .cuh contains a definition identical to the one in
// ms_deform_attn_cuda.cu; including it from more than one translation unit
// would cause duplicate-symbol link errors — worth confirming intent.
// Processes the batch in chunks of im2col_step_ samples; returns
// (batch, num_query, num_heads * channels).
at::Tensor ms_deform_attn_cuda_forward(
    const at::Tensor &value, 
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const int im2col_step)
{
    // Kernels index raw pointers, so inputs must be contiguous CUDA tensors.
    AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous");
    AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous");
    AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous");
    AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous");
    AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous");
    AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor");
    AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor");
    AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor");
    AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor");
    AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor");
    const int batch = value.size(0);
    const int spatial_size = value.size(1);
    const int num_heads = value.size(2);
    const int channels = value.size(3);
    const int num_levels = spatial_shapes.size(0);
    const int num_query = sampling_loc.size(1);
    const int num_point = sampling_loc.size(4);
    // Chunk size must divide the batch exactly.
    const int im2col_step_ = std::min(batch, im2col_step);
    AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);
    auto output = at::zeros({batch, num_query, num_heads, channels}, value.options());
    const int batch_n = im2col_step_;
    auto output_n = output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});
    // Per-sample element counts, used to advance the raw pointers per chunk.
    auto per_value_size = spatial_size * num_heads * channels;
    auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
    auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
    for (int n = 0; n < batch/im2col_step_; ++n)
    {
        auto columns = output_n.select(0, n);
        AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_forward_cuda", ([&] {
            ms_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(),
                value.data<scalar_t>() + n * im2col_step_ * per_value_size,
                spatial_shapes.data<int64_t>(),
                level_start_index.data<int64_t>(),
                sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
                attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,
                batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,
                columns.data<scalar_t>());
        }));
    }
    // Merge heads and channels into the final embedding dimension.
    output = output.view({batch, num_query, num_heads*channels});
    return output;
}
// Backward pass of multi-scale deformable attention (CUDA).
// NOTE(review): duplicates the definition in ms_deform_attn_cuda.cu (see the
// matching note on the forward function above in this header).
// Returns {grad_value, grad_sampling_loc, grad_attn_weight}.
std::vector<at::Tensor> ms_deform_attn_cuda_backward(
    const at::Tensor &value, 
    const at::Tensor &spatial_shapes,
    const at::Tensor &level_start_index,
    const at::Tensor &sampling_loc,
    const at::Tensor &attn_weight,
    const at::Tensor &grad_output,
    const int im2col_step)
{
    // Kernels index raw pointers, so inputs must be contiguous CUDA tensors.
    AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous");
    AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous");
    AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous");
    AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous");
    AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous");
    AT_ASSERTM(grad_output.is_contiguous(), "grad_output tensor has to be contiguous");
    AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor");
    AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor");
    AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor");
    AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor");
    AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor");
    AT_ASSERTM(grad_output.type().is_cuda(), "grad_output must be a CUDA tensor");
    const int batch = value.size(0);
    const int spatial_size = value.size(1);
    const int num_heads = value.size(2);
    const int channels = value.size(3);
    const int num_levels = spatial_shapes.size(0);
    const int num_query = sampling_loc.size(1);
    const int num_point = sampling_loc.size(4);
    // Chunk size must divide the batch exactly.
    const int im2col_step_ = std::min(batch, im2col_step);
    AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);
    // Gradient accumulators, zeroed because the kernels add into them.
    auto grad_value = at::zeros_like(value);
    auto grad_sampling_loc = at::zeros_like(sampling_loc);
    auto grad_attn_weight = at::zeros_like(attn_weight);
    const int batch_n = im2col_step_;
    // Per-sample element counts, used to advance the raw pointers per chunk.
    auto per_value_size = spatial_size * num_heads * channels;
    auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
    auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
    auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});
    for (int n = 0; n < batch/im2col_step_; ++n)
    {
        auto grad_output_g = grad_output_n.select(0, n);
        AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_backward_cuda", ([&] {
            ms_deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(),
                grad_output_g.data<scalar_t>(),
                value.data<scalar_t>() + n * im2col_step_ * per_value_size,
                spatial_shapes.data<int64_t>(),
                level_start_index.data<int64_t>(),
                sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
                attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,
                batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,
                grad_value.data<scalar_t>() +  n * im2col_step_ * per_value_size,
                grad_sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
                grad_attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size);
        }));
    }
    return {
        grad_value, grad_sampling_loc, grad_attn_weight
    };
}
// Default number of threads per CUDA block.
const int CUDA_NUM_THREADS = 1024;

// Number of blocks needed to cover N work items with num_threads threads
// each (ceiling division).
inline int GET_BLOCKS(const int N, const int num_threads)
{
  const int blocks = (N + num_threads - 1) / num_threads;
  return blocks;
}
// Bilinearly sample one (head m, channel c) value from a feature map laid out
// as (height, width, nheads, channels) at fractional location (h, w).
// Corners that fall outside the map contribute zero.
template <typename scalar_t>
__device__ scalar_t ms_deform_attn_im2col_bilinear(const scalar_t* &bottom_data, 
                                                  const int &height, const int &width, const int &nheads, const int &channels,
                                                  const scalar_t &h, const scalar_t &w, const int &m, const int &c)
{
    // Integer corners surrounding (h, w).
    const int y0 = floor(h);
    const int x0 = floor(w);
    const int y1 = y0 + 1;
    const int x1 = x0 + 1;

    // Fractional offsets and their complements.
    const scalar_t ly = h - y0;
    const scalar_t lx = w - x0;
    const scalar_t hy = 1 - ly, hx = 1 - lx;

    // Strides of the flattened (height, width, nheads, channels) layout.
    const int w_stride = nheads * channels;
    const int h_stride = width * w_stride;
    const int y0_offset = y0 * h_stride;
    const int y1_offset = y0_offset + h_stride;
    const int x0_offset = x0 * w_stride;
    const int x1_offset = x0_offset + w_stride;
    const int base_ptr = m * channels + c;

    // Read the four corners; out-of-bounds corners stay zero.
    scalar_t v1 = 0;
    if (y0 >= 0 && x0 >= 0)
    {
        v1 = bottom_data[y0_offset + x0_offset + base_ptr];
    }
    scalar_t v2 = 0;
    if (y0 >= 0 && x1 <= width - 1)
    {
        v2 = bottom_data[y0_offset + x1_offset + base_ptr];
    }
    scalar_t v3 = 0;
    if (y1 <= height - 1 && x0 >= 0)
    {
        v3 = bottom_data[y1_offset + x0_offset + base_ptr];
    }
    scalar_t v4 = 0;
    if (y1 <= height - 1 && x1 <= width - 1)
    {
        v4 = bottom_data[y1_offset + x1_offset + base_ptr];
    }

    // Standard bilinear combination of the four corner values.
    const scalar_t w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
    return w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4;
}
// Backward counterpart of ms_deform_attn_im2col_bilinear: given the upstream
// gradient for one sampled value, scatter gradients to the four feature-map
// corners via atomicAdd (many threads may hit the same location), and write
// the gradients w.r.t. the sampling location and attention weight with plain
// stores — in this file those pointers are per-thread shared-memory slots
// (see the *_shm_* kernels), so the stores are exclusive.
template <typename scalar_t>
__device__ void ms_deform_attn_col2im_bilinear(const scalar_t* &bottom_data, 
                                              const int &height, const int &width, const int &nheads, const int &channels,
                                              const scalar_t &h, const scalar_t &w, const int &m, const int &c,
                                              const scalar_t &top_grad,
                                              const scalar_t &attn_weight,
                                              scalar_t* &grad_value, 
                                              scalar_t* grad_sampling_loc,
                                              scalar_t* grad_attn_weight)
{
    // Corners and bilinear weight factors, identical to the forward sampler.
    const int h_low = floor(h);
    const int w_low = floor(w);
    const int h_high = h_low + 1;
    const int w_high = w_low + 1;
    const scalar_t lh = h - h_low;
    const scalar_t lw = w - w_low;
    const scalar_t hh = 1 - lh, hw = 1 - lw;
    // Strides of the flattened (height, width, nheads, channels) layout.
    const int w_stride = nheads * channels;
    const int h_stride = width * w_stride;
    const int h_low_ptr_offset = h_low * h_stride;
    const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
    const int w_low_ptr_offset = w_low * w_stride;
    const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
    const int base_ptr = m * channels + c;
    const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
    // Chain rule: gradient reaching the sampled value.
    const scalar_t top_grad_value = top_grad * attn_weight;
    // Accumulate d(val)/d(h) and d(val)/d(w) factors from in-bounds corners.
    scalar_t grad_h_weight = 0, grad_w_weight = 0;
    scalar_t v1 = 0;
    if (h_low >= 0 && w_low >= 0)
    {
        const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
        v1 = bottom_data[ptr1];
        grad_h_weight -= hw * v1;
        grad_w_weight -= hh * v1;
        atomicAdd(grad_value+ptr1, w1*top_grad_value);
    }
    scalar_t v2 = 0;
    if (h_low >= 0 && w_high <= width - 1)
    {
        const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
        v2 = bottom_data[ptr2];
        grad_h_weight -= lw * v2;
        grad_w_weight += hh * v2;
        atomicAdd(grad_value+ptr2, w2*top_grad_value);
    }
    scalar_t v3 = 0;
    if (h_high <= height - 1 && w_low >= 0)
    {
        const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
        v3 = bottom_data[ptr3];
        grad_h_weight += hw * v3;
        grad_w_weight -= lh * v3;
        atomicAdd(grad_value+ptr3, w3*top_grad_value);
    }
    scalar_t v4 = 0;
    if (h_high <= height - 1 && w_high <= width - 1)
    {
        const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
        v4 = bottom_data[ptr4];
        grad_h_weight += lw * v4;
        grad_w_weight += lh * v4;
        atomicAdd(grad_value+ptr4, w4*top_grad_value);
    }
    // Gradient w.r.t. the attention weight is the sampled (interpolated) value.
    const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
    *grad_attn_weight = top_grad * val;
    // Location gradients are scaled by width/height — the sampling locations
    // are normalized (the callers compute loc * spatial_{w,h} - 0.5).
    *grad_sampling_loc = width * grad_w_weight * top_grad_value;
    *(grad_sampling_loc + 1) = height * grad_h_weight * top_grad_value;
}
// Same computation as ms_deform_attn_col2im_bilinear, but the sampling-location
// and attention-weight gradients are accumulated with atomicAdd directly into
// global memory ("gm") instead of being stored into exclusive per-thread
// slots. Used by the global-memory fallback kernel for large channel counts.
template <typename scalar_t>
__device__ void ms_deform_attn_col2im_bilinear_gm(const scalar_t* &bottom_data, 
                                                  const int &height, const int &width, const int &nheads, const int &channels,
                                                  const scalar_t &h, const scalar_t &w, const int &m, const int &c,
                                                  const scalar_t &top_grad,
                                                  const scalar_t &attn_weight,
                                                  scalar_t* &grad_value, 
                                                  scalar_t* grad_sampling_loc,
                                                  scalar_t* grad_attn_weight)
{
    // Corners and bilinear weight factors, identical to the forward sampler.
    const int h_low = floor(h);
    const int w_low = floor(w);
    const int h_high = h_low + 1;
    const int w_high = w_low + 1;
    const scalar_t lh = h - h_low;
    const scalar_t lw = w - w_low;
    const scalar_t hh = 1 - lh, hw = 1 - lw;
    // Strides of the flattened (height, width, nheads, channels) layout.
    const int w_stride = nheads * channels;
    const int h_stride = width * w_stride;
    const int h_low_ptr_offset = h_low * h_stride;
    const int h_high_ptr_offset = h_low_ptr_offset + h_stride;
    const int w_low_ptr_offset = w_low * w_stride;
    const int w_high_ptr_offset = w_low_ptr_offset + w_stride;
    const int base_ptr = m * channels + c;
    const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
    // Chain rule: gradient reaching the sampled value.
    const scalar_t top_grad_value = top_grad * attn_weight;
    // Accumulate d(val)/d(h) and d(val)/d(w) factors from in-bounds corners.
    scalar_t grad_h_weight = 0, grad_w_weight = 0;
    scalar_t v1 = 0;
    if (h_low >= 0 && w_low >= 0)
    {
        const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;
        v1 = bottom_data[ptr1];
        grad_h_weight -= hw * v1;
        grad_w_weight -= hh * v1;
        atomicAdd(grad_value+ptr1, w1*top_grad_value);
    }
    scalar_t v2 = 0;
    if (h_low >= 0 && w_high <= width - 1)
    {
        const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;
        v2 = bottom_data[ptr2];
        grad_h_weight -= lw * v2;
        grad_w_weight += hh * v2;
        atomicAdd(grad_value+ptr2, w2*top_grad_value);
    }
    scalar_t v3 = 0;
    if (h_high <= height - 1 && w_low >= 0)
    {
        const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;
        v3 = bottom_data[ptr3];
        grad_h_weight += hw * v3;
        grad_w_weight -= lh * v3;
        atomicAdd(grad_value+ptr3, w3*top_grad_value);
    }
    scalar_t v4 = 0;
    if (h_high <= height - 1 && w_high <= width - 1)
    {
        const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;
        v4 = bottom_data[ptr4];
        grad_h_weight += lw * v4;
        grad_w_weight += lh * v4;
        atomicAdd(grad_value+ptr4, w4*top_grad_value);
    }
    const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
    // Unlike the shm variant, these targets are shared across threads, so the
    // location/weight gradients must also be accumulated atomically.
    atomicAdd(grad_attn_weight, top_grad * val);
    atomicAdd(grad_sampling_loc, width * grad_w_weight * top_grad_value);
    atomicAdd(grad_sampling_loc + 1, height * grad_h_weight * top_grad_value);
}
// Forward kernel: each thread computes one output element — the weighted sum
// of bilinear samples over all levels and points for one
// (batch b, query q, head m, channel c) tuple, writing it to data_col[index].
template <typename scalar_t>
__global__ void ms_deformable_im2col_gpu_kernel(const int n,
                                                const scalar_t *data_value, 
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index, 
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size, 
                                                const int spatial_size, 
                                                const int num_heads, 
                                                const int channels, 
                                                const int num_levels, 
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *data_col)
{
    CUDA_KERNEL_LOOP(index, n)
    {
        // Decompose the flat index into (b_col, q_col, m_col, c_col);
        // channels vary fastest.
        int _temp = index;
        const int c_col = _temp % channels;
        _temp /= channels;
        const int sampling_index = _temp;
        const int m_col = _temp % num_heads;
        _temp /= num_heads;
        const int q_col = _temp % num_query;
        _temp /= num_query;
        const int b_col = _temp;
        scalar_t *data_col_ptr = data_col + index;
        // One attention weight per (level, point); locations store (w, h)
        // pairs, hence the doubled stride.
        int data_weight_ptr = sampling_index * num_levels * num_point;
        int data_loc_w_ptr = data_weight_ptr << 1;
        const int qid_stride = num_heads * channels;
        const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
        scalar_t col = 0;
        for (int l_col=0; l_col < num_levels; ++l_col)
        {
            // Per-level (H, W) and start offset into the flattened value map.
            const int level_start_id = data_level_start_index[l_col];
            const int spatial_h_ptr = l_col << 1;
            const int spatial_h = data_spatial_shapes[spatial_h_ptr];
            const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
            const scalar_t *data_value_ptr = data_value + (data_value_ptr_init_offset + level_start_id * qid_stride);
            for (int p_col=0; p_col < num_point; ++p_col)
            {
                const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
                const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
                const scalar_t weight = data_attn_weight[data_weight_ptr];
                // Map normalized locations to pixel coordinates (align-corners
                // false style: scale then shift by -0.5).
                const scalar_t h_im = loc_h * spatial_h - 0.5;
                const scalar_t w_im = loc_w * spatial_w - 0.5;
                // Points entirely outside the map contribute nothing.
                if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
                {
                    col += ms_deform_attn_im2col_bilinear(data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col) * weight;
                }
                data_weight_ptr += 1;
                data_loc_w_ptr += 2;
            }
        }
        *data_col_ptr = col;
    }
}
// Backward kernel for small power-of-two channel counts (blockSize == channels
// at launch time; see ms_deformable_col2im_cuda). Each thread handles one
// (b, q, m, c) element: it scatters value gradients via atomics inside
// ms_deform_attn_col2im_bilinear, stages its location/weight gradients in
// per-thread shared-memory slots, and thread 0 serially reduces the block's
// slots into global memory for each (level, point).
template <typename scalar_t, unsigned int blockSize>
__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1(const int n,
                                                const scalar_t *grad_col,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size, 
                                                const int spatial_size, 
                                                const int num_heads,
                                                const int channels, 
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *grad_value,
                                                scalar_t *grad_sampling_loc,
                                                scalar_t *grad_attn_weight)
{
    CUDA_KERNEL_LOOP(index, n)
    {
        // Per-thread staging slots: (w, h) gradient pair and a weight gradient.
        __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2];
        __shared__ scalar_t cache_grad_attn_weight[blockSize];
        unsigned int tid = threadIdx.x;
        // Decompose the flat index into (b_col, q_col, m_col, c_col);
        // channels vary fastest.
        int _temp = index;
        const int c_col = _temp % channels;
        _temp /= channels;
        const int sampling_index = _temp;
        const int m_col = _temp % num_heads;
        _temp /= num_heads;
        const int q_col = _temp % num_query;
        _temp /= num_query;
        const int b_col = _temp;
        const scalar_t top_grad = grad_col[index];
        // One weight per (level, point); locations store (w, h) pairs.
        int data_weight_ptr = sampling_index * num_levels * num_point;
        int data_loc_w_ptr = data_weight_ptr << 1;
        // Advance the output gradient pointers to this sampling index.
        const int grad_sampling_ptr = data_weight_ptr;
        grad_sampling_loc += grad_sampling_ptr << 1;
        grad_attn_weight += grad_sampling_ptr;
        const int grad_weight_stride = 1;
        const int grad_loc_stride = 2;
        const int qid_stride = num_heads * channels;
        const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
        for (int l_col=0; l_col < num_levels; ++l_col)
        {
            // Per-level (H, W) and start offset into the flattened value map.
            const int level_start_id = data_level_start_index[l_col];
            const int spatial_h_ptr = l_col << 1;
            const int spatial_h = data_spatial_shapes[spatial_h_ptr];
            const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
            const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
            const scalar_t *data_value_ptr = data_value + value_ptr_offset;
            scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
            for (int p_col=0; p_col < num_point; ++p_col)
            {
                const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
                const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
                const scalar_t weight = data_attn_weight[data_weight_ptr];
                // Map normalized locations to pixel coordinates.
                const scalar_t h_im = loc_h * spatial_h - 0.5;
                const scalar_t w_im = loc_w * spatial_w - 0.5;
                // Zero this thread's staging slots before the conditional
                // scatter so out-of-bounds points contribute zero to the
                // reduction below.
                *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
                *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
                *(cache_grad_attn_weight+threadIdx.x)=0;
                if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
                {
                    ms_deform_attn_col2im_bilinear(
                        data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
                        top_grad, weight, grad_value_ptr, 
                        cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
                }
                // All slots must be written before thread 0 reads them.
                __syncthreads();
                if (tid == 0)
                {
                    scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0];
                    int sid=2;
                    // Serial reduction over the block; note this inner `tid`
                    // deliberately shadows the outer thread id (it only runs
                    // in thread 0, where the outer tid is 0).
                    for (unsigned int tid = 1; tid < blockSize; ++tid)
                    {
                        _grad_w += cache_grad_sampling_loc[sid];
                        _grad_h += cache_grad_sampling_loc[sid + 1];
                        _grad_a += cache_grad_attn_weight[tid];
                        sid += 2;
                    }
                    *grad_sampling_loc = _grad_w;
                    *(grad_sampling_loc + 1) = _grad_h;
                    *grad_attn_weight = _grad_a;
                }
                // Keep the block in lockstep before the slots are reused for
                // the next point.
                __syncthreads();
                data_weight_ptr += 1;
                data_loc_w_ptr += 2;
                grad_attn_weight += grad_weight_stride;
                grad_sampling_loc += grad_loc_stride;
            }
        }
    }
}
// col2im (backward) kernel for multi-scale deformable attention.
// Variant: static shared memory sized by the compile-time blockSize, reduced
// with a binary tree (blockSize must be a power of two; the launcher uses this
// for channels in {64, 128, 256, 512, 1024}).
// Each thread handles one gradient element indexed by
// (batch, query, head, channel); partial gradients w.r.t. the sampling
// location and attention weight are reduced across the block, and thread 0
// writes the reduced values.
// NOTE(review): the block-wide reduction assumes every thread of a block
// shares the same (query, head) — the launcher enforces num_threads ==
// channels; confirm before reusing with other launch configurations.
template <typename scalar_t, unsigned int blockSize>
__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2(const int n,
                                                const scalar_t *grad_col,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size,
                                                const int spatial_size,
                                                const int num_heads,
                                                const int channels,
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *grad_value,
                                                scalar_t *grad_sampling_loc,
                                                scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Per-thread partial gradients: two slots per thread for (x, y) of the
    // sampling location, one slot per thread for the attention weight.
    __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2];
    __shared__ scalar_t cache_grad_attn_weight[blockSize];
    unsigned int tid = threadIdx.x;
    // Decompose the linear index into (batch b_col, query q_col, head m_col,
    // channel c_col); channels vary fastest.
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    // (q_col is part of the decomposition but is not used below.)
    const int q_col = _temp % num_query;
    _temp /= num_query;
    const int b_col = _temp;
    const scalar_t top_grad = grad_col[index];
    // Advance the output gradient pointers to this (batch, query, head)'s
    // first (level, point) slot; locations are interleaved (x, y) pairs.
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    // Walk every (level, point) sampling location attended by this query.
    for (int l_col=0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
      for (int p_col=0; p_col < num_point; ++p_col)
      {
        // Normalized (x, y) sampling location and attention weight for this point.
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];
        // Map normalized coordinates to pixel-centered image coordinates.
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Reset this thread's shared-memory accumulators for the current point.
        *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight+threadIdx.x)=0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          // In bounds: accumulate grad_value and this thread's partial
          // location/weight gradients (the latter into shared memory).
          ms_deform_attn_col2im_bilinear(
            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
            top_grad, weight, grad_value_ptr,
            cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
        }
        __syncthreads();
        // Binary tree reduction over the blockSize partial gradients.
        for (unsigned int s=blockSize/2; s>0; s>>=1)
        {
          if (tid < s) {
            const unsigned int xid1 = tid << 1;
            const unsigned int xid2 = (tid + s) << 1;
            cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
            cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
            cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
          }
          __syncthreads();
        }
        // Thread 0 publishes the reduced gradients for this (level, point).
        if (tid == 0)
        {
          *grad_sampling_loc = cache_grad_sampling_loc[0];
          *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1];
          *grad_attn_weight = cache_grad_attn_weight[0];
        }
        __syncthreads();
        // Step to the next sampling point.
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
// col2im (backward) kernel for multi-scale deformable attention.
// Variant: dynamically sized shared memory ("extern __shared__"); thread 0
// serially sums the blockDim.x partial gradients. The launcher picks this for
// small non-power-of-two channel counts (channels < 64), where a tree
// reduction is not applicable and the serial sum is cheap.
// NOTE(review): the block-wide reduction assumes every thread of a block
// shares the same (query, head) — the launcher enforces num_threads ==
// channels; confirm before reusing with other launch configurations.
template <typename scalar_t>
__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v1(const int n,
                                                const scalar_t *grad_col,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size,
                                                const int spatial_size,
                                                const int num_heads,
                                                const int channels,
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *grad_value,
                                                scalar_t *grad_sampling_loc,
                                                scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Dynamic shared memory layout: 2*blockDim.x floats for location grads
    // followed by blockDim.x floats for attention-weight grads
    // (the launcher allocates num_threads * 3 * sizeof(scalar_t) bytes).
    extern __shared__ int _s[];
    scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;
    scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
    unsigned int tid = threadIdx.x;
    // Decompose the linear index into (batch b_col, query q_col, head m_col,
    // channel c_col); channels vary fastest.
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    // (q_col is part of the decomposition but is not used below.)
    const int q_col = _temp % num_query;
    _temp /= num_query;
    const int b_col = _temp;
    const scalar_t top_grad = grad_col[index];
    // Advance the output gradient pointers to this (batch, query, head)'s
    // first (level, point) slot; locations are interleaved (x, y) pairs.
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    // Walk every (level, point) sampling location attended by this query.
    for (int l_col=0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
      for (int p_col=0; p_col < num_point; ++p_col)
      {
        // Normalized (x, y) sampling location and attention weight for this point.
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];
        // Map normalized coordinates to pixel-centered image coordinates.
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Reset this thread's shared-memory accumulators for the current point.
        *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight+threadIdx.x)=0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          // In bounds: accumulate grad_value and this thread's partial
          // location/weight gradients (the latter into shared memory).
          ms_deform_attn_col2im_bilinear(
            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
            top_grad, weight, grad_value_ptr,
            cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
        }
        __syncthreads();
        // Thread 0 serially reduces all blockDim.x partials and publishes them.
        if (tid == 0)
        {
          scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0];
          int sid=2;
          // NOTE: this loop variable shadows the outer `tid`; only thread 0
          // executes this branch, so the shadowing is harmless.
          for (unsigned int tid = 1; tid < blockDim.x; ++tid)
          {
            _grad_w += cache_grad_sampling_loc[sid];
            _grad_h += cache_grad_sampling_loc[sid + 1];
            _grad_a += cache_grad_attn_weight[tid];
            sid += 2;
          }
          *grad_sampling_loc = _grad_w;
          *(grad_sampling_loc + 1) = _grad_h;
          *grad_attn_weight = _grad_a;
        }
        __syncthreads();
        // Step to the next sampling point.
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
// col2im (backward) kernel for multi-scale deformable attention.
// Variant: dynamically sized shared memory with a tree reduction that also
// folds in the trailing element when the active width is odd (the `spre`
// bookkeeping), so it works for non-power-of-two blockDim.x. The launcher
// picks this for non-power-of-two channels in [64, 1024].
// NOTE(review): the block-wide reduction assumes every thread of a block
// shares the same (query, head) — the launcher enforces num_threads ==
// channels; confirm before reusing with other launch configurations.
template <typename scalar_t>
__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2(const int n,
                                                const scalar_t *grad_col,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size,
                                                const int spatial_size,
                                                const int num_heads,
                                                const int channels,
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *grad_value,
                                                scalar_t *grad_sampling_loc,
                                                scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Dynamic shared memory layout: 2*blockDim.x floats for location grads
    // followed by blockDim.x floats for attention-weight grads
    // (the launcher allocates num_threads * 3 * sizeof(scalar_t) bytes).
    extern __shared__ int _s[];
    scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;
    scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
    unsigned int tid = threadIdx.x;
    // Decompose the linear index into (batch b_col, query q_col, head m_col,
    // channel c_col); channels vary fastest.
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    // (q_col is part of the decomposition but is not used below.)
    const int q_col = _temp % num_query;
    _temp /= num_query;
    const int b_col = _temp;
    const scalar_t top_grad = grad_col[index];
    // Advance the output gradient pointers to this (batch, query, head)'s
    // first (level, point) slot; locations are interleaved (x, y) pairs.
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    // Walk every (level, point) sampling location attended by this query.
    for (int l_col=0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
      for (int p_col=0; p_col < num_point; ++p_col)
      {
        // Normalized (x, y) sampling location and attention weight for this point.
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];
        // Map normalized coordinates to pixel-centered image coordinates.
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Reset this thread's shared-memory accumulators for the current point.
        *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight+threadIdx.x)=0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          // In bounds: accumulate grad_value and this thread's partial
          // location/weight gradients (the latter into shared memory).
          ms_deform_attn_col2im_bilinear(
            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
            top_grad, weight, grad_value_ptr,
            cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
        }
        __syncthreads();
        // Tree reduction; when the previous width `spre` is odd, the extra
        // trailing element at offset tid + 2s is folded in as well.
        for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1)
        {
          if (tid < s) {
            const unsigned int xid1 = tid << 1;
            const unsigned int xid2 = (tid + s) << 1;
            cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
            cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
            cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
            if (tid + (s << 1) < spre)
            {
              cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)];
              cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)];
              cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)];
            }
          }
          __syncthreads();
        }
        // Thread 0 publishes the reduced gradients for this (level, point).
        if (tid == 0)
        {
          *grad_sampling_loc = cache_grad_sampling_loc[0];
          *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1];
          *grad_attn_weight = cache_grad_attn_weight[0];
        }
        __syncthreads();
        // Step to the next sampling point.
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
// col2im (backward) kernel for multi-scale deformable attention.
// Variant: same dynamic-shared-memory tree reduction as shm_reduce_v2, but
// the final write uses atomicAdd. The launcher picks this when
// channels > 1024 (and divisible by 1024), so the channels of one
// (query, head) span several blocks and each block contributes a partial sum
// to the same grad_sampling_loc / grad_attn_weight slots.
template <typename scalar_t>
__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks(const int n,
                                                const scalar_t *grad_col,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size,
                                                const int spatial_size,
                                                const int num_heads,
                                                const int channels,
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *grad_value,
                                                scalar_t *grad_sampling_loc,
                                                scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Dynamic shared memory layout: 2*blockDim.x floats for location grads
    // followed by blockDim.x floats for attention-weight grads
    // (the launcher allocates num_threads * 3 * sizeof(scalar_t) bytes).
    extern __shared__ int _s[];
    scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;
    scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;
    unsigned int tid = threadIdx.x;
    // Decompose the linear index into (batch b_col, query q_col, head m_col,
    // channel c_col); channels vary fastest.
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    // (q_col is part of the decomposition but is not used below.)
    const int q_col = _temp % num_query;
    _temp /= num_query;
    const int b_col = _temp;
    const scalar_t top_grad = grad_col[index];
    // Advance the output gradient pointers to this (batch, query, head)'s
    // first (level, point) slot; locations are interleaved (x, y) pairs.
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    // Walk every (level, point) sampling location attended by this query.
    for (int l_col=0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
      for (int p_col=0; p_col < num_point; ++p_col)
      {
        // Normalized (x, y) sampling location and attention weight for this point.
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];
        // Map normalized coordinates to pixel-centered image coordinates.
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        // Reset this thread's shared-memory accumulators for the current point.
        *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;
        *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;
        *(cache_grad_attn_weight+threadIdx.x)=0;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          // In bounds: accumulate grad_value and this thread's partial
          // location/weight gradients (the latter into shared memory).
          ms_deform_attn_col2im_bilinear(
            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
            top_grad, weight, grad_value_ptr,
            cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);
        }
        __syncthreads();
        // Tree reduction; when the previous width `spre` is odd, the extra
        // trailing element at offset tid + 2s is folded in as well.
        for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1)
        {
          if (tid < s) {
            const unsigned int xid1 = tid << 1;
            const unsigned int xid2 = (tid + s) << 1;
            cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];
            cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];
            cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];
            if (tid + (s << 1) < spre)
            {
              cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)];
              cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)];
              cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)];
            }
          }
          __syncthreads();
        }
        // Thread 0 adds this block's partial sums atomically: multiple blocks
        // (covering different channel slices) target the same output slots.
        if (tid == 0)
        {
          atomicAdd(grad_sampling_loc, cache_grad_sampling_loc[0]);
          atomicAdd(grad_sampling_loc + 1, cache_grad_sampling_loc[1]);
          atomicAdd(grad_attn_weight, cache_grad_attn_weight[0]);
        }
        __syncthreads();
        // Step to the next sampling point.
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
// col2im (backward) kernel for multi-scale deformable attention.
// Variant: no shared memory; per-point gradient accumulation is delegated to
// ms_deform_attn_col2im_bilinear_gm, which writes into global memory
// directly. Used by the launcher as the fallback for channels > 1024 that
// are not a multiple of 1024, where a single block cannot cover all channels
// of a (query, head) and the shared-memory reductions above do not apply.
template <typename scalar_t>
__global__ void ms_deformable_col2im_gpu_kernel_gm(const int n,
                                                const scalar_t *grad_col,
                                                const scalar_t *data_value,
                                                const int64_t *data_spatial_shapes,
                                                const int64_t *data_level_start_index,
                                                const scalar_t *data_sampling_loc,
                                                const scalar_t *data_attn_weight,
                                                const int batch_size,
                                                const int spatial_size,
                                                const int num_heads,
                                                const int channels,
                                                const int num_levels,
                                                const int num_query,
                                                const int num_point,
                                                scalar_t *grad_value,
                                                scalar_t *grad_sampling_loc,
                                                scalar_t *grad_attn_weight)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Decompose the linear index into (batch b_col, query q_col, head m_col,
    // channel c_col); channels vary fastest.
    int _temp = index;
    const int c_col = _temp % channels;
    _temp /= channels;
    const int sampling_index = _temp;
    const int m_col = _temp % num_heads;
    _temp /= num_heads;
    // (q_col is part of the decomposition but is not used below.)
    const int q_col = _temp % num_query;
    _temp /= num_query;
    const int b_col = _temp;
    const scalar_t top_grad = grad_col[index];
    // Advance the output gradient pointers to this (batch, query, head)'s
    // first (level, point) slot; locations are interleaved (x, y) pairs.
    int data_weight_ptr = sampling_index * num_levels * num_point;
    int data_loc_w_ptr = data_weight_ptr << 1;
    const int grad_sampling_ptr = data_weight_ptr;
    grad_sampling_loc += grad_sampling_ptr << 1;
    grad_attn_weight += grad_sampling_ptr;
    const int grad_weight_stride = 1;
    const int grad_loc_stride = 2;
    const int qid_stride = num_heads * channels;
    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;
    // Walk every (level, point) sampling location attended by this query.
    for (int l_col=0; l_col < num_levels; ++l_col)
    {
      const int level_start_id = data_level_start_index[l_col];
      const int spatial_h_ptr = l_col << 1;
      const int spatial_h = data_spatial_shapes[spatial_h_ptr];
      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];
      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;
      const scalar_t *data_value_ptr = data_value + value_ptr_offset;
      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;
      for (int p_col=0; p_col < num_point; ++p_col)
      {
        // Normalized (x, y) sampling location and attention weight for this point.
        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];
        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];
        const scalar_t weight = data_attn_weight[data_weight_ptr];
        // Map normalized coordinates to pixel-centered image coordinates.
        const scalar_t h_im = loc_h * spatial_h - 0.5;
        const scalar_t w_im = loc_w * spatial_w - 0.5;
        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)
        {
          // In bounds: accumulate all three gradients straight into global
          // memory via the _gm helper (no block-level reduction here).
          ms_deform_attn_col2im_bilinear_gm(
            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,
            top_grad, weight, grad_value_ptr,
            grad_sampling_loc, grad_attn_weight);
        }
        // Step to the next sampling point.
        data_weight_ptr += 1;
        data_loc_w_ptr += 2;
        grad_attn_weight += grad_weight_stride;
        grad_sampling_loc += grad_loc_stride;
      }
    }
  }
}
// Host-side launcher for the multi-scale deformable attention forward pass.
// Flattens the output into one kernel thread per (batch, query, head, channel)
// element and runs the im2col kernel on the given stream; launch failures are
// reported via printf (matching the backward launcher's error handling).
template <typename scalar_t>
void ms_deformable_im2col_cuda(cudaStream_t stream,
                              const scalar_t* data_value,
                              const int64_t* data_spatial_shapes,
                              const int64_t* data_level_start_index,
                              const scalar_t* data_sampling_loc,
                              const scalar_t* data_attn_weight,
                              const int batch_size,
                              const int spatial_size,
                              const int num_heads,
                              const int channels,
                              const int num_levels,
                              const int num_query,
                              const int num_point,
                              scalar_t* data_col)
{
  // One thread per output element.
  const int num_threads = CUDA_NUM_THREADS;
  const int num_kernels = batch_size * num_query * num_heads * channels;
  const int num_actual_kernels = batch_size * num_query * num_heads * channels;
  ms_deformable_im2col_gpu_kernel<scalar_t>
      <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads, 0, stream>>>(
          num_kernels, data_value, data_spatial_shapes, data_level_start_index,
          data_sampling_loc, data_attn_weight, batch_size, spatial_size,
          num_heads, channels, num_levels, num_query, num_point, data_col);
  // cudaGetLastError catches invalid launch configurations; kernel-side
  // failures surface asynchronously on a later CUDA call.
  const cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
  {
    printf("error in ms_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
  }
}
// Host-side dispatcher for the col2im (backward) kernels above.
// Block size is min(channels, CUDA_NUM_THREADS), so for channels <= 1024 one
// block covers exactly the channels of a single (query, head) pair — the
// precondition of the shared-memory reduction kernels.
// Kernel selection, keyed on `channels`:
//   * power of two in 1..32        -> shm_blocksize_aware_reduce_v1<blockSize>
//   * power of two in 64..1024     -> shm_blocksize_aware_reduce_v2<blockSize>
//   * other values < 64            -> shm_reduce_v1 (dynamic shared memory)
//   * other values in 64..1024     -> shm_reduce_v2 (dynamic shared memory)
//   * > 1024 and multiple of 1024  -> shm_reduce_v2_multi_blocks (atomicAdd)
//   * > 1024 otherwise             -> plain global-memory kernel (_gm)
// Dynamic-shared variants get num_threads * 3 * sizeof(scalar_t) bytes: two
// values per thread for the sampling-location grad plus one for the weight.
template <typename scalar_t>
void ms_deformable_col2im_cuda(cudaStream_t stream,
                              const scalar_t* grad_col,
                              const scalar_t* data_value,
                              const int64_t * data_spatial_shapes,
                              const int64_t * data_level_start_index,
                              const scalar_t * data_sampling_loc,
                              const scalar_t * data_attn_weight,
                              const int batch_size,
                              const int spatial_size,
                              const int num_heads,
                              const int channels,
                              const int num_levels,
                              const int num_query,
                              const int num_point,
                              scalar_t* grad_value,
                              scalar_t* grad_sampling_loc,
                              scalar_t* grad_attn_weight)
{
  const int num_threads = (channels > CUDA_NUM_THREADS)?CUDA_NUM_THREADS:channels;
  const int num_kernels = batch_size * num_query * num_heads * channels;
  const int num_actual_kernels = batch_size * num_query * num_heads * channels;
  if (channels > 1024)
  {
    // One block cannot cover all channels of a (query, head).
    if ((channels & 1023) == 0)
    {
      // channels is a multiple of 1024: blocks reduce locally, then combine
      // their partials with atomicAdd.
      ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks<scalar_t>
          <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
              num_threads*3*sizeof(scalar_t), stream>>>(
                        num_kernels,
                        grad_col,
                        data_value,
                        data_spatial_shapes,
                        data_level_start_index,
                        data_sampling_loc,
                        data_attn_weight,
                        batch_size,
                        spatial_size,
                        num_heads,
                        channels,
                        num_levels,
                        num_query,
                        num_point,
                        grad_value,
                        grad_sampling_loc,
                        grad_attn_weight);
    }
    else
    {
      // Irregular large channel count: fall back to direct global-memory
      // accumulation (no shared-memory reduction).
      ms_deformable_col2im_gpu_kernel_gm<scalar_t>
          <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
              0, stream>>>(
                        num_kernels,
                        grad_col,
                        data_value,
                        data_spatial_shapes,
                        data_level_start_index,
                        data_sampling_loc,
                        data_attn_weight,
                        batch_size,
                        spatial_size,
                        num_heads,
                        channels,
                        num_levels,
                        num_query,
                        num_point,
                        grad_value,
                        grad_sampling_loc,
                        grad_attn_weight);
    }
  }
  else{
    // Power-of-two channel counts get a compile-time blockSize template so
    // the reduction loop bounds are constants.
    switch(channels)
    {
      case 1:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 1>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
                0, stream>>>(
                          num_kernels,
                          grad_col,
                          data_value,
                          data_spatial_shapes,
                          data_level_start_index,
                          data_sampling_loc,
                          data_attn_weight,
                          batch_size,
                          spatial_size,
                          num_heads,
                          channels,
                          num_levels,
                          num_query,
                          num_point,
                          grad_value,
                          grad_sampling_loc,
                          grad_attn_weight);
        break;
      case 2:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 2>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
                0, stream>>>(
                          num_kernels,
                          grad_col,
                          data_value,
                          data_spatial_shapes,
                          data_level_start_index,
                          data_sampling_loc,
                          data_attn_weight,
                          batch_size,
                          spatial_size,
                          num_heads,
                          channels,
                          num_levels,
                          num_query,
                          num_point,
                          grad_value,
                          grad_sampling_loc,
                          grad_attn_weight);
        break;
      case 4:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 4>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
                0, stream>>>(
                          num_kernels,
                          grad_col,
                          data_value,
                          data_spatial_shapes,
                          data_level_start_index,
                          data_sampling_loc,
                          data_attn_weight,
                          batch_size,
                          spatial_size,
                          num_heads,
                          channels,
                          num_levels,
                          num_query,
                          num_point,
                          grad_value,
                          grad_sampling_loc,
                          grad_attn_weight);
        break;
      case 8:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 8>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
                0, stream>>>(
                          num_kernels,
                          grad_col,
                          data_value,
                          data_spatial_shapes,
                          data_level_start_index,
                          data_sampling_loc,
                          data_attn_weight,
                          batch_size,
                          spatial_size,
                          num_heads,
                          channels,
                          num_levels,
                          num_query,
                          num_point,
                          grad_value,
                          grad_sampling_loc,
                          grad_attn_weight);
        break;
      case 16:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 16>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
                0, stream>>>(
                          num_kernels,
                          grad_col,
                          data_value,
                          data_spatial_shapes,
                          data_level_start_index,
                          data_sampling_loc,
                          data_attn_weight,
                          batch_size,
                          spatial_size,
                          num_heads,
                          channels,
                          num_levels,
                          num_query,
                          num_point,
                          grad_value,
                          grad_sampling_loc,
                          grad_attn_weight);
        break;
      case 32:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 32>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
                0, stream>>>(
                          num_kernels,
                          grad_col,
                          data_value,
                          data_spatial_shapes,
                          data_level_start_index,
                          data_sampling_loc,
                          data_attn_weight,
                          batch_size,
                          spatial_size,
                          num_heads,
                          channels,
                          num_levels,
                          num_query,
                          num_point,
                          grad_value,
                          grad_sampling_loc,
                          grad_attn_weight);
        break;
      case 64:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 64>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
                0, stream>>>(
                          num_kernels,
                          grad_col,
                          data_value,
                          data_spatial_shapes,
                          data_level_start_index,
                          data_sampling_loc,
                          data_attn_weight,
                          batch_size,
                          spatial_size,
                          num_heads,
                          channels,
                          num_levels,
                          num_query,
                          num_point,
                          grad_value,
                          grad_sampling_loc,
                          grad_attn_weight);
        break;
      case 128:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 128>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
                0, stream>>>(
                          num_kernels,
                          grad_col,
                          data_value,
                          data_spatial_shapes,
                          data_level_start_index,
                          data_sampling_loc,
                          data_attn_weight,
                          batch_size,
                          spatial_size,
                          num_heads,
                          channels,
                          num_levels,
                          num_query,
                          num_point,
                          grad_value,
                          grad_sampling_loc,
                          grad_attn_weight);
        break;
      case 256:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 256>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
                0, stream>>>(
                          num_kernels,
                          grad_col,
                          data_value,
                          data_spatial_shapes,
                          data_level_start_index,
                          data_sampling_loc,
                          data_attn_weight,
                          batch_size,
                          spatial_size,
                          num_heads,
                          channels,
                          num_levels,
                          num_query,
                          num_point,
                          grad_value,
                          grad_sampling_loc,
                          grad_attn_weight);
        break;
      case 512:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 512>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
                0, stream>>>(
                          num_kernels,
                          grad_col,
                          data_value,
                          data_spatial_shapes,
                          data_level_start_index,
                          data_sampling_loc,
                          data_attn_weight,
                          batch_size,
                          spatial_size,
                          num_heads,
                          channels,
                          num_levels,
                          num_query,
                          num_point,
                          grad_value,
                          grad_sampling_loc,
                          grad_attn_weight);
        break;
      case 1024:
        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 1024>
            <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
                0, stream>>>(
                          num_kernels,
                          grad_col,
                          data_value,
                          data_spatial_shapes,
                          data_level_start_index,
                          data_sampling_loc,
                          data_attn_weight,
                          batch_size,
                          spatial_size,
                          num_heads,
                          channels,
                          num_levels,
                          num_query,
                          num_point,
                          grad_value,
                          grad_sampling_loc,
                          grad_attn_weight);
        break;
      default:
        // Non-power-of-two channels: use the runtime-sized shared-memory
        // kernels with 3 scalars of dynamic shared memory per thread.
        if (channels < 64)
        {
          ms_deformable_col2im_gpu_kernel_shm_reduce_v1<scalar_t>
              <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
                  num_threads*3*sizeof(scalar_t), stream>>>(
                            num_kernels,
                            grad_col,
                            data_value,
                            data_spatial_shapes,
                            data_level_start_index,
                            data_sampling_loc,
                            data_attn_weight,
                            batch_size,
                            spatial_size,
                            num_heads,
                            channels,
                            num_levels,
                            num_query,
                            num_point,
                            grad_value,
                            grad_sampling_loc,
                            grad_attn_weight);
        }
        else
        {
          ms_deformable_col2im_gpu_kernel_shm_reduce_v2<scalar_t>
              <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,
                  num_threads*3*sizeof(scalar_t), stream>>>(
                            num_kernels,
                            grad_col,
                            data_value,
                            data_spatial_shapes,
                            data_level_start_index,
                            data_sampling_loc,
                            data_attn_weight,
                            batch_size,
                            spatial_size,
                            num_heads,
                            channels,
                            num_levels,
                            num_query,
                            num_point,
                            grad_value,
                            grad_sampling_loc,
                            grad_attn_weight);
        }
    }
  }
  // cudaGetLastError catches invalid launch configurations; kernel-side
  // failures surface asynchronously on a later CUDA call.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
  {
    printf("error in ms_deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
  }
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 33,153 | src/transformers/data/processors/squad.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from functools import partial
from multiprocessing import Pool, cpu_count
import numpy as np
from tqdm import tqdm
from ...models.bert.tokenization_bert import whitespace_tokenize
from ...tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase, TruncationStrategy
from ...utils import is_tf_available, is_torch_available, logging
from .utils import DataProcessor
# Store the tokenizers which insert 2 separators tokens
MULTI_SEP_TOKENS_TOKENIZERS_SET = {"roberta", "camembert", "bart", "mpnet"}
if is_torch_available():
import torch
from torch.utils.data import TensorDataset
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start : (new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
best_score = None
best_span_index = None
for span_index, doc_span in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def _new_check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# if len(doc_spans) == 1:
# return True
best_score = None
best_span_index = None
for span_index, doc_span in enumerate(doc_spans):
end = doc_span["start"] + doc_span["length"] - 1
if position < doc_span["start"]:
continue
if position > end:
continue
num_left_context = position - doc_span["start"]
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"]
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def _is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
def squad_convert_example_to_features(
    example, max_seq_length, doc_stride, max_query_length, padding_strategy, is_training
):
    """
    Convert a single `SquadExample` into zero or more `SquadFeatures`, one per
    document span (long contexts are windowed with `doc_stride`).

    Relies on the module-level `tokenizer` global installed by
    `squad_convert_example_to_features_init` (multiprocessing worker
    initializer). Returns `[]` when, in training mode, the annotated answer
    text cannot be located in the context.
    """
    features = []
    if is_training and not example.is_impossible:
        # Get start and end position
        start_position = example.start_position
        end_position = example.end_position

        # If the answer cannot be found in the text, then skip this example.
        actual_text = " ".join(example.doc_tokens[start_position : (end_position + 1)])
        cleaned_answer_text = " ".join(whitespace_tokenize(example.answer_text))
        if actual_text.find(cleaned_answer_text) == -1:
            logger.warning(f"Could not find answer: '{actual_text}' vs. '{cleaned_answer_text}'")
            return []

    # Sub-tokenize the context while keeping bidirectional maps between
    # whitespace-token indices and sub-token indices.
    tok_to_orig_index = []
    orig_to_tok_index = []
    all_doc_tokens = []
    for i, token in enumerate(example.doc_tokens):
        orig_to_tok_index.append(len(all_doc_tokens))
        # Byte-level BPE tokenizers need add_prefix_space for mid-sentence words.
        if tokenizer.__class__.__name__ in [
            "RobertaTokenizer",
            "LongformerTokenizer",
            "BartTokenizer",
            "RobertaTokenizerFast",
            "LongformerTokenizerFast",
            "BartTokenizerFast",
        ]:
            sub_tokens = tokenizer.tokenize(token, add_prefix_space=True)
        else:
            sub_tokens = tokenizer.tokenize(token)
        for sub_token in sub_tokens:
            tok_to_orig_index.append(i)
            all_doc_tokens.append(sub_token)

    if is_training and not example.is_impossible:
        # Project the word-level answer span onto sub-token indices.
        tok_start_position = orig_to_tok_index[example.start_position]
        if example.end_position < len(example.doc_tokens) - 1:
            tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
        else:
            tok_end_position = len(all_doc_tokens) - 1

        (tok_start_position, tok_end_position) = _improve_answer_span(
            all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text
        )

    spans = []

    truncated_query = tokenizer.encode(
        example.question_text, add_special_tokens=False, truncation=True, max_length=max_query_length
    )

    # Tokenizers who insert 2 SEP tokens in-between <context> & <question> need to have special handling
    # in the way they compute mask of added tokens.
    tokenizer_type = type(tokenizer).__name__.replace("Tokenizer", "").lower()
    sequence_added_tokens = (
        tokenizer.model_max_length - tokenizer.max_len_single_sentence + 1
        if tokenizer_type in MULTI_SEP_TOKENS_TOKENIZERS_SET
        else tokenizer.model_max_length - tokenizer.max_len_single_sentence
    )
    sequence_pair_added_tokens = tokenizer.model_max_length - tokenizer.max_len_sentences_pair

    # Window over the document: each iteration produces one span (one feature).
    span_doc_tokens = all_doc_tokens
    while len(spans) * doc_stride < len(all_doc_tokens):
        # Define the side we want to truncate / pad and the text/pair sorting
        if tokenizer.padding_side == "right":
            texts = truncated_query
            pairs = span_doc_tokens
            truncation = TruncationStrategy.ONLY_SECOND.value
        else:
            texts = span_doc_tokens
            pairs = truncated_query
            truncation = TruncationStrategy.ONLY_FIRST.value

        encoded_dict = tokenizer.encode_plus(  # TODO(thom) update this logic
            texts,
            pairs,
            truncation=truncation,
            padding=padding_strategy,
            max_length=max_seq_length,
            return_overflowing_tokens=True,
            stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens,
            return_token_type_ids=True,
        )

        # Number of context sub-tokens that actually fit into this span.
        paragraph_len = min(
            len(all_doc_tokens) - len(spans) * doc_stride,
            max_seq_length - len(truncated_query) - sequence_pair_added_tokens,
        )

        # Strip padding (on whichever side it was applied) to recover the real tokens.
        if tokenizer.pad_token_id in encoded_dict["input_ids"]:
            if tokenizer.padding_side == "right":
                non_padded_ids = encoded_dict["input_ids"][: encoded_dict["input_ids"].index(tokenizer.pad_token_id)]
            else:
                last_padding_id_position = (
                    len(encoded_dict["input_ids"]) - 1 - encoded_dict["input_ids"][::-1].index(tokenizer.pad_token_id)
                )
                non_padded_ids = encoded_dict["input_ids"][last_padding_id_position + 1 :]
        else:
            non_padded_ids = encoded_dict["input_ids"]

        tokens = tokenizer.convert_ids_to_tokens(non_padded_ids)

        # Map each in-span token position back to its whitespace-token index.
        token_to_orig_map = {}
        for i in range(paragraph_len):
            index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == "right" else i
            token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i]

        # Stash span-level bookkeeping directly on the encoding dict.
        encoded_dict["paragraph_len"] = paragraph_len
        encoded_dict["tokens"] = tokens
        encoded_dict["token_to_orig_map"] = token_to_orig_map
        encoded_dict["truncated_query_with_special_tokens_length"] = len(truncated_query) + sequence_added_tokens
        encoded_dict["token_is_max_context"] = {}
        encoded_dict["start"] = len(spans) * doc_stride
        encoded_dict["length"] = paragraph_len

        spans.append(encoded_dict)

        if "overflowing_tokens" not in encoded_dict or (
            "overflowing_tokens" in encoded_dict and len(encoded_dict["overflowing_tokens"]) == 0
        ):
            break
        span_doc_tokens = encoded_dict["overflowing_tokens"]

    # Mark, per token, whether this span gives it its maximum context.
    for doc_span_index in range(len(spans)):
        for j in range(spans[doc_span_index]["paragraph_len"]):
            is_max_context = _new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j)
            index = (
                j
                if tokenizer.padding_side == "left"
                else spans[doc_span_index]["truncated_query_with_special_tokens_length"] + j
            )
            spans[doc_span_index]["token_is_max_context"][index] = is_max_context

    for span in spans:
        # Identify the position of the CLS token
        cls_index = span["input_ids"].index(tokenizer.cls_token_id)

        # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer)
        # Original TF implementation also keep the classification token (set to 0)
        p_mask = np.ones_like(span["token_type_ids"])
        if tokenizer.padding_side == "right":
            p_mask[len(truncated_query) + sequence_added_tokens :] = 0
        else:
            p_mask[-len(span["tokens"]) : -(len(truncated_query) + sequence_added_tokens)] = 0

        # NOTE(review): np.where on a Python list compares elementwise only if
        # input_ids is an ndarray; with list input this relies on numpy's
        # array coercion — confirm the encoding returns lists as expected.
        pad_token_indices = np.where(span["input_ids"] == tokenizer.pad_token_id)
        special_token_indices = np.asarray(
            tokenizer.get_special_tokens_mask(span["input_ids"], already_has_special_tokens=True)
        ).nonzero()

        p_mask[pad_token_indices] = 1
        p_mask[special_token_indices] = 1

        # Set the cls index to 0: the CLS index can be used for impossible answers
        p_mask[cls_index] = 0

        span_is_impossible = example.is_impossible
        start_position = 0
        end_position = 0
        if is_training and not span_is_impossible:
            # For training, if our document chunk does not contain an annotation
            # we throw it out, since there is nothing to predict.
            doc_start = span["start"]
            doc_end = span["start"] + span["length"] - 1
            out_of_span = False

            if not (tok_start_position >= doc_start and tok_end_position <= doc_end):
                out_of_span = True

            if out_of_span:
                # Point the answer at CLS, the conventional "no answer" target.
                start_position = cls_index
                end_position = cls_index
                span_is_impossible = True
            else:
                if tokenizer.padding_side == "left":
                    doc_offset = 0
                else:
                    doc_offset = len(truncated_query) + sequence_added_tokens

                start_position = tok_start_position - doc_start + doc_offset
                end_position = tok_end_position - doc_start + doc_offset

        features.append(
            SquadFeatures(
                span["input_ids"],
                span["attention_mask"],
                span["token_type_ids"],
                cls_index,
                p_mask.tolist(),
                example_index=0,  # Can not set unique_id and example_index here. They will be set after multiple processing.
                unique_id=0,
                paragraph_len=span["paragraph_len"],
                token_is_max_context=span["token_is_max_context"],
                tokens=span["tokens"],
                token_to_orig_map=span["token_to_orig_map"],
                start_position=start_position,
                end_position=end_position,
                is_impossible=span_is_impossible,
                qas_id=example.qas_id,
            )
        )
    return features
def squad_convert_example_to_features_init(tokenizer_for_convert: PreTrainedTokenizerBase):
    # Multiprocessing worker initializer: store the tokenizer in a module-level
    # global so squad_convert_example_to_features can reach it without the
    # tokenizer being pickled into every pool task.
    global tokenizer
    tokenizer = tokenizer_for_convert
def squad_convert_examples_to_features(
    examples,
    tokenizer,
    max_seq_length,
    doc_stride,
    max_query_length,
    is_training,
    padding_strategy="max_length",
    return_dataset=False,
    threads=1,
    tqdm_enabled=True,
):
    """
    Converts a list of examples into a list of features that can be directly given as input to a model. It is
    model-dependant and takes advantage of many of the tokenizer's features to create the model's inputs.

    Args:
        examples: list of [`~data.processors.squad.SquadExample`]
        tokenizer: an instance of a child of [`PreTrainedTokenizer`]
        max_seq_length: The maximum sequence length of the inputs.
        doc_stride: The stride used when the context is too large and is split across several features.
        max_query_length: The maximum length of the query.
        is_training: whether to create features for model evaluation or model training.
        padding_strategy: Default to "max_length". Which padding strategy to use
        return_dataset: Default False. Either 'pt' or 'tf'.
            if 'pt': returns a torch.data.TensorDataset, if 'tf': returns a tf.data.Dataset
        threads: multiple processing threads.

    Returns:
        list of [`~data.processors.squad.SquadFeatures`]

    Example:

    ```python
    processor = SquadV2Processor()
    examples = processor.get_dev_examples(data_dir)

    features = squad_convert_examples_to_features(
        examples=examples,
        tokenizer=tokenizer,
        max_seq_length=args.max_seq_length,
        doc_stride=args.doc_stride,
        max_query_length=args.max_query_length,
        is_training=not evaluate,
    )
    ```"""
    # Defining helper methods
    features = []

    threads = min(threads, cpu_count())
    # The tokenizer is handed to each worker via the pool initializer (stored
    # in a module-level global) instead of being pickled per task.
    with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p:
        annotate_ = partial(
            squad_convert_example_to_features,
            max_seq_length=max_seq_length,
            doc_stride=doc_stride,
            max_query_length=max_query_length,
            padding_strategy=padding_strategy,
            is_training=is_training,
        )
        features = list(
            tqdm(
                p.imap(annotate_, examples, chunksize=32),
                total=len(examples),
                desc="convert squad examples to features",
                disable=not tqdm_enabled,
            )
        )

    # Flatten the per-example feature lists, skipping examples that produced
    # none, and assign each feature a globally unique id and example index.
    new_features = []
    unique_id = 1000000000
    example_index = 0
    for example_features in tqdm(
        features, total=len(features), desc="add example index and unique id", disable=not tqdm_enabled
    ):
        if not example_features:
            continue
        for example_feature in example_features:
            example_feature.example_index = example_index
            example_feature.unique_id = unique_id
            new_features.append(example_feature)
            unique_id += 1
        example_index += 1
    features = new_features
    del new_features
    if return_dataset == "pt":
        if not is_torch_available():
            raise RuntimeError("PyTorch must be installed to return a PyTorch dataset.")

        # Convert to Tensors and build dataset
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
        all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
        all_is_impossible = torch.tensor([f.is_impossible for f in features], dtype=torch.float)

        if not is_training:
            # Evaluation datasets carry a feature index instead of gold positions.
            all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
            dataset = TensorDataset(
                all_input_ids, all_attention_masks, all_token_type_ids, all_feature_index, all_cls_index, all_p_mask
            )
        else:
            all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
            all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
            dataset = TensorDataset(
                all_input_ids,
                all_attention_masks,
                all_token_type_ids,
                all_start_positions,
                all_end_positions,
                all_cls_index,
                all_p_mask,
                all_is_impossible,
            )

        return features, dataset
    elif return_dataset == "tf":
        if not is_tf_available():
            raise RuntimeError("TensorFlow must be installed to return a TensorFlow dataset.")

        def gen():
            # Yield (inputs, targets) per feature; token_type_ids is included
            # only when the tokenizer produced it.
            for i, ex in enumerate(features):
                if ex.token_type_ids is None:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "feature_index": i,
                            "qas_id": ex.qas_id,
                        },
                        {
                            "start_positions": ex.start_position,
                            "end_positions": ex.end_position,
                            "cls_index": ex.cls_index,
                            "p_mask": ex.p_mask,
                            "is_impossible": ex.is_impossible,
                        },
                    )
                else:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                            "feature_index": i,
                            "qas_id": ex.qas_id,
                        },
                        {
                            "start_positions": ex.start_position,
                            "end_positions": ex.end_position,
                            "cls_index": ex.cls_index,
                            "p_mask": ex.p_mask,
                            "is_impossible": ex.is_impossible,
                        },
                    )

        # Why have we split the batch into a tuple? PyTorch just has a list of tensors.
        if "token_type_ids" in tokenizer.model_input_names:
            train_types = (
                {
                    "input_ids": tf.int32,
                    "attention_mask": tf.int32,
                    "token_type_ids": tf.int32,
                    "feature_index": tf.int64,
                    "qas_id": tf.string,
                },
                {
                    "start_positions": tf.int64,
                    "end_positions": tf.int64,
                    "cls_index": tf.int64,
                    "p_mask": tf.int32,
                    "is_impossible": tf.int32,
                },
            )

            train_shapes = (
                {
                    "input_ids": tf.TensorShape([None]),
                    "attention_mask": tf.TensorShape([None]),
                    "token_type_ids": tf.TensorShape([None]),
                    "feature_index": tf.TensorShape([]),
                    "qas_id": tf.TensorShape([]),
                },
                {
                    "start_positions": tf.TensorShape([]),
                    "end_positions": tf.TensorShape([]),
                    "cls_index": tf.TensorShape([]),
                    "p_mask": tf.TensorShape([None]),
                    "is_impossible": tf.TensorShape([]),
                },
            )
        else:
            train_types = (
                {"input_ids": tf.int32, "attention_mask": tf.int32, "feature_index": tf.int64, "qas_id": tf.string},
                {
                    "start_positions": tf.int64,
                    "end_positions": tf.int64,
                    "cls_index": tf.int64,
                    "p_mask": tf.int32,
                    "is_impossible": tf.int32,
                },
            )

            train_shapes = (
                {
                    "input_ids": tf.TensorShape([None]),
                    "attention_mask": tf.TensorShape([None]),
                    "feature_index": tf.TensorShape([]),
                    "qas_id": tf.TensorShape([]),
                },
                {
                    "start_positions": tf.TensorShape([]),
                    "end_positions": tf.TensorShape([]),
                    "cls_index": tf.TensorShape([]),
                    "p_mask": tf.TensorShape([None]),
                    "is_impossible": tf.TensorShape([]),
                },
            )

        return tf.data.Dataset.from_generator(gen, train_types, train_shapes)
    else:
        return features
class SquadProcessor(DataProcessor):
    """
    Processor for the SQuAD data set. overridden by SquadV1Processor and SquadV2Processor, used by the version 1.1 and
    version 2.0 of SQuAD, respectively.
    """

    # Concrete subclasses must set these to their split's JSON file names.
    train_file = None
    dev_file = None

    def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False):
        # Build one SquadExample from a TFDS record. Training mode keeps only
        # the first gold answer; evaluation mode keeps all of them.
        if not evaluate:
            answer = tensor_dict["answers"]["text"][0].numpy().decode("utf-8")
            answer_start = tensor_dict["answers"]["answer_start"][0].numpy()
            answers = []
        else:
            answers = [
                {"answer_start": start.numpy(), "text": text.numpy().decode("utf-8")}
                for start, text in zip(tensor_dict["answers"]["answer_start"], tensor_dict["answers"]["text"])
            ]

            answer = None
            answer_start = None

        return SquadExample(
            qas_id=tensor_dict["id"].numpy().decode("utf-8"),
            question_text=tensor_dict["question"].numpy().decode("utf-8"),
            context_text=tensor_dict["context"].numpy().decode("utf-8"),
            answer_text=answer,
            start_position_character=answer_start,
            title=tensor_dict["title"].numpy().decode("utf-8"),
            answers=answers,
        )

    def get_examples_from_dataset(self, dataset, evaluate=False):
        """
        Creates a list of [`~data.processors.squad.SquadExample`] using a TFDS dataset.

        Args:
            dataset: The tfds dataset loaded from *tensorflow_datasets.load("squad")*
            evaluate: Boolean specifying if in evaluation mode or in training mode

        Returns:
            List of SquadExample

        Examples:

        ```python
        >>> import tensorflow_datasets as tfds

        >>> dataset = tfds.load("squad")

        >>> training_examples = get_examples_from_dataset(dataset, evaluate=False)
        >>> evaluation_examples = get_examples_from_dataset(dataset, evaluate=True)
        ```"""

        if evaluate:
            dataset = dataset["validation"]
        else:
            dataset = dataset["train"]

        examples = []
        for tensor_dict in tqdm(dataset):
            examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate))

        return examples

    def get_train_examples(self, data_dir, filename=None):
        """
        Returns the training examples from the data directory.

        Args:
            data_dir: Directory containing the data files used for training and evaluating.
            filename: None by default, specify this if the training file has a different name than the original one
                which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively.
        """
        if data_dir is None:
            data_dir = ""

        if self.train_file is None:
            raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")

        with open(
            os.path.join(data_dir, self.train_file if filename is None else filename), "r", encoding="utf-8"
        ) as reader:
            input_data = json.load(reader)["data"]
        return self._create_examples(input_data, "train")

    def get_dev_examples(self, data_dir, filename=None):
        """
        Returns the evaluation example from the data directory.

        Args:
            data_dir: Directory containing the data files used for training and evaluating.
            filename: None by default, specify this if the evaluation file has a different name than the original one
                which is `dev-v1.1.json` and `dev-v2.0.json` for squad versions 1.1 and 2.0 respectively.
        """
        if data_dir is None:
            data_dir = ""

        if self.dev_file is None:
            raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")

        with open(
            os.path.join(data_dir, self.dev_file if filename is None else filename), "r", encoding="utf-8"
        ) as reader:
            input_data = json.load(reader)["data"]
        return self._create_examples(input_data, "dev")

    def _create_examples(self, input_data, set_type):
        # Walk the nested SQuAD JSON layout (entry -> paragraphs -> qas) and
        # build one SquadExample per question.
        is_training = set_type == "train"
        examples = []
        for entry in tqdm(input_data):
            title = entry["title"]
            for paragraph in entry["paragraphs"]:
                context_text = paragraph["context"]
                for qa in paragraph["qas"]:
                    qas_id = qa["id"]
                    question_text = qa["question"]
                    start_position_character = None
                    answer_text = None
                    answers = []

                    # SQuAD v1 files have no "is_impossible" key; default False.
                    is_impossible = qa.get("is_impossible", False)
                    if not is_impossible:
                        if is_training:
                            answer = qa["answers"][0]
                            answer_text = answer["text"]
                            start_position_character = answer["answer_start"]
                        else:
                            answers = qa["answers"]

                    example = SquadExample(
                        qas_id=qas_id,
                        question_text=question_text,
                        context_text=context_text,
                        answer_text=answer_text,
                        start_position_character=start_position_character,
                        title=title,
                        is_impossible=is_impossible,
                        answers=answers,
                    )
                    examples.append(example)
        return examples
class SquadV1Processor(SquadProcessor):
    """Processor for SQuAD v1.1 (every question has an answer)."""

    train_file = "train-v1.1.json"
    dev_file = "dev-v1.1.json"
class SquadV2Processor(SquadProcessor):
    """Processor for SQuAD v2.0 (questions may be unanswerable)."""

    train_file = "train-v2.0.json"
    dev_file = "dev-v2.0.json"
class SquadExample:
    """
    A single training/test example for the Squad dataset, as loaded from disk.

    Args:
        qas_id: The example's unique identifier
        question_text: The question string
        context_text: The context string
        answer_text: The answer string
        start_position_character: The character position of the start of the answer
        title: The title of the example
        answers: None by default, this is used during evaluation. Holds answers as well as their start positions.
        is_impossible: False by default, set to True if the example has no possible answer.
    """

    def __init__(
        self,
        qas_id,
        question_text,
        context_text,
        answer_text,
        start_position_character,
        title,
        answers=None,
        is_impossible=False,
    ):
        self.qas_id = qas_id
        self.question_text = question_text
        self.context_text = context_text
        self.answer_text = answer_text
        self.title = title
        self.is_impossible = is_impossible
        # Fix: the default used to be a mutable `answers=[]`, which is created
        # once at function-definition time and shared by every instance built
        # without an explicit `answers` argument — mutating one example's list
        # would silently affect all of them. Use a None sentinel instead.
        self.answers = answers if answers is not None else []

        self.start_position, self.end_position = 0, 0

        doc_tokens = []
        char_to_word_offset = []
        prev_is_whitespace = True

        # Split on whitespace so that different tokens may be attributed to their original position.
        for c in self.context_text:
            if _is_whitespace(c):
                prev_is_whitespace = True
            else:
                if prev_is_whitespace:
                    doc_tokens.append(c)
                else:
                    doc_tokens[-1] += c
                prev_is_whitespace = False
            # Every character (including whitespace) maps to the index of the
            # most recently started token (-1 before the first token).
            char_to_word_offset.append(len(doc_tokens) - 1)

        self.doc_tokens = doc_tokens
        self.char_to_word_offset = char_to_word_offset

        # Start and end positions only has a value during evaluation.
        if start_position_character is not None and not is_impossible:
            self.start_position = char_to_word_offset[start_position_character]
            self.end_position = char_to_word_offset[
                min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1)
            ]
class SquadFeatures:
    """
    Single squad example features to be fed to a model. Those features are model-specific and can be crafted from
    [`~data.processors.squad.SquadExample`] using the
    `squad_convert_examples_to_features` function.

    Args:
        input_ids: Indices of input sequence tokens in the vocabulary.
        attention_mask: Mask to avoid performing attention on padding token indices.
        token_type_ids: Segment token indices to indicate first and second portions of the inputs.
        cls_index: the index of the CLS token.
        p_mask: Mask identifying tokens that can be answers vs. tokens that cannot.
            Mask with 1 for tokens than cannot be in the answer and 0 for token that can be in an answer
        example_index: the index of the example
        unique_id: The unique Feature identifier
        paragraph_len: The length of the context
        token_is_max_context:
            List of booleans identifying which tokens have their maximum context in this feature object. If a token
            does not have their maximum context in this feature object, it means that another feature object has more
            information related to that token and should be prioritized over this feature for that token.
        tokens: list of tokens corresponding to the input ids
        token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer.
        start_position: start of the answer token index
        end_position: end of the answer token index
        encoding: optionally store the BatchEncoding with the fast-tokenizer alignment methods.
    """

    def __init__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        cls_index,
        p_mask,
        example_index,
        unique_id,
        paragraph_len,
        token_is_max_context,
        tokens,
        token_to_orig_map,
        start_position,
        end_position,
        is_impossible,
        qas_id: str = None,
        encoding: BatchEncoding = None,
    ):
        # Plain data holder: every argument is stored as-is on the instance.
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.token_type_ids = token_type_ids
        self.cls_index = cls_index
        self.p_mask = p_mask
        self.example_index = example_index
        self.unique_id = unique_id
        self.paragraph_len = paragraph_len
        self.token_is_max_context = token_is_max_context
        self.tokens = tokens
        self.token_to_orig_map = token_to_orig_map
        self.start_position = start_position
        self.end_position = end_position
        self.is_impossible = is_impossible
        self.qas_id = qas_id
        self.encoding = encoding
class SquadResult:
    """
    Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.

    Args:
        unique_id: The unique identifier corresponding to that example.
        start_logits: The logits corresponding to the start of the answer
        end_logits: The logits corresponding to the end of the answer
    """

    def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):
        self.unique_id = unique_id
        self.start_logits = start_logits
        self.end_logits = end_logits

        # NOTE(review): this is a truthiness test, so the top-index attributes
        # are only set when `start_top_index` is non-empty/non-zero; callers
        # must be prepared for the attributes to be absent otherwise.
        if start_top_index:
            self.start_top_index = start_top_index
            self.end_top_index = end_top_index
            self.cls_logits = cls_logits
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" GLUE processors and helpers"""
import os
import warnings
from dataclasses import asdict
from enum import Enum
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_tf_available, logging
from .utils import DataProcessor, InputExample, InputFeatures
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
# Message template for the FutureWarnings emitted by the conversion function
# and the processor constructors below; {0} names the deprecated artifact.
DEPRECATION_WARNING = (
    "This {0} will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def glue_convert_examples_to_features(
    examples: Union[List[InputExample], "tf.data.Dataset"],
    tokenizer: PreTrainedTokenizer,
    max_length: Optional[int] = None,
    task=None,
    label_list=None,
    output_mode=None,
):
    """
    Loads a data file into a list of `InputFeatures`, dispatching on the input
    type: a `tf.data.Dataset` goes to the TensorFlow-specific helper (which
    requires `task`), a plain list of examples to the framework-agnostic one.

    Args:
        examples: List of `InputExamples` or `tf.data.Dataset` containing the examples.
        tokenizer: Instance of a tokenizer that will tokenize the examples
        max_length: Maximum example length. Defaults to the tokenizer's max_len
        task: GLUE task
        label_list: List of labels. Can be obtained from the processor using the `processor.get_labels()` method
        output_mode: String indicating the output mode. Either `regression` or `classification`

    Returns:
        If the `examples` input is a `tf.data.Dataset`, a `tf.data.Dataset` of
        task-specific features; otherwise a list of task-specific
        `InputFeatures` that can be fed to the model.
    """
    warnings.warn(DEPRECATION_WARNING.format("function"), FutureWarning)
    got_tf_dataset = is_tf_available() and isinstance(examples, tf.data.Dataset)
    if got_tf_dataset:
        if task is None:
            raise ValueError("When calling glue_convert_examples_to_features from TF, the task parameter is required.")
        return _tf_glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task)
    return _glue_convert_examples_to_features(
        examples, tokenizer, max_length=max_length, task=task, label_list=label_list, output_mode=output_mode
    )
if is_tf_available():

    def _tf_glue_convert_examples_to_features(
        examples: tf.data.Dataset,
        tokenizer: PreTrainedTokenizer,
        # NOTE(review): the default `task=str` looks unintended (a type where a
        # value was presumably meant); callers always pass a task — confirm
        # before changing.
        task=str,
        max_length: Optional[int] = None,
    ) -> tf.data.Dataset:
        """
        Returns:
            A `tf.data.Dataset` containing the task-specific features.
        """
        processor = glue_processors[task]()
        # Materialize the TFDS records as InputExamples, then reuse the
        # generic conversion path.
        examples = [processor.tfds_map(processor.get_example_from_tensor_dict(example)) for example in examples]
        features = glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task)
        # STS-B is a regression task; every other GLUE task has integer labels.
        label_type = tf.float32 if task == "sts-b" else tf.int64

        def gen():
            # Yield (inputs, label) pairs, dropping keys whose value is None so
            # the generator output matches the declared signature.
            for ex in features:
                d = {k: v for k, v in asdict(ex).items() if v is not None}
                label = d.pop("label")
                yield (d, label)

        input_names = tokenizer.model_input_names

        return tf.data.Dataset.from_generator(
            gen,
            ({k: tf.int32 for k in input_names}, label_type),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
def _glue_convert_examples_to_features(
    examples: List[InputExample],
    tokenizer: PreTrainedTokenizer,
    max_length: Optional[int] = None,
    task=None,
    label_list=None,
    output_mode=None,
):
    """
    Framework-agnostic conversion of `InputExample`s to `InputFeatures`.

    When `task` is given, any missing `label_list`/`output_mode` is filled in
    from the registered GLUE processor for that task.
    """
    if max_length is None:
        max_length = tokenizer.model_max_length

    if task is not None:
        processor = glue_processors[task]()
        if label_list is None:
            label_list = processor.get_labels()
            logger.info(f"Using label list {label_list} for task {task}")
        if output_mode is None:
            output_mode = glue_output_modes[task]
            logger.info(f"Using output mode {output_mode} for task {task}")

    label_map = {label: i for i, label in enumerate(label_list)}

    def label_from_example(example: InputExample) -> Union[int, float, None]:
        # None labels pass through (test sets); otherwise map to an int class
        # index or a float regression target depending on the output mode.
        if example.label is None:
            return None
        if output_mode == "classification":
            return label_map[example.label]
        if output_mode == "regression":
            return float(example.label)
        raise KeyError(output_mode)

    labels = [label_from_example(example) for example in examples]

    # Tokenize all (text_a, text_b) pairs in one batched call.
    batch_encoding = tokenizer(
        [(example.text_a, example.text_b) for example in examples],
        max_length=max_length,
        padding="max_length",
        truncation=True,
    )

    # Slice the batched encoding back into per-example features.
    features = [
        InputFeatures(**{key: batch_encoding[key][i] for key in batch_encoding}, label=labels[i])
        for i in range(len(examples))
    ]

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example.guid}")
        logger.info(f"features: {features[i]}")

    return features
class OutputMode(Enum):
    """Kind of prediction target a GLUE task uses."""

    classification = "classification"
    regression = "regression"
class MrpcProcessor(DataProcessor):
    """Processor for the MRPC data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["sentence1"].numpy().decode("utf-8"),
            tensor_dict["sentence2"].numpy().decode("utf-8"),
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info(f"LOOKING AT {os.path.join(data_dir, 'train.tsv')}")
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training, dev and test sets."""
        is_test = set_type == "test"
        examples = []
        # Row 0 is the TSV header; start the counter at 1 so guids keep
        # matching the original file's row numbers.
        for row_index, row in enumerate(lines[1:], start=1):
            examples.append(
                InputExample(
                    guid=f"{set_type}-{row_index}",
                    text_a=row[3],
                    text_b=row[4],
                    label=None if is_test else row[0],
                )
            )
        return examples
class MnliProcessor(DataProcessor):
    """Processor for the MultiNLI data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["premise"].numpy().decode("utf-8"),
            tensor_dict["hypothesis"].numpy().decode("utf-8"),
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test_matched")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training, dev and test sets."""
        examples = []
        # Skip the TSV header; the pair id in column 0 becomes the guid suffix
        # and the gold label sits in the last column (absent for test splits).
        for row in lines[1:]:
            examples.append(
                InputExample(
                    guid=f"{set_type}-{row[0]}",
                    text_a=row[8],
                    text_b=row[9],
                    label=None if set_type.startswith("test") else row[-1],
                )
            )
        return examples
class MnliMismatchedProcessor(MnliProcessor):
    """Processor for the MultiNLI Mismatched data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_dev_examples(self, data_dir):
        """See base class."""
        # Only the file names differ from MnliProcessor: the mismatched split
        # uses *_mismatched.tsv instead of the matched files.
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_mismatched")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_mismatched.tsv")), "test_mismatched")
class ColaProcessor(DataProcessor):
    """Processor for the CoLA data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["sentence"].numpy().decode("utf-8"),
            None,
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training, dev and test sets."""
        test_mode = set_type == "test"
        # Only the test split ships with a header row, and it stores the
        # sentence in a different column than train/dev.
        rows = lines[1:] if test_mode else lines
        text_index = 1 if test_mode else 3
        examples = []
        for i, row in enumerate(rows):
            examples.append(
                InputExample(
                    guid=f"{set_type}-{i}",
                    text_a=row[text_index],
                    text_b=None,
                    label=None if test_mode else row[1],
                )
            )
        return examples
class Sst2Processor(DataProcessor):
    """Processor for the SST-2 data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["sentence"].numpy().decode("utf-8"),
            None,
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "train.tsv"))
        return self._create_examples(lines, "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "dev.tsv"))
        return self._create_examples(lines, "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "test.tsv"))
        return self._create_examples(lines, "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training, dev and test sets."""
        is_test = set_type == "test"
        # The test split stores the sentence in column 1, train/dev in column 0.
        text_index = 1 if is_test else 0
        examples = []
        # Row 0 is the header; keep the original row index for the guid.
        for i, row in enumerate(lines[1:], start=1):
            examples.append(
                InputExample(
                    guid=f"{set_type}-{i}",
                    text_a=row[text_index],
                    text_b=None,
                    label=None if is_test else row[1],
                )
            )
        return examples
class StsbProcessor(DataProcessor):
    """Processor for the STS-B data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["sentence1"].numpy().decode("utf-8"),
            tensor_dict["sentence2"].numpy().decode("utf-8"),
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "train.tsv"))
        return self._create_examples(lines, "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "dev.tsv"))
        return self._create_examples(lines, "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "test.tsv"))
        return self._create_examples(lines, "test")

    def get_labels(self):
        """See base class."""
        # STS-B is a regression task, so there is no discrete label set.
        return [None]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training, dev and test sets."""
        examples = []
        for row in lines[1:]:  # skip the header row
            examples.append(
                InputExample(
                    guid=f"{set_type}-{row[0]}",
                    text_a=row[7],
                    text_b=row[8],
                    label=None if set_type == "test" else row[-1],
                )
            )
        return examples
class QqpProcessor(DataProcessor):
    """Processor for the QQP data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["question1"].numpy().decode("utf-8"),
            tensor_dict["question2"].numpy().decode("utf-8"),
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "train.tsv"))
        return self._create_examples(lines, "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "dev.tsv"))
        return self._create_examples(lines, "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "test.tsv"))
        return self._create_examples(lines, "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training, dev and test sets."""
        test_mode = set_type == "test"
        # Question columns differ between the test split (1/2) and train/dev (3/4).
        q1_index = 1 if test_mode else 3
        q2_index = 2 if test_mode else 4
        examples = []
        for row in lines[1:]:  # skip the header row
            guid = f"{set_type}-{row[0]}"
            try:
                text_a = row[q1_index]
                text_b = row[q2_index]
                label = None if test_mode else row[5]
            except IndexError:
                # Some QQP rows are truncated; skip them silently.
                continue
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
class QnliProcessor(DataProcessor):
    """Processor for the QNLI data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["question"].numpy().decode("utf-8"),
            tensor_dict["sentence"].numpy().decode("utf-8"),
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "train.tsv"))
        return self._create_examples(lines, "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "dev.tsv"))
        return self._create_examples(lines, "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "test.tsv"))
        return self._create_examples(lines, "test")

    def get_labels(self):
        """See base class."""
        return ["entailment", "not_entailment"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training, dev and test sets."""
        examples = []
        for row in lines[1:]:  # skip the header row
            examples.append(
                InputExample(
                    guid=f"{set_type}-{row[0]}",
                    text_a=row[1],
                    text_b=row[2],
                    label=None if set_type == "test" else row[-1],
                )
            )
        return examples
class RteProcessor(DataProcessor):
    """Processor for the RTE data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["sentence1"].numpy().decode("utf-8"),
            tensor_dict["sentence2"].numpy().decode("utf-8"),
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "train.tsv"))
        return self._create_examples(lines, "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "dev.tsv"))
        return self._create_examples(lines, "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "test.tsv"))
        return self._create_examples(lines, "test")

    def get_labels(self):
        """See base class."""
        return ["entailment", "not_entailment"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training, dev and test sets."""
        examples = []
        for row in lines[1:]:  # skip the header row
            examples.append(
                InputExample(
                    guid=f"{set_type}-{row[0]}",
                    text_a=row[1],
                    text_b=row[2],
                    label=None if set_type == "test" else row[-1],
                )
            )
        return examples
class WnliProcessor(DataProcessor):
    """Processor for the WNLI data set (GLUE version)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["sentence1"].numpy().decode("utf-8"),
            tensor_dict["sentence2"].numpy().decode("utf-8"),
            str(tensor_dict["label"].numpy()),
        )

    def get_train_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "train.tsv"))
        return self._create_examples(lines, "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "dev.tsv"))
        return self._create_examples(lines, "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "test.tsv"))
        return self._create_examples(lines, "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training, dev and test sets."""
        examples = []
        for row in lines[1:]:  # skip the header row
            examples.append(
                InputExample(
                    guid=f"{set_type}-{row[0]}",
                    text_a=row[1],
                    text_b=row[2],
                    label=None if set_type == "test" else row[-1],
                )
            )
        return examples
# Number of model outputs per GLUE task; "sts-b" is a regression task,
# hence a single output value.
glue_tasks_num_labels = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}
# Maps each GLUE task name to its DataProcessor implementation.
glue_processors = {
    "cola": ColaProcessor,
    "mnli": MnliProcessor,
    "mnli-mm": MnliMismatchedProcessor,
    "mrpc": MrpcProcessor,
    "sst-2": Sst2Processor,
    "sts-b": StsbProcessor,
    "qqp": QqpProcessor,
    "qnli": QnliProcessor,
    "rte": RteProcessor,
    "wnli": WnliProcessor,
}
# Problem type per GLUE task; only STS-B is a regression problem.
glue_output_modes = {
    "cola": "classification",
    "mnli": "classification",
    "mnli-mm": "classification",
    "mrpc": "classification",
    "sst-2": "classification",
    "sts-b": "regression",
    "qqp": "classification",
    "qnli": "classification",
    "rte": "classification",
    "wnli": "classification",
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 13,829 | src/transformers/data/processors/utils.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import dataclasses
import json
from dataclasses import dataclass
from typing import List, Optional, Union
from ...utils import is_tf_available, is_torch_available, logging
logger = logging.get_logger(__name__)
@dataclass
class InputExample:
    """
    A single training/test example for simple sequence classification.

    Args:
        guid: Unique id for the example.
        text_a: string. The untokenized text of the first sequence. For single
            sequence tasks, only this sequence must be specified.
        text_b: (Optional) string. The untokenized text of the second sequence.
            Only must be specified for sequence pair tasks.
        label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
    """

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None

    def to_json_string(self):
        """Serializes this instance to a JSON string (pretty-printed, newline-terminated)."""
        payload = dataclasses.asdict(self)
        return json.dumps(payload, indent=2) + "\n"
@dataclass(frozen=True)
class InputFeatures:
    """
    A single set of features of data. Property names are the same names as the corresponding inputs to a model.

    Args:
        input_ids: Indices of input sequence tokens in the vocabulary.
        attention_mask: Mask to avoid performing attention on padding token indices.
            Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded)
            tokens.
        token_type_ids: (Optional) Segment token indices to indicate first and second
            portions of the inputs. Only some models use them.
        label: (Optional) Label corresponding to the input. Int for classification problems,
            float for regression problems.
    """

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None

    def to_json_string(self):
        """Serializes this instance to a JSON string (compact, newline-terminated)."""
        as_dict = dataclasses.asdict(self)
        return json.dumps(as_dict) + "\n"
class DataProcessor:
    """Base class for data converters for sequence classification data sets."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """
        Gets an example from a dict with tensorflow tensors.

        Args:
            tensor_dict: Keys and values should match the corresponding Glue
                tensorflow_dataset examples.
        """
        raise NotImplementedError()

    def get_train_examples(self, data_dir):
        """Gets a collection of [`InputExample`] for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of [`InputExample`] for the dev set."""
        raise NotImplementedError()

    def get_test_examples(self, data_dir):
        """Gets a collection of [`InputExample`] for the test set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    def tfds_map(self, example):
        """
        Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. This method converts
        examples to the correct format.
        """
        labels = self.get_labels()
        if len(labels) > 1:
            # Map an integer label index back to its label string.
            example.label = labels[int(example.label)]
        return example

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file."""
        # "utf-8-sig" transparently strips a UTF-8 BOM when present.
        with open(input_file, "r", encoding="utf-8-sig") as f:
            return [row for row in csv.reader(f, delimiter="\t", quotechar=quotechar)]
class SingleSentenceClassificationProcessor(DataProcessor):
    """Generic processor for a single sentence classification data set.

    Stores a flat list of `InputExample`s plus the label vocabulary, and can
    convert the stored examples into model-ready `InputFeatures`.
    """

    def __init__(self, labels=None, examples=None, mode="classification", verbose=False):
        # Fresh lists by default (avoids the shared-mutable-default pitfall).
        self.labels = [] if labels is None else labels
        self.examples = [] if examples is None else examples
        self.mode = mode  # "classification" or "regression"
        self.verbose = verbose

    def __len__(self):
        # Number of stored examples.
        return len(self.examples)

    def __getitem__(self, idx):
        # Slicing returns a new processor over the sliced examples (the label
        # list is shared, not copied); integer indexing returns one example.
        if isinstance(idx, slice):
            return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx])
        return self.examples[idx]

    @classmethod
    def create_from_csv(
        cls, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs
    ):
        """Build a processor and populate it from a TSV file in one step."""
        processor = cls(**kwargs)
        processor.add_examples_from_csv(
            file_name,
            split_name=split_name,
            column_label=column_label,
            column_text=column_text,
            column_id=column_id,
            skip_first_row=skip_first_row,
            overwrite_labels=True,
            overwrite_examples=True,
        )
        return processor

    @classmethod
    def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs):
        """Build a processor and populate it from in-memory texts (and labels)."""
        processor = cls(**kwargs)
        processor.add_examples(texts_or_text_and_labels, labels=labels)
        return processor

    def add_examples_from_csv(
        self,
        file_name,
        split_name="",
        column_label=0,
        column_text=1,
        column_id=None,
        skip_first_row=False,
        overwrite_labels=False,
        overwrite_examples=False,
    ):
        """Read a tab-separated file and append its rows as examples.

        Args:
            file_name: Path of the TSV file to read.
            split_name: Optional split prefix used when synthesizing guids.
            column_label / column_text / column_id: Column indices for the
                label, the text, and (optionally) an explicit example id.
            skip_first_row: Skip a header row if present.
            overwrite_labels / overwrite_examples: Forwarded to `add_examples`.
        """
        lines = self._read_tsv(file_name)
        if skip_first_row:
            lines = lines[1:]
        texts = []
        labels = []
        ids = []
        for i, line in enumerate(lines):
            texts.append(line[column_text])
            labels.append(line[column_label])
            if column_id is not None:
                ids.append(line[column_id])
            else:
                # No id column: synthesize a guid from split name and row index.
                guid = f"{split_name}-{i}" if split_name else str(i)
                ids.append(guid)
        return self.add_examples(
            texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples
        )

    def add_examples(
        self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False
    ):
        """Append examples (and extend the label vocabulary) from in-memory data.

        `texts_or_text_and_labels` may hold plain texts or `(text, label)`
        pairs; pairs are only unpacked when `labels` is not supplied.

        Raises:
            ValueError: when `labels` or `ids` lengths do not match the texts.

        Returns:
            The full example list after the update.
        """
        if labels is not None and len(texts_or_text_and_labels) != len(labels):
            raise ValueError(
                f"Text and labels have mismatched lengths {len(texts_or_text_and_labels)} and {len(labels)}"
            )
        if ids is not None and len(texts_or_text_and_labels) != len(ids):
            raise ValueError(f"Text and ids have mismatched lengths {len(texts_or_text_and_labels)} and {len(ids)}")
        if ids is None:
            ids = [None] * len(texts_or_text_and_labels)
        if labels is None:
            labels = [None] * len(texts_or_text_and_labels)
        examples = []
        added_labels = set()
        for text_or_text_and_label, label, guid in zip(texts_or_text_and_labels, labels, ids):
            if isinstance(text_or_text_and_label, (tuple, list)) and label is None:
                text, label = text_or_text_and_label
            else:
                text = text_or_text_and_label
            added_labels.add(label)
            examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label))
        # Update examples
        if overwrite_examples:
            self.examples = examples
        else:
            self.examples.extend(examples)
        # Update labels
        if overwrite_labels:
            self.labels = list(added_labels)
        else:
            self.labels = list(set(self.labels).union(added_labels))
        return self.examples

    def get_features(
        self,
        tokenizer,
        max_length=None,
        pad_on_left=False,
        pad_token=0,
        mask_padding_with_zero=True,
        return_tensors=None,
    ):
        """
        Convert examples in a list of `InputFeatures`

        Args:
            tokenizer: Instance of a tokenizer that will tokenize the examples
            max_length: Maximum example length
            pad_on_left: If set to `True`, the examples will be padded on the left rather than on the right (default)
            pad_token: Padding token
            mask_padding_with_zero: If set to `True`, the attention mask will be filled by `1` for actual values
                and by `0` for padded values. If set to `False`, inverts it (`1` for padded values, `0` for actual
                values)
            return_tensors: `None` (list of `InputFeatures`), `"tf"` or `"pt"`

        Returns:
            If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the
            task-specific features. If the input is a list of `InputExamples`, will return a list of task-specific
            `InputFeatures` which can be fed to the model.
        """
        if max_length is None:
            # NOTE(review): `tokenizer.max_len` is the pre-v4 attribute name
            # (now `model_max_length`) — confirm the tokenizer still exposes it.
            max_length = tokenizer.max_len
        label_map = {label: i for i, label in enumerate(self.labels)}
        all_input_ids = []
        # First pass: tokenize every example.
        for ex_index, example in enumerate(self.examples):
            if ex_index % 10000 == 0:
                logger.info(f"Tokenizing example {ex_index}")
            input_ids = tokenizer.encode(
                example.text_a,
                add_special_tokens=True,
                max_length=min(max_length, tokenizer.max_len),
            )
            all_input_ids.append(input_ids)
        # Second pass: pad every sequence to the longest one in this batch.
        batch_length = max(len(input_ids) for input_ids in all_input_ids)
        features = []
        for ex_index, (input_ids, example) in enumerate(zip(all_input_ids, self.examples)):
            if ex_index % 10000 == 0:
                logger.info(f"Writing example {ex_index}/{len(self.examples)}")
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            # Zero-pad up to the sequence length.
            padding_length = batch_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
            else:
                input_ids = input_ids + ([pad_token] * padding_length)
                attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
            if len(input_ids) != batch_length:
                raise ValueError(f"Error with input length {len(input_ids)} vs {batch_length}")
            if len(attention_mask) != batch_length:
                raise ValueError(f"Error with input length {len(attention_mask)} vs {batch_length}")
            if self.mode == "classification":
                label = label_map[example.label]
            elif self.mode == "regression":
                label = float(example.label)
            else:
                raise ValueError(self.mode)
            if ex_index < 5 and self.verbose:
                logger.info("*** Example ***")
                logger.info(f"guid: {example.guid}")
                logger.info(f"input_ids: {' '.join([str(x) for x in input_ids])}")
                logger.info(f"attention_mask: {' '.join([str(x) for x in attention_mask])}")
                logger.info(f"label: {example.label} (id = {label})")
            features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label))
        if return_tensors is None:
            return features
        elif return_tensors == "tf":
            if not is_tf_available():
                raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported")
            import tensorflow as tf

            def gen():
                # Stream features as (inputs, label) pairs for tf.data.
                for ex in features:
                    yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label)

            dataset = tf.data.Dataset.from_generator(
                gen,
                ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])),
            )
            return dataset
        elif return_tensors == "pt":
            if not is_torch_available():
                raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported")
            import torch
            from torch.utils.data import TensorDataset

            all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
            all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
            if self.mode == "classification":
                all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
            elif self.mode == "regression":
                all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
            dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)
            return dataset
        else:
            raise ValueError("return_tensors should be one of 'tf' or 'pt'")
|
27182812/ChatGLM-LLaMA-chinese-insturct | 3,489 | src/transformers/data/processors/xnli.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" XNLI utils (dataset loading and evaluation)"""
import os
from ...utils import logging
from .utils import DataProcessor, InputExample
logger = logging.get_logger(__name__)
class XnliProcessor(DataProcessor):
    """
    Processor for the XNLI dataset. Adapted from
    https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/run_classifier.py#L207
    """

    def __init__(self, language, train_language=None):
        self.language = language
        self.train_language = train_language

    def get_train_examples(self, data_dir):
        """See base class."""
        lg = self.language if self.train_language is None else self.train_language
        lines = self._read_tsv(os.path.join(data_dir, f"XNLI-MT-1.0/multinli/multinli.train.{lg}.tsv"))
        examples = []
        # Skip the header row but keep the original row index for the guid.
        for i, row in enumerate(lines[1:], start=1):
            guid = f"train-{i}"
            text_a = row[0]
            text_b = row[1]
            # The MT training files use "contradictory" where XNLI uses "contradiction".
            label = "contradiction" if row[2] == "contradictory" else row[2]
            if not isinstance(text_a, str):
                raise ValueError(f"Training input {text_a} is not a string")
            if not isinstance(text_b, str):
                raise ValueError(f"Training input {text_b} is not a string")
            if not isinstance(label, str):
                raise ValueError(f"Training label {label} is not a string")
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples

    def get_test_examples(self, data_dir):
        """See base class."""
        lines = self._read_tsv(os.path.join(data_dir, "XNLI-1.0/xnli.test.tsv"))
        examples = []
        # Skip the header row but keep the original row index for the guid.
        for i, row in enumerate(lines[1:], start=1):
            # The combined test file mixes languages; keep only ours.
            if row[0] != self.language:
                continue
            guid = f"test-{i}"
            text_a = row[6]
            text_b = row[7]
            label = row[1]
            if not isinstance(text_a, str):
                raise ValueError(f"Training input {text_a} is not a string")
            if not isinstance(text_b, str):
                raise ValueError(f"Training input {text_b} is not a string")
            if not isinstance(label, str):
                raise ValueError(f"Training label {label} is not a string")
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]
# Module-level registries mirroring the GLUE-style tables.
xnli_processors = {
    "xnli": XnliProcessor,
}
# XNLI is a three-way classification task.
xnli_output_modes = {
    "xnli": "classification",
}
xnli_tasks_num_labels = {
    "xnli": 3,
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 29,698 | src/transformers/data/metrics/squad_metrics.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Very heavily inspired by the official evaluation script for SQuAD version 2.0 which was modified by XLNet authors to
update `find_best_threshold` scripts for SQuAD V2.0
In addition to basic functionality, we also compute additional statistics and plot precision-recall curves if an
additional na_prob.json file is provided. This file is expected to map question ID's to the model's predicted
probability that a question is unanswerable.
"""
import collections
import json
import math
import re
import string
from ...models.bert import BasicTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    text = s.lower()
    # Strip punctuation character by character.
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    # Drop English articles (whole words only).
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    # Collapse all runs of whitespace into single spaces.
    return " ".join(text.split())
def get_tokens(s):
    """Tokenize a normalized answer; empty/None input yields no tokens."""
    return normalize_answer(s).split() if s else []
def compute_exact(a_gold, a_pred):
    """Return 1 when the normalized answers match exactly, else 0."""
    return 1 if normalize_answer(a_gold) == normalize_answer(a_pred) else 0
def compute_f1(a_gold, a_pred):
    """Token-level F1 between a gold answer and a prediction."""
    gold = get_tokens(a_gold)
    pred = get_tokens(a_pred)
    if not gold or not pred:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold == pred)
    overlap = collections.Counter(gold) & collections.Counter(pred)
    num_same = sum(overlap.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred)
    recall = 1.0 * num_same / len(gold)
    return (2 * precision * recall) / (precision + recall)
def get_raw_scores(examples, preds):
    """
    Computes the exact and f1 scores from the examples and the model predictions
    """
    exact_scores = {}
    f1_scores = {}
    for example in examples:
        qas_id = example.qas_id
        # Keep only gold answers that survive normalization; an unanswerable
        # question is represented by the single empty-string answer.
        gold_answers = [answer["text"] for answer in example.answers if normalize_answer(answer["text"])]
        if not gold_answers:
            gold_answers = [""]
        if qas_id not in preds:
            print(f"Missing prediction for {qas_id}")
            continue
        prediction = preds[qas_id]
        # Score against the best-matching gold answer.
        exact_scores[qas_id] = max(compute_exact(gold, prediction) for gold in gold_answers)
        f1_scores[qas_id] = max(compute_f1(gold, prediction) for gold in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Replace scores with the no-answer outcome where the model's no-answer
    probability exceeds the threshold."""
    adjusted = {}
    for qid, score in scores.items():
        if na_probs[qid] > na_prob_thresh:
            # Model predicts no-answer: correct (1.0) only if the question
            # truly has no answer.
            adjusted[qid] = float(not qid_to_has_ans[qid])
        else:
            adjusted[qid] = score
    return adjusted
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    """Aggregate per-question scores into an ordered summary dict.

    When `qid_list` is given, only those questions are averaged.
    """
    if qid_list:
        total = len(qid_list)
        exact_sum = sum(exact_scores[qid] for qid in qid_list)
        f1_sum = sum(f1_scores[qid] for qid in qid_list)
    else:
        total = len(exact_scores)
        exact_sum = sum(exact_scores.values())
        f1_sum = sum(f1_scores.values())
    return collections.OrderedDict(
        [
            ("exact", 100.0 * exact_sum / total),
            ("f1", 100.0 * f1_sum / total),
            ("total", total),
        ]
    )
def merge_eval(main_eval, new_eval, prefix):
    """Copy every entry of `new_eval` into `main_eval` under a prefixed key."""
    for key, value in new_eval.items():
        main_eval[f"{prefix}_{key}"] = value
def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans):
    """Sweep no-answer thresholds; return (best score %, best threshold,
    average score over has-answer questions)."""
    # Baseline: predict no-answer for everything.
    cur_score = sum(1 for qid in qid_to_has_ans if not qid_to_has_ans[qid])
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=na_probs.get)
    # Flip questions to "answered" in order of increasing no-answer
    # probability, tracking the running score.
    for qid in qid_list:
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            delta = scores[qid]
        elif preds[qid]:
            delta = -1
        else:
            delta = 0
        cur_score += delta
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    # Average score over the answerable questions only.
    has_ans_score = 0
    has_ans_cnt = 0
    for qid in qid_list:
        if not qid_to_has_ans[qid]:
            continue
        has_ans_cnt += 1
        if qid in scores:
            has_ans_score += scores[qid]
    return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt
def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    """Record best thresholded exact/F1 (plus has-answer stats) in `main_eval`."""
    exact_best, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans)
    f1_best, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval.update(
        {
            "best_exact": exact_best,
            "best_exact_thresh": exact_thresh,
            "best_f1": f1_best,
            "best_f1_thresh": f1_thresh,
            "has_ans_exact": has_ans_exact,
            "has_ans_f1": has_ans_f1,
        }
    )
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Sweep no-answer probability thresholds; return (best score %, best threshold)."""
    # Baseline: predict no-answer for everything.
    cur_score = sum(1 for qid in qid_to_has_ans if not qid_to_has_ans[qid])
    best_score = cur_score
    best_thresh = 0.0
    # Flip questions to "answered" in order of increasing no-answer
    # probability, tracking the running score.
    for qid in sorted(na_probs, key=na_probs.get):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            delta = scores[qid]
        elif preds[qid]:
            delta = -1
        else:
            delta = 0
        cur_score += delta
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    """Store the best achievable exact/F1 scores and their thresholds in `main_eval`."""
    exact_best, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    f1_best, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval.update(
        {
            "best_exact": exact_best,
            "best_exact_thresh": exact_thresh,
            "best_f1": f1_best,
            "best_f1_thresh": f1_thresh,
        }
    )
def squad_evaluate(examples, preds, no_answer_probs=None, no_answer_probability_threshold=1.0):
    """Compute SQuAD-style exact-match / F1 metrics, with optional no-answer
    probability thresholding (SQuAD v2)."""
    qas_id_to_has_answer = {example.qas_id: bool(example.answers) for example in examples}
    has_answer_qids = [qid for qid, has_answer in qas_id_to_has_answer.items() if has_answer]
    no_answer_qids = [qid for qid, has_answer in qas_id_to_has_answer.items() if not has_answer]

    if no_answer_probs is None:
        # Without explicit probabilities, treat every prediction as answerable.
        no_answer_probs = dict.fromkeys(preds, 0.0)

    exact, f1 = get_raw_scores(examples, preds)

    exact_threshold = apply_no_ans_threshold(
        exact, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold
    )
    f1_threshold = apply_no_ans_threshold(f1, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold)

    evaluation = make_eval_dict(exact_threshold, f1_threshold)
    if has_answer_qids:
        merge_eval(evaluation, make_eval_dict(exact_threshold, f1_threshold, qid_list=has_answer_qids), "HasAns")
    if no_answer_qids:
        merge_eval(evaluation, make_eval_dict(exact_threshold, f1_threshold, qid_list=no_answer_qids), "NoAns")
    if no_answer_probs:
        find_all_best_thresh(evaluation, preds, exact, f1, no_answer_probs, qas_id_to_has_answer)
    return evaluation
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
    """Project the tokenized prediction back to the original text.

    Returns the span of `orig_text` that corresponds to `pred_text` after a
    space-insensitive character alignment; on any alignment failure the whole
    `orig_text` is returned unchanged.
    """
    # When we created the data, we kept track of the alignment between original
    # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
    # now `orig_text` contains the span of our original text corresponding to the
    # span that we predicted.
    #
    # However, `orig_text` may contain extra characters that we don't want in
    # our prediction.
    #
    # For example, let's say:
    #   pred_text = steve smith
    #   orig_text = Steve Smith's
    #
    # We don't want to return `orig_text` because it contains the extra "'s".
    #
    # We don't want to return `pred_text` because it's already been normalized
    # (the SQuAD eval script also does punctuation stripping/lower casing but
    # our tokenizer does additional normalization like stripping accent
    # characters).
    #
    # What we really want to return is "Steve Smith".
    #
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
    # can fail in certain cases in which case we just return `orig_text`.

    def _strip_spaces(text):
        # Drop all spaces, remembering for each kept character the index it
        # occupied in the original (spaced) string.
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for i, c in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return (ns_text, ns_to_s_map)

    # We first tokenize `orig_text`, strip whitespace from the result
    # and `pred_text`, and check if they are the same length. If they are
    # NOT the same length, the heuristic has failed. If they are the same
    # length, we assume the characters are one-to-one aligned.
    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)

    tok_text = " ".join(tokenizer.tokenize(orig_text))

    start_position = tok_text.find(pred_text)
    if start_position == -1:
        if verbose_logging:
            logger.info(f"Unable to find text: '{pred_text}' in '{orig_text}'")
        return orig_text
    end_position = start_position + len(pred_text) - 1

    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)

    if len(orig_ns_text) != len(tok_ns_text):
        if verbose_logging:
            logger.info(f"Length not equal after stripping spaces: '{orig_ns_text}' vs '{tok_ns_text}'")
        return orig_text

    # We then project the characters in `pred_text` back to `orig_text` using
    # the character-to-character alignment.
    tok_s_to_ns_map = {}
    for i, tok_index in tok_ns_to_s_map.items():
        tok_s_to_ns_map[tok_index] = i

    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]

    if orig_start_position is None:
        if verbose_logging:
            logger.info("Couldn't map start position")
        return orig_text

    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]

    if orig_end_position is None:
        if verbose_logging:
            logger.info("Couldn't map end position")
        return orig_text

    output_text = orig_text[orig_start_position : (orig_end_position + 1)]
    return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
def compute_predictions_logits(
    all_examples,
    all_features,
    all_results,
    n_best_size,
    max_answer_length,
    do_lower_case,
    output_prediction_file,
    output_nbest_file,
    output_null_log_odds_file,
    verbose_logging,
    version_2_with_negative,
    null_score_diff_threshold,
    tokenizer,
):
    """Write final predictions to the json file and log-odds of null if needed.

    For every example, picks the best answer span from the per-feature start/end
    logits in `all_results`, maps it back to the original text, and returns an
    OrderedDict of qas_id -> predicted answer string. When the corresponding
    output paths are given, also dumps predictions, n-best lists and (SQuAD v2)
    null-score differences as JSON.
    """
    if output_prediction_file:
        logger.info(f"Writing predictions to: {output_prediction_file}")
    if output_nbest_file:
        logger.info(f"Writing nbest to: {output_nbest_file}")
    if output_null_log_odds_file and version_2_with_negative:
        logger.info(f"Writing null_log_odds to: {output_null_log_odds_file}")

    # Long documents are split into several overlapping features; group the
    # features by their source example and index results by feature unique_id.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)

    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result

    _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]
    )

    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()

    for example_index, example in enumerate(all_examples):
        features = example_index_to_features[example_index]

        prelim_predictions = []
        # keep track of the minimum score of null start+end of position 0
        score_null = 1000000  # large and positive
        min_null_feature_index = 0  # the paragraph slice with min null score
        null_start_logit = 0  # the start logit at the slice with min null score
        null_end_logit = 0  # the end logit at the slice with min null score
        for feature_index, feature in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            # if we could have irrelevant answers, get the min score of irrelevant
            if version_2_with_negative:
                feature_null_score = result.start_logits[0] + result.end_logits[0]
                if feature_null_score < score_null:
                    score_null = feature_null_score
                    min_null_feature_index = feature_index
                    null_start_logit = result.start_logits[0]
                    null_end_logit = result.end_logits[0]
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions.
                    if start_index >= len(feature.tokens):
                        continue
                    if end_index >= len(feature.tokens):
                        continue
                    if start_index not in feature.token_to_orig_map:
                        continue
                    if end_index not in feature.token_to_orig_map:
                        continue
                    if not feature.token_is_max_context.get(start_index, False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_logit=result.start_logits[start_index],
                            end_logit=result.end_logits[end_index],
                        )
                    )
        if version_2_with_negative:
            # Also consider the "no answer" candidate from the slice with the
            # lowest null score.
            prelim_predictions.append(
                _PrelimPrediction(
                    feature_index=min_null_feature_index,
                    start_index=0,
                    end_index=0,
                    start_logit=null_start_logit,
                    end_logit=null_end_logit,
                )
            )
        prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)

        _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
            "NbestPrediction", ["text", "start_logit", "end_logit"]
        )

        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]
            if pred.start_index > 0:  # this is a non-null prediction
                tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
                orig_doc_start = feature.token_to_orig_map[pred.start_index]
                orig_doc_end = feature.token_to_orig_map[pred.end_index]
                orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]

                tok_text = tokenizer.convert_tokens_to_string(tok_tokens)

                # tok_text = " ".join(tok_tokens)
                #
                # # De-tokenize WordPieces that have been split off.
                # tok_text = tok_text.replace(" ##", "")
                # tok_text = tok_text.replace("##", "")

                # Clean whitespace
                tok_text = tok_text.strip()
                tok_text = " ".join(tok_text.split())
                orig_text = " ".join(orig_tokens)

                final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
                # Deduplicate candidates that map to the same final text.
                if final_text in seen_predictions:
                    continue

                seen_predictions[final_text] = True
            else:
                final_text = ""
                seen_predictions[final_text] = True

            nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit))
        # if we didn't include the empty option in the n-best, include it
        if version_2_with_negative:
            if "" not in seen_predictions:
                nbest.append(_NbestPrediction(text="", start_logit=null_start_logit, end_logit=null_end_logit))

            # In very rare edge cases we could only have single null prediction.
            # So we just create a nonce prediction in this case to avoid failure.
            if len(nbest) == 1:
                nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))

        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))

        if len(nbest) < 1:
            raise ValueError("No valid predictions")

        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)
            if not best_non_null_entry:
                if entry.text:
                    best_non_null_entry = entry

        probs = _compute_softmax(total_scores)

        nbest_json = []
        for i, entry in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_logit"] = entry.start_logit
            output["end_logit"] = entry.end_logit
            nbest_json.append(output)

        if len(nbest_json) < 1:
            raise ValueError("No valid predictions")

        if not version_2_with_negative:
            all_predictions[example.qas_id] = nbest_json[0]["text"]
        else:
            # predict "" iff the null score - the score of best non-null > threshold
            score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit)
            scores_diff_json[example.qas_id] = score_diff
            if score_diff > null_score_diff_threshold:
                all_predictions[example.qas_id] = ""
            else:
                all_predictions[example.qas_id] = best_non_null_entry.text
        all_nbest_json[example.qas_id] = nbest_json

    if output_prediction_file:
        with open(output_prediction_file, "w") as writer:
            writer.write(json.dumps(all_predictions, indent=4) + "\n")

    if output_nbest_file:
        with open(output_nbest_file, "w") as writer:
            writer.write(json.dumps(all_nbest_json, indent=4) + "\n")

    if output_null_log_odds_file and version_2_with_negative:
        with open(output_null_log_odds_file, "w") as writer:
            writer.write(json.dumps(scores_diff_json, indent=4) + "\n")

    return all_predictions
def compute_predictions_log_probs(
    all_examples,
    all_features,
    all_results,
    n_best_size,
    max_answer_length,
    output_prediction_file,
    output_nbest_file,
    output_null_log_odds_file,
    start_n_top,
    end_n_top,
    version_2_with_negative,
    tokenizer,
    verbose_logging,
):
    """
    XLNet write prediction logic (more complex than Bert's). Write final predictions to the json file and log-odds of
    null if needed.

    Requires utils_squad_evaluate.py

    Unlike the BERT variant, candidates come from the model's pre-selected
    `start_n_top` x `end_n_top` index/log-prob pairs, and the null score is the
    per-feature `cls_logits` (minimum across a document's features).
    """
    _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_log_prob", "end_log_prob"]
    )

    _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "NbestPrediction", ["text", "start_log_prob", "end_log_prob"]
    )

    logger.info(f"Writing predictions to: {output_prediction_file}")

    # Group the overlapping features of each example; index results by unique_id.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)

    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result

    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()

    for example_index, example in enumerate(all_examples):
        features = example_index_to_features[example_index]

        prelim_predictions = []
        # keep track of the minimum score of null start+end of position 0
        score_null = 1000000  # large and positive

        for feature_index, feature in enumerate(features):
            result = unique_id_to_result[feature.unique_id]

            cur_null_score = result.cls_logits

            # if we could have irrelevant answers, get the min score of irrelevant
            score_null = min(score_null, cur_null_score)

            for i in range(start_n_top):
                for j in range(end_n_top):
                    start_log_prob = result.start_logits[i]
                    start_index = result.start_top_index[i]

                    # end candidates are stored flattened: one row of end_n_top
                    # entries per start candidate
                    j_index = i * end_n_top + j

                    end_log_prob = result.end_logits[j_index]
                    end_index = result.end_top_index[j_index]
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions.
                    if start_index >= feature.paragraph_len - 1:
                        continue
                    if end_index >= feature.paragraph_len - 1:
                        continue

                    if not feature.token_is_max_context.get(start_index, False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue

                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_log_prob=start_log_prob,
                            end_log_prob=end_log_prob,
                        )
                    )

        prelim_predictions = sorted(
            prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True
        )

        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]

            # XLNet un-tokenizer
            # Let's keep it simple for now and see if we need all this later.
            #
            # tok_start_to_orig_index = feature.tok_start_to_orig_index
            # tok_end_to_orig_index = feature.tok_end_to_orig_index
            # start_orig_pos = tok_start_to_orig_index[pred.start_index]
            # end_orig_pos = tok_end_to_orig_index[pred.end_index]
            # paragraph_text = example.paragraph_text
            # final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()

            # Previously used Bert untokenizer
            tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]
            tok_text = tokenizer.convert_tokens_to_string(tok_tokens)

            # Clean whitespace
            tok_text = tok_text.strip()
            tok_text = " ".join(tok_text.split())
            orig_text = " ".join(orig_tokens)

            # Tokenizers expose the lowercasing flag under different names
            # (BERT-style vs. XLM-style).
            if hasattr(tokenizer, "do_lower_case"):
                do_lower_case = tokenizer.do_lower_case
            else:
                do_lower_case = tokenizer.do_lowercase_and_remove_accent

            final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)

            if final_text in seen_predictions:
                continue

            seen_predictions[final_text] = True

            nbest.append(
                _NbestPrediction(text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob)
            )

        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(_NbestPrediction(text="", start_log_prob=-1e6, end_log_prob=-1e6))

        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append(entry.start_log_prob + entry.end_log_prob)
            if not best_non_null_entry:
                best_non_null_entry = entry

        probs = _compute_softmax(total_scores)

        nbest_json = []
        for i, entry in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_log_prob"] = entry.start_log_prob
            output["end_log_prob"] = entry.end_log_prob
            nbest_json.append(output)

        if len(nbest_json) < 1:
            raise ValueError("No valid predictions")
        if best_non_null_entry is None:
            raise ValueError("No valid predictions")

        score_diff = score_null
        scores_diff_json[example.qas_id] = score_diff
        # note(zhiliny): always predict best_non_null_entry
        # and the evaluation script will search for the best threshold
        all_predictions[example.qas_id] = best_non_null_entry.text

        all_nbest_json[example.qas_id] = nbest_json

    with open(output_prediction_file, "w") as writer:
        writer.write(json.dumps(all_predictions, indent=4) + "\n")

    with open(output_nbest_file, "w") as writer:
        writer.write(json.dumps(all_nbest_json, indent=4) + "\n")

    if version_2_with_negative:
        with open(output_null_log_odds_file, "w") as writer:
            writer.write(json.dumps(scores_diff_json, indent=4) + "\n")

    return all_predictions
|
27182812/ChatGLM-LLaMA-chinese-insturct | 3,607 | src/transformers/data/metrics/__init__.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
# Shared deprecation notice emitted (as a FutureWarning) by every metric helper
# in this module.
DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the labels (element-wise mean of equality)."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    matches = preds == labels
    return matches.mean()
def acc_and_f1(preds, labels):
    """Accuracy, F1, and their average, returned as a dict."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    accuracy = simple_accuracy(preds, labels)
    f1_value = f1_score(y_true=labels, y_pred=preds)
    return {"acc": accuracy, "f1": f1_value, "acc_and_f1": (accuracy + f1_value) / 2}
def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlations between predictions and labels, plus their mean."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    r_pearson = pearsonr(preds, labels)[0]
    r_spearman = spearmanr(preds, labels)[0]
    return {
        "pearson": r_pearson,
        "spearmanr": r_spearman,
        "corr": (r_pearson + r_spearman) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    """Compute the GLUE metric(s) for `task_name`; raises KeyError for unknown tasks."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    # mrpc and qqp use accuracy + F1
    if task_name in ("mrpc", "qqp"):
        return acc_and_f1(preds, labels)
    # sts-b is a regression task scored by correlation
    if task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    if task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    if task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    # the remaining tasks are scored by plain accuracy
    if task_name in ("sst-2", "qnli", "rte", "wnli", "hans"):
        return {"acc": simple_accuracy(preds, labels)}
    raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    """Compute accuracy for the XNLI task; raises KeyError for any other task name."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name != "xnli":
        raise KeyError(task_name)
    return {"acc": simple_accuracy(preds, labels)}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 9,219 | src/transformers/data/datasets/squad.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
# Config classes that map to a question-answering head, and their model-type
# strings (used to build the list of valid `model_type` choices below).
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    # Fix: the help text was a copy-paste of null_score_diff_threshold's; n_best_size
    # actually controls how many candidate predictions are generated/kept.
    n_best_size: int = field(
        default=20, metadata={"help": "The total number of n-best predictions to generate."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    # Dataset split selector: controls whether train or dev examples are loaded.
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    """
    Torch dataset of SQuAD features, built from cached features when available.

    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        # v2 examples may be unanswerable, so they need the v2 processor.
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                # No usable cache: read examples from disk and featurize them.
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        # These model families do not use token type ids.
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        # XLNet/XLM take extra inputs for their answer-verification heads.
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        # Labels are only attached for the training split.
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
|
27182812/ChatGLM-LLaMA-chinese-insturct | 23,718 | src/transformers/data/datasets/language_modeling.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pickle
import random
import time
import warnings
from typing import Dict, List, Optional
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
# Deprecation notice template emitted (as a FutureWarning) by every dataset in
# this module; formatted with a link to the recommended replacement script.
DEPRECATION_WARNING = (
    "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
    "library. You can have a look at this example script for pointers: {0}"
)
class TextDataset(Dataset):
    """
    Language-modeling dataset that tokenizes a whole file and chunks it into
    fixed-size blocks of token ids (last partial block is dropped).

    This will be superseded by a framework-agnostic approach soon.
    """

    def __init__(
        self,
        tokenizer: PreTrainedTokenizer,
        file_path: str,
        block_size: int,
        overwrite_cache=False,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            DEPRECATION_WARNING.format(
                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
            ),
            FutureWarning,
        )
        if os.path.isfile(file_path) is False:
            raise ValueError(f"Input file path {file_path} not found")

        # Reserve room for the special tokens added around each block.
        block_size = block_size - tokenizer.num_special_tokens_to_add(pair=False)

        directory, filename = os.path.split(file_path)
        # Fix: include the source filename in the cache key. `filename` was computed
        # but never used and the key ended in a constant, so different input files
        # in the same directory would collide on a single cache entry.
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else directory,
            f"cached_lm_{tokenizer.__class__.__name__}_{block_size}_{filename}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                start = time.time()
                with open(cached_features_file, "rb") as handle:
                    self.examples = pickle.load(handle)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
            else:
                logger.info(f"Creating features from dataset file at {directory}")

                self.examples = []
                with open(file_path, encoding="utf-8") as f:
                    text = f.read()

                tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))

                for i in range(0, len(tokenized_text) - block_size + 1, block_size):  # Truncate in block of block_size
                    self.examples.append(
                        tokenizer.build_inputs_with_special_tokens(tokenized_text[i : i + block_size])
                    )
                # Note that we are losing the last truncated example here for the sake of simplicity (no padding)
                # If your dataset is small, first you should look for a bigger one :-) and second you
                # can change this behavior by adding (model specific) padding.

                start = time.time()
                with open(cached_features_file, "wb") as handle:
                    pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i) -> torch.Tensor:
        return torch.tensor(self.examples[i], dtype=torch.long)
class LineByLineTextDataset(Dataset):
    """
    Language-modeling dataset that turns each non-blank line of a file into one
    tokenized example.

    This will be superseded by a framework-agnostic approach soon.
    """

    def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int):
        warnings.warn(
            DEPRECATION_WARNING.format(
                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
            ),
            FutureWarning,
        )
        if not os.path.isfile(file_path):
            raise ValueError(f"Input file path {file_path} not found")
        # Here, we do not cache the features, operating under the assumption
        # that we will soon use fast multithreaded tokenizers from the
        # `tokenizers` repo everywhere =)
        logger.info(f"Creating features from dataset file at {file_path}")

        # Keep only lines that contain at least one non-whitespace character.
        with open(file_path, encoding="utf-8") as reader:
            kept_lines = [text_line for text_line in reader.read().splitlines() if text_line.strip()]

        encoded = tokenizer(kept_lines, add_special_tokens=True, truncation=True, max_length=block_size)
        self.examples = [{"input_ids": torch.tensor(ids, dtype=torch.long)} for ids in encoded["input_ids"]]

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i) -> Dict[str, torch.tensor]:
        return self.examples[i]
class LineByLineWithRefDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.

    Line-by-line dataset that additionally loads a parallel JSON-lines reference
    file and attaches line i's reference to example i as a ``chinese_ref`` tensor
    (the deprecation pointer references the whole-word-masking example script).
    """

    def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, ref_path: str):
        """
        Args:
            tokenizer: tokenizer used to encode each text line.
            file_path: text file with one example per non-blank line.
            block_size: truncation limit passed to the tokenizer.
            ref_path: JSON-lines file; line i holds the reference data for text line i.

        Raises:
            ValueError: if either input file is missing, or the two files have a
                different number of non-blank lines.
        """
        warnings.warn(
            DEPRECATION_WARNING.format(
                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm_wwm.py"
            ),
            FutureWarning,
        )
        if os.path.isfile(file_path) is False:
            raise ValueError(f"Input file path {file_path} not found")
        if os.path.isfile(ref_path) is False:
            # Bug fix: this message previously interpolated `file_path`, pointing at the wrong file.
            raise ValueError(f"Ref file path {ref_path} not found")
        # Here, we do not cache the features, operating under the assumption
        # that we will soon use fast multithreaded tokenizers from the
        # `tokenizers` repo everywhere =)
        logger.info(f"Creating features from dataset file at {file_path}")
        logger.info(f"Use ref segment results at {ref_path}")
        with open(file_path, encoding="utf-8") as f:
            data = f.readlines()  # use this method to avoid delimiter '\u2029' to split a line
        data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]
        # Get ref inf from file
        with open(ref_path, encoding="utf-8") as f:
            ref = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
        if len(data) != len(ref):
            raise ValueError(
                f"Length of Input file should be equal to Ref file. But the length of {file_path} is {len(data)} "
                f"while length of {ref_path} is {len(ref)}"
            )

        batch_encoding = tokenizer(data, add_special_tokens=True, truncation=True, max_length=block_size)
        self.examples = batch_encoding["input_ids"]
        self.examples = [{"input_ids": torch.tensor(e, dtype=torch.long)} for e in self.examples]

        # Attach the per-line reference alongside the encoded text.
        n = len(self.examples)
        for i in range(n):
            self.examples[i]["chinese_ref"] = torch.tensor(ref[i], dtype=torch.long)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i) -> Dict[str, torch.tensor]:
        return self.examples[i]
class LineByLineWithSOPTextDataset(Dataset):
    """
    Dataset for sentence order prediction task, prepare sentence pairs for SOP task
    """

    def __init__(self, tokenizer: PreTrainedTokenizer, file_dir: str, block_size: int):
        """
        Walk every file in `file_dir`, split it into articles delimited by
        "<doc id=" / "</doc>" markers, and build SOP sentence-pair examples.

        Note: `article_lines[1:]` skips the first captured line of each article
        (presumably the article title — TODO confirm against the data format).
        """
        warnings.warn(
            DEPRECATION_WARNING.format(
                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
            ),
            FutureWarning,
        )
        if os.path.isdir(file_dir) is False:
            raise ValueError(f"{file_dir} is not a directory")
        logger.info(f"Creating features from dataset file folder at {file_dir}")
        self.examples = []
        # TODO: randomness could apply a random seed, ex. rng = random.Random(random_seed)
        # file path looks like ./dataset/wiki_1, ./dataset/wiki_2
        for file_name in os.listdir(file_dir):
            file_path = os.path.join(file_dir, file_name)
            if os.path.isfile(file_path) is False:
                raise ValueError(f"{file_path} is not a file")
            article_open = False
            with open(file_path, encoding="utf-8") as f:
                original_lines = f.readlines()
                article_lines = []
                for line in original_lines:
                    if "<doc id=" in line:
                        article_open = True
                    elif "</doc>" in line:
                        # End of an article: tokenize its lines and emit examples.
                        article_open = False
                        document = [
                            tokenizer.convert_tokens_to_ids(tokenizer.tokenize(line))
                            for line in article_lines[1:]
                            if (len(line) > 0 and not line.isspace())
                        ]
                        examples = self.create_examples_from_document(document, block_size, tokenizer)
                        self.examples.extend(examples)
                        article_lines = []
                    else:
                        if article_open:
                            article_lines.append(line)
        logger.info("Dataset parse finished.")

    def create_examples_from_document(self, document, block_size, tokenizer, short_seq_prob=0.1):
        """Creates examples for a single document."""
        # Account for special tokens
        max_num_tokens = block_size - tokenizer.num_special_tokens_to_add(pair=True)

        # We *usually* want to fill up the entire sequence since we are padding
        # to `block_size` anyways, so short sequences are generally wasted
        # computation. However, we *sometimes*
        # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
        # sequences to minimize the mismatch between pretraining and fine-tuning.
        # The `target_seq_length` is just a rough target however, whereas
        # `block_size` is a hard limit.
        target_seq_length = max_num_tokens
        if random.random() < short_seq_prob:
            target_seq_length = random.randint(2, max_num_tokens)

        # We DON'T just concatenate all of the tokens from a document into a long
        # sequence and choose an arbitrary split point because this would make the
        # next sentence prediction task too easy. Instead, we split the input into
        # segments "A" and "B" based on the actual "sentences" provided by the user
        # input.
        examples = []
        current_chunk = []  # a buffer stored current working segments
        current_length = 0
        i = 0
        while i < len(document):
            segment = document[i]  # get a segment
            if not segment:
                i += 1
                continue
            current_chunk.append(segment)  # add a segment to current chunk
            current_length += len(segment)  # overall token length
            # if current length goes to the target length or reaches the end of file, start building token a and b
            if i == len(document) - 1 or current_length >= target_seq_length:
                if current_chunk:
                    # `a_end` is how many segments from `current_chunk` go into the `A` (first) sentence.
                    a_end = 1
                    # if current chunk has more than 2 sentences, pick part of it `A` (first) sentence
                    if len(current_chunk) >= 2:
                        a_end = random.randint(1, len(current_chunk) - 1)
                    # token a
                    tokens_a = []
                    for j in range(a_end):
                        tokens_a.extend(current_chunk[j])
                    # token b
                    tokens_b = []
                    for j in range(a_end, len(current_chunk)):
                        tokens_b.extend(current_chunk[j])
                    if len(tokens_a) == 0 or len(tokens_b) == 0:
                        # NOTE(review): this `continue` skips the `i += 1` at the bottom of the
                        # loop, so the same segment is appended to `current_chunk` again on the
                        # next pass — pre-existing behavior, flagged here for awareness.
                        continue
                    # switch tokens_a and tokens_b randomly
                    if random.random() < 0.5:
                        is_next = False
                        tokens_a, tokens_b = tokens_b, tokens_a
                    else:
                        is_next = True

                    def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):
                        """Truncates a pair of sequences to a maximum sequence length."""
                        while True:
                            total_length = len(tokens_a) + len(tokens_b)
                            if total_length <= max_num_tokens:
                                break
                            trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
                            if not (len(trunc_tokens) >= 1):
                                raise ValueError("Sequence length to be truncated must be no less than one")
                            # We want to sometimes truncate from the front and sometimes from the
                            # back to add more randomness and avoid biases.
                            if random.random() < 0.5:
                                del trunc_tokens[0]
                            else:
                                trunc_tokens.pop()

                    truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)

                    if not (len(tokens_a) >= 1):
                        raise ValueError(f"Length of sequence a is {len(tokens_a)} which must be no less than 1")
                    if not (len(tokens_b) >= 1):
                        raise ValueError(f"Length of sequence b is {len(tokens_b)} which must be no less than 1")

                    # add special tokens
                    input_ids = tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b)
                    # add token type ids, 0 for sentence a, 1 for sentence b
                    token_type_ids = tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b)

                    # Label 0 = original order kept, 1 = the pair was swapped above.
                    example = {
                        "input_ids": torch.tensor(input_ids, dtype=torch.long),
                        "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
                        "sentence_order_label": torch.tensor(0 if is_next else 1, dtype=torch.long),
                    }
                    examples.append(example)
                current_chunk = []  # clear current chunk
                current_length = 0  # reset current text length
            i += 1  # go to next line
        return examples

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i) -> Dict[str, torch.tensor]:
        return self.examples[i]
class TextDatasetForNextSentencePrediction(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.

    Builds sentence-pair examples for BERT-style next sentence prediction (NSP).
    The input file has one sentence per line, with blank lines separating
    documents. Built features are pickled next to the input file and reloaded
    on subsequent runs unless `overwrite_cache` is set.
    """

    def __init__(
        self,
        tokenizer: PreTrainedTokenizer,
        file_path: str,
        block_size: int,
        overwrite_cache=False,
        short_seq_probability=0.1,
        nsp_probability=0.5,
    ):
        """
        Args:
            tokenizer: tokenizer used to encode sentences.
            file_path: input text file (one sentence per line, blank line = document break).
            block_size: upper bound on the tokenized pair length (including special tokens).
            overwrite_cache: ignore any existing cache file and rebuild features.
            short_seq_probability: chance of aiming for a shorter-than-max target length.
            nsp_probability: chance of pairing sentence A with text from a random document.
        """
        warnings.warn(
            DEPRECATION_WARNING.format(
                "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
            ),
            FutureWarning,
        )
        if not os.path.isfile(file_path):
            raise ValueError(f"Input file path {file_path} not found")

        self.short_seq_probability = short_seq_probability
        self.nsp_probability = nsp_probability

        directory, filename = os.path.split(file_path)
        # Bug fix: the cache name previously ended in the literal "(unknown)" instead of the
        # input file name, so two different input files in the same directory (same tokenizer
        # and block size) would collide on one cache file. `filename` was computed above but
        # never used.
        cached_features_file = os.path.join(
            directory,
            f"cached_nsp_{tokenizer.__class__.__name__}_{block_size}_{filename}",
        )

        self.tokenizer = tokenizer

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"

        # Input file format:
        # (1) One sentence per line. These should ideally be actual sentences, not
        # entire paragraphs or arbitrary spans of text. (Because we use the
        # sentence boundaries for the "next sentence prediction" task).
        # (2) Blank lines between documents. Document boundaries are needed so
        # that the "next sentence prediction" task doesn't span between documents.
        #
        # Example:
        # I am very happy.
        # Here is the second sentence.
        #
        # A new document.

        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                start = time.time()
                with open(cached_features_file, "rb") as handle:
                    self.examples = pickle.load(handle)
                # Consistent f-string formatting (previously mixed f-string + %-style args).
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
            else:
                logger.info(f"Creating features from dataset file at {directory}")

                self.documents = [[]]
                with open(file_path, encoding="utf-8") as f:
                    while True:
                        line = f.readline()
                        if not line:
                            break
                        line = line.strip()

                        # Empty lines are used as document delimiters
                        if not line and len(self.documents[-1]) != 0:
                            self.documents.append([])
                        tokens = tokenizer.tokenize(line)
                        tokens = tokenizer.convert_tokens_to_ids(tokens)
                        if tokens:
                            self.documents[-1].append(tokens)

                logger.info(f"Creating examples from {len(self.documents)} documents.")
                self.examples = []
                for doc_index, document in enumerate(self.documents):
                    self.create_examples_from_document(document, doc_index, block_size)

                start = time.time()
                with open(cached_features_file, "wb") as handle:
                    pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def create_examples_from_document(self, document: List[List[int]], doc_index: int, block_size: int):
        """Creates examples for a single document, appending them to `self.examples`."""
        max_num_tokens = block_size - self.tokenizer.num_special_tokens_to_add(pair=True)

        # We *usually* want to fill up the entire sequence since we are padding
        # to `block_size` anyways, so short sequences are generally wasted
        # computation. However, we *sometimes*
        # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
        # sequences to minimize the mismatch between pretraining and fine-tuning.
        # The `target_seq_length` is just a rough target however, whereas
        # `block_size` is a hard limit.
        target_seq_length = max_num_tokens
        if random.random() < self.short_seq_probability:
            target_seq_length = random.randint(2, max_num_tokens)

        current_chunk = []  # a buffer stored current working segments
        current_length = 0
        i = 0
        while i < len(document):
            segment = document[i]
            current_chunk.append(segment)
            current_length += len(segment)
            if i == len(document) - 1 or current_length >= target_seq_length:
                if current_chunk:
                    # `a_end` is how many segments from `current_chunk` go into the `A`
                    # (first) sentence.
                    a_end = 1
                    if len(current_chunk) >= 2:
                        a_end = random.randint(1, len(current_chunk) - 1)

                    tokens_a = []
                    for j in range(a_end):
                        tokens_a.extend(current_chunk[j])

                    tokens_b = []
                    if len(current_chunk) == 1 or random.random() < self.nsp_probability:
                        is_random_next = True
                        target_b_length = target_seq_length - len(tokens_a)

                        # This should rarely go for more than one iteration for large
                        # corpora. However, just to be careful, we try to make sure that
                        # the random document is not the same as the document
                        # we're processing.
                        for _ in range(10):
                            random_document_index = random.randint(0, len(self.documents) - 1)
                            if random_document_index != doc_index:
                                break

                        # NOTE(review): assumes the sampled document is non-empty; randint
                        # below raises on an empty one — pre-existing behavior.
                        random_document = self.documents[random_document_index]
                        random_start = random.randint(0, len(random_document) - 1)
                        for j in range(random_start, len(random_document)):
                            tokens_b.extend(random_document[j])
                            if len(tokens_b) >= target_b_length:
                                break
                        # We didn't actually use these segments so we "put them back" so
                        # they don't go to waste.
                        num_unused_segments = len(current_chunk) - a_end
                        i -= num_unused_segments
                    # Actual next
                    else:
                        is_random_next = False
                        for j in range(a_end, len(current_chunk)):
                            tokens_b.extend(current_chunk[j])

                    if not (len(tokens_a) >= 1):
                        raise ValueError(f"Length of sequence a is {len(tokens_a)} which must be no less than 1")
                    if not (len(tokens_b) >= 1):
                        raise ValueError(f"Length of sequence b is {len(tokens_b)} which must be no less than 1")

                    # add special tokens
                    input_ids = self.tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b)
                    # add token type ids, 0 for sentence a, 1 for sentence b
                    token_type_ids = self.tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b)

                    example = {
                        "input_ids": torch.tensor(input_ids, dtype=torch.long),
                        "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
                        "next_sentence_label": torch.tensor(1 if is_random_next else 0, dtype=torch.long),
                    }

                    self.examples.append(example)

                current_chunk = []
                current_length = 0

            i += 1

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        return self.examples[i]
|
27182812/ChatGLM-LLaMA-chinese-insturct | 6,160 | src/transformers/data/datasets/glue.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command
    line.
    """

    # GLUE task key; must match an entry in `glue_processors` (lower-cased in __post_init__).
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        # Normalize so lookups into glue_processors/glue_output_modes are case-insensitive.
        self.task_name = self.task_name.lower()
class Split(Enum):
    """Dataset split names accepted by `GlueDataset` (its `mode` argument)."""

    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        """
        Load (or build and cache) `InputFeatures` for the configured GLUE task.

        Args:
            args: task name, data dir and sequence-length settings.
            tokenizer: tokenizer used to convert examples to features.
            limit_length: if set, keep only the first `limit_length` examples.
            mode: which split to load ("train"/"dev"/"test" or a `Split` member).
            cache_dir: where to read/write the features cache (defaults to `args.data_dir`).

        Raises:
            KeyError: if `mode` is a string that is not a valid `Split` name.
        """
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                # NOTE(review): mixed f-string + %-style args — the literal "%.3f" survives the
                # f-string and is filled in lazily by logging, so the output is correct, just
                # stylistically inconsistent with the save message below.
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        # Label names for the task (possibly reordered above for RoBERTa-family checkpoints).
        return self.label_list
|
2881099/FreeSql.Cloud | 7,635 | README.md | <h1 align="center"> 🦄 FreeSql.Cloud </h1>
为 FreeSql 提供跨数据库访问,分布式事务TCC、SAGA解决方案,支持 .NET Core 2.1+, .NET Framework 4.0+.
## 快速开始
> dotnet add package FreeSql.Cloud
or
> Install-Package FreeSql.Cloud
```c#
public enum DbEnum { db1, db2 }
public class FreeSqlCloud : FreeSqlCloud<DbEnum>
{
public FreeSqlCloud() : base(null) { }
public FreeSqlCloud(string distributeKey) : base(distributeKey) { }
}
var fsql = new FreeSqlCloud();
fsql.DistributeTrace = log => Console.WriteLine(log.Split('\n')[0].Trim());
fsql.Register(DbEnum.db1, () => new FreeSqlBuilder().UseConnectionString(DataType.Sqlite, @"Data Source=db1.db").Build());
fsql.Register(DbEnum.db2, () => new FreeSqlBuilder().UseConnectionString(DataType.Sqlite, @"Data Source=db2.db").Build());
services.AddSingleton<IFreeSql>(fsql);
services.AddSingleton(fsql);
```
> FreeSqlCloud 必须定义成单例模式
> new FreeSqlCloud() 多连接管理,DbEnum 换成 string 就是多租户管理
> new FreeSqlCloud("myapp") 开启后 TCC/SAGA 事务才会生效
## 如何使用?
FreeSqlCloud 的访问方式和 IFreeSql 一样:
```c#
fsql.Select<T>();
fsql.Insert<T>();
fsql.Update<T>();
fsql.Delete<T>();
//...
```
切换数据库(多线程安全):
```c#
fsql.Change(DbEnum.db2).Select<T>();
//同一线程,或异步await 后续 fsql.Select/Insert/Update/Delete 操作是 db2
fsql.Use(DbEnum.db2).Select<T>();
//单次有效
using (fsql.Change(DbEnum.db2)) {
//todo..
}
//FreeSql.Cloud v1.6.8 一个范围内切换,之后再切换回去
```
自动定向数据库配置:
```c#
fsql.EntitySteering = (_, e) =>
{
if (e.EntityType == typeof(User)) e.DBKey = DbEnum.db2;
//查询 User 自动定向 db2
};
```
## 关于仓储对象 Repository
1、静态仓储对象
FreeSql.Repository/UnitOfWorkManager 对象创建时固定了 IFreeSql,因此无法跟随 FreeSqlCloud 切换数据库。
> 注意:是同一个对象实例创建之后,无法跟随切换,创建新对象实例不受影响。
租户分库场景 Repository/UnitOfWorkManager 创建之前,先调用 fsql.Change 切换好数据库。
[《FreeSql.Cloud 如何使用 UnitOfWorkManager 实现 AOP 事务?》](https://github.com/dotnetcore/FreeSql/wiki/DI-UnitOfWorkManager#freesqlcloud-%E5%A6%82%E4%BD%95%E4%BD%BF%E7%94%A8-unitofworkmanager)
2、动态创建对象(不推荐)
但是。。。仍然有一种特殊需求,Repository 在创建之后,仍然能跟随 fsql.Change 切换数据库。
```c#
var repo = DB.Cloud.GetCloudRepository<User>();
DB.Cloud.Change(DbEnum.db2);
Console.WriteLine(repo.Orm.Ado.ConnectionString); //repo -> db2
DB.Cloud.Change(DbEnum.db1);
Console.WriteLine(repo.Orm.Ado.ConnectionString); //repo -> db1
```
这种机制太不可控,所以只做了简单的扩展方法创建,并不推荐 Ioc 注入。
## 关于并发
FreeSqlCloud 内部使用 IdleBus + AsyncLocal\<string\> 方式实现,Change/Use 多线程并发是安全的。
FreeSqlCloud 实现了接口 IFreeSql,但它不负责直接交互数据库,只是个代理层。
```c#
public class FreeSqlCloud<TDBKey> : IFreeSql
{
AsyncLocal<TDBKey> _currentKey = new AsyncLocal<TDBKey>();
IFreeSql _current => _idlebus.Get(_currentKey.Value);
IdleBus<TDBKey, IFreeSql> _idlebus;
...
public IAdo Ado => _current.Ado;
public GlobalFilter GlobalFilter => _current.GlobalFilter;
public void Transaction(Action handler) => _current.Transaction(handler);
...
}
```
AsyncLocal 负责存储执行上下文 DBKey 值,在异步或同步并发场景是安全的,fsql.Change(DbEnum.db2) 会改变该值。fsql.Change/Use 方法返回 IFreeSql 特殊实现,大大降低 IdleBus 因误用被释放的异常(原因:IdleBus.Get 返回值不允许被外部变量长期引用,应每次 Get 获取对象)
## 关于分布式事务
1、简介
FreeSqlCloud 提供 TCC/SAGA 分布式事务调度、失败重试、持久化重启后重新唤醒事务单元、等管理功能。
TCC 事务特点:
- Try 用于资源冻结/预扣;
- Try 全部环节通过,代表业务一定能完成,进入 Confirm 环节;
- Try 任何环节失败,代表业务失败,进入 Cancel 环节;
- Confirm 失败会进行重试N次,直到交付成功,或者人工干预;
- Cancel 失败会进行重试N次,直到取消成功,或者人工干预;
```c#
// 测试数据
fsql.Use(DbEnum.db1).Insert(new User { Id = 1, Name = "testuser01", Point = 10 }).ExecuteAffrows();
fsql.Use(DbEnum.db2).Insert(new Goods { Id = 1, Title = "testgoods01", Stock = 0 }).ExecuteAffrows();
var orderId = Guid.NewGuid();
await fsql.StartTcc(orderId.ToString(), "支付购买",
new TccOptions
{
MaxRetryCount = 10,
RetryInterval = TimeSpan.FromSeconds(10)
})
.Then<Tcc1>(DbEnum.db1, new BuyUnitState { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
.Then<Tcc2>(DbEnum.db2, new BuyUnitState { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
.Then<Tcc3>(DbEnum.db2, new BuyUnitState { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
.ExecuteAsync();
```
```shell
2022-08-16 10:47:53 【myapp】db1 注册成功, 并存储 TCC/SAGA 事务相关数据
2022-08-16 10:47:53 【myapp】成功加载历史未完成 TCC 事务 0 个
2022-08-16 10:47:53 【myapp】成功加载历史未完成 SAGA 事务 0 个
2022-08-16 10:47:53 【myapp】TCC (3a9c548f-95b1-43b4-b918-9c3817d4c316, 支付购买) Created successful, retry count: 10, interval: 10S
2022-08-16 10:47:53 【myapp】TCC (3a9c548f-95b1-43b4-b918-9c3817d4c316, 支付购买) Unit1(第1步:数据库db1 扣除用户积分) TRY successful
2022-08-16 10:47:53 【myapp】数据库使用[Use] db2
2022-08-16 10:47:53 【myapp】TCC (3a9c548f-95b1-43b4-b918-9c3817d4c316, 支付购买) Unit2(第2步:数据库db2 扣除库存) TRY failed, ready to CANCEL, -ERR 扣除库存失败
2022-08-16 10:47:53 【myapp】TCC (3a9c548f-95b1-43b4-b918-9c3817d4c316, 支付购买) Unit1(第1步:数据库db1 扣除用户积分) CANCEL successful
2022-08-16 10:47:53 【myapp】TCC (3a9c548f-95b1-43b4-b918-9c3817d4c316, 支付购买) Completed, all units CANCEL successfully
```
> 请查看[TCC/SAGA完整的演示代码](https://github.com/2881099/FreeSql.Cloud/blob/master/examples/net60_tcc_saga/Program.cs)
SAGA 事务特点:
- Commit 用于业务提交;
- Commit 全部环节通过,代表业务交付成功;
- Commit 任何环节失败,代表业务失败,进入 Cancel 环节;
- Cancel 失败会进行重试N次,直到取消成功,或者人工干预;
2、唯一标识
FreeSqlCloud 使用唯一标识区分,解决冲突问题,举例:
```c#
var fsql = new FreeSqlCloud("myapp");
var fsql2 = new FreeSqlCloud("myapp2");
```
fsql2 访问不到 fsql 产生的分布式事务,如果 webapi 部署多实例,只需要设置实例各自对应的 name 区分即可。
3、持久化
fsql.Register 第一个注册的称之为【主库】,存储 TCC/SAGA 持久数据,程序启动的时候,会将未处理完的事务载入内存重新调度。
自动创建表 tcc_myapp、saga_myapp:
> 提示:fsql2 会创建表 tcc_myapp2、saga_myapp2
| 字段名 | 描述 |
| --- | --- |
| tid | 事务ID |
| title | 事务描述,查看日志更直观 |
| total | 所有单元数量 |
| create_time | 创建时间 |
| finish_time | 完成时间 |
| status | Pending, Confirmed, Canceled, ManualOperation |
| max_retry_count | 最大重试次数,如果仍然失败将转为【人工干预】 |
| retry_interval | 重试间隔(秒) |
| retry_count | 已重试次数 |
| retry_time | 最后重试时间 |
自动创建表 tcc_myapp_unit、saga_myapp_unit:
> 提示:fsql2 会创建表 tcc_myapp2_unit、saga_myapp2_unit
| 字段名 | 描述 |
| --- | --- |
| tid | 事务ID |
| index | 单元下标,1到N |
| description | 单元描述,使用 [Description("xx")] 特性设置,查看日志更直观 |
| stage | Try, Confirm, Cancel |
| type_name | 对应 c# TccUnit/SagaUnit 反射类型信息,用于创建 TccUnit/SagaUnit 对象 |
| state | 状态数据 |
| state_type_name | 状态数据对应的 c# 反射类型信息 |
| create_time | 创建时间 |
| db_key | 用于唤醒时使用 fsql.Use(db_key) 对应的事务或开启事务 |
其他库会创建表 myapp_unit_invoked 判断重复执行
4、单元
TccUnit、SagaUnit 方法内可以使用 Orm 访问当前事务对象。
单元方法除了操作数据库,也支持远程访问 webapi/grpc,发生异常时触发重试调度。由于网络不确定因素,较坏的情况比如单元调用 webapi/grpc 成功,但是 tcc_unit 表保存状态失败,导致单元又会重试执行,所以 web/grpc 提供方应该保证幂等操作,无论多少次调用结果都一致。
```c#
// HTTP 服务编排??
var orderId = Guid.NewGuid();
await DB.Cloud.StartSaga(orderId.ToString(), "支付购买webapi(saga)",
new SagaOptions
{
MaxRetryCount = 10,
RetryInterval = TimeSpan.FromSeconds(10)
})
.Then<HttpSaga>(default, new HttpUnitState
{
Url = "https://192.168.1.100/saga/UserPoint",
Data = "UserId=1&Point=10&GoodsId=1&OrderId=" + orderId
})
.Then<HttpSaga>(default, new HttpUnitState
{
Url = "https://192.168.1.100/saga/GoodsStock",
Data = "UserId=1&Point=10&GoodsId=1&OrderId=" + orderId
})
.Then<HttpSaga>(default, new HttpUnitState
{
Url = "https://192.168.1.100/saga/OrderNew",
Data = "UserId=1&Point=10&GoodsId=1&OrderId=" + orderId
})
.ExecuteAsync();
class HttpSaga : SagaUnit<HttpUnitState>
{
public override Task Commit()
{
//Console.WriteLine("请求 webapi:" + State.Url + "/Commit" + State.Data);
return Task.CompletedTask;
}
public override Task Cancel()
{
//Console.WriteLine("请求 webapi:" + State.Url + "/Cancel" + State.Data);
return Task.CompletedTask;
}
}
class HttpUnitState
{
public string Url { get; set; }
public string Data { get; set; }
}
``` |
2881099/FreeSql.Cloud | 2,641 | examples/net60_webapi/DB.cs | using FreeSql;
using System;
namespace net60_webapi
{
public enum DbEnum { db1, db2, db3 }
/// <summary>
/// Non-generic convenience subclass of FreeSqlCloud&lt;DbEnum&gt; so DI registrations can
/// use a short, closed type name.
/// </summary>
public class FreeSqlCloud : FreeSqlCloud<DbEnum>
{
    /// <summary>Creates a cloud without a distribute key (no TCC/SAGA persistence).</summary>
    public FreeSqlCloud() : base(null) { }
    /// <summary>Creates a cloud; a non-null key enables TCC/SAGA distributed transactions.</summary>
    public FreeSqlCloud(string distributeKey) : base(distributeKey) { }
}
/// <summary>
/// Process-wide access point for the <see cref="FreeSqlCloud"/> singleton.
/// The instance is created lazily on first use and registers three in-memory SQLite databases.
/// </summary>
public static class DB
{
    /// <summary>The application's single FreeSqlCloud (created on first access).</summary>
    public static FreeSqlCloud Cloud => cloudLazy.Value;

    // Lazy<T> runs the factory below at most once (thread-safe by default).
    readonly static Lazy<FreeSqlCloud> cloudLazy = new Lazy<FreeSqlCloud>(() =>
    {
        // Distribute key "app001" enables TCC/SAGA transaction persistence for this app instance.
        var fsql = new FreeSqlCloud("app001");
        // Log only the first line of each distributed-transaction trace message.
        fsql.DistributeTrace += log => Console.WriteLine(log.Split('\n')[0].Trim());
        // NOTE(review): db3 is registered first, making it the master database that stores
        // TCC/SAGA state — confirm this ordering is intentional.
        fsql.Register(DbEnum.db3, () => new FreeSqlBuilder()
            .UseConnectionString(DataType.Sqlite, @"Data Source=:memory:;max pool size=1")
            .UseAutoSyncStructure(true)
            .Build());
        fsql.Register(DbEnum.db2, () => new FreeSqlBuilder()
            .UseConnectionString(DataType.Sqlite, @"Data Source=:memory:;max pool size=2")
            .UseAutoSyncStructure(true)
            .Build());
        fsql.Register(DbEnum.db1, () => new FreeSqlBuilder()
            .UseConnectionString(DataType.Sqlite, @"Data Source=:memory:;max pool size=3")
            .UseAutoSyncStructure(true)
            .Build());

        // Demo: Change() switches the ambient connection; Dispose restores the previous one.
        Console.WriteLine(fsql.Ado.ConnectionString);
        using (fsql.Change(DbEnum.db2))
        {
            Console.WriteLine(fsql.Ado.ConnectionString);
        }
        Console.WriteLine(fsql.Ado.ConnectionString);

        // Route each entity type to its home database.
        fsql.EntitySteering = (_, e) =>
        {
            if (e.EntityType == typeof(User)) e.DBKey = DbEnum.db1;
            else if (e.EntityType == typeof(Goods)) e.DBKey = DbEnum.db2;
            else if (e.EntityType == typeof(Order)) e.DBKey = DbEnum.db3;
            #region 另一种读写分离
            // (Alternative read/write-splitting example, kept for reference.)
            //switch (e.MethodName)
            //{
            //    case "Select":
            //        if (e.EntityType == typeof(Program)) ; //判断某一个实体类型
            //        if (e.DBKey == DbEnum.db1) //判断主库时
            //        {
            //            var dbkeyIndex = new Random().Next(0, e.AvailableDBKeys.Length);
            //            e.DBKey = e.AvailableDBKeys[dbkeyIndex]; //重新定向到其他 db
            //        }
            //        break;
            //    case "Insert":
            //    case "Update":
            //    case "Delete":
            //    case "InsertOrUpdate":
            //        break;
            //}
            #endregion
        };
        return fsql;
    });
}
}
|
2881099/FreeSql.Cloud | 1,227 | examples/net60_webapi/FodyWeavers.xsd | <?xml version="1.0" encoding="utf-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<!-- This file was generated by Fody. Manual changes to this file will be lost when your project is rebuilt. -->
<xs:element name="Weavers">
<xs:complexType>
<xs:all>
<xs:element name="Rougamo" minOccurs="0" maxOccurs="1" type="xs:anyType" />
</xs:all>
<xs:attribute name="VerifyAssembly" type="xs:boolean">
<xs:annotation>
<xs:documentation>'true' to run assembly verification (PEVerify) on the target assembly after all weavers have been executed.</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="VerifyIgnoreCodes" type="xs:string">
<xs:annotation>
<xs:documentation>A comma-separated list of error codes that can be safely ignored in assembly verification.</xs:documentation>
</xs:annotation>
</xs:attribute>
<xs:attribute name="GenerateXsd" type="xs:boolean">
<xs:annotation>
<xs:documentation>'false' to turn off automatic generation of the XML Schema file.</xs:documentation>
</xs:annotation>
</xs:attribute>
</xs:complexType>
</xs:element>
</xs:schema> |
2881099/FreeSql.Cloud | 7,274 | examples/net60_webapi/Program.cs | using FreeSql;
using FreeSql.Cloud.Abstract;
using FreeSql.Internal;
using net60_webapi;
using Rougamo.Context;
using System.Data;
using static System.Net.Mime.MediaTypeNames;
var builder = WebApplication.CreateBuilder(args);

builder.Services.AddSingleton(DB.Cloud); // register FreeSqlCloud<DbEnum> as a singleton (comment restored from mojibake)
builder.Services.AddSingleton(provider => DB.Cloud.Use(DbEnum.db1)); // register db1 as the default IFreeSql
builder.Services.AddScoped<UnitOfWorkManagerCloud>();

// Register repository open generics (the parameterless RepositoryCloud ctors default to db1),
// then auto-register every concrete repository found in the entity assembly.
builder.Services.AddScoped(typeof(IBaseRepository<>), typeof(RepositoryCloud<>)); //db1
builder.Services.AddScoped(typeof(BaseRepository<>), typeof(RepositoryCloud<>)); //db1
builder.Services.AddScoped(typeof(IBaseRepository<,>), typeof(RepositoryCloud<,>)); //db1
builder.Services.AddScoped(typeof(BaseRepository<,>), typeof(RepositoryCloud<,>)); //db1
foreach (var repositoryType in typeof(User).Assembly.GetTypes().Where(a => a.IsAbstract == false && typeof(IBaseRepository).IsAssignableFrom(a)))
    builder.Services.AddScoped(repositoryType);

// Demo: a "cloud repository" follows DB.Cloud.Change() across databases.
var repo1 = DB.Cloud.GetCloudRepository<User>();
Console.WriteLine(repo1.Orm.Ado.ConnectionString);
DB.Cloud.Change(DbEnum.db2);
Console.WriteLine(repo1.Orm.Ado.ConnectionString);
DB.Cloud.Change(DbEnum.db3);
Console.WriteLine(repo1.Orm.Ado.ConnectionString);
DB.Cloud.Change(DbEnum.db1);
Console.WriteLine(repo1.Orm.Ado.ConnectionString);

builder.Services.AddScoped<UserService>();

var app = builder.Build();

// Give the AOP transaction attribute access to the current request's scoped services.
app.Use(async (context, next) =>
{
    TransactionalAttribute.SetServiceProvider(context.RequestServices);
    await next();
});

app.MapGet("/", async context =>
{
    var _userService = context.RequestServices.GetService<UserService>();
    _userService.Test01();
    await context.Response.WriteAsync("hello word");
});

AsyncLocalAccessor<int> access = new AsyncLocalAccessor<int>(() => 100);
await access.SetValue();
Console.WriteLine($"ValueAccessor before await FooAsync in Main: {access.Value}");
test();

app.Run();
// Smoke test for a string-keyed FreeSqlCloud: registers two lazily-built providers and
// shows that nested Change() scopes switch the ambient provider and restore it on Dispose.
void test()
{
    var fsql = new FreeSqlCloud2();
    // NOTE(review): reads Ado.DataType before any Register call — relies on FreeSqlCloud's
    // behavior for an empty cloud; confirm this does not throw.
    Console.WriteLine(fsql.Ado.DataType);
    fsql.Register("ojDb3D", () =>
        new FreeSqlBuilder()
        .UseConnectionFactory(DataType.PostgreSQL, () => null)
        .UseNameConvert(NameConvertType.ToLower).Build());
    fsql.Register("ChDb", () =>
        new FreeSqlBuilder()
        .UseConnectionFactory(DataType.Oracle, () => null)
        .UseNameConvert(NameConvertType.ToUpper).Build());
    Console.WriteLine(fsql.Ado.DataType);
    using (fsql.Change("ChDb"))
    {
        Console.WriteLine(fsql.Ado.DataType);
        using (fsql.Change("ojDb3D"))
        {
            Console.WriteLine(fsql.Ado.DataType);
        }
        Console.WriteLine(fsql.Ado.DataType); // back to ChDb after inner scope disposes
    }
    Console.WriteLine(fsql.Ado.DataType); // back to the pre-Change provider
}
/// <summary>String-keyed FreeSqlCloud variant used by the test() demo in this file.</summary>
public class FreeSqlCloud2 : FreeSqlCloud<string>
{
    public FreeSqlCloud2() : base(null) { }
    public FreeSqlCloud2(string distributekey) : base(distributekey) { }
}
/// <summary>
/// Enum mirror of the string keys used in test() ("ChDb"/"ojDb3D");
/// not referenced in the visible code.
/// </summary>
public enum DbEnum2
{
    ChDb = 0,
    ojDb3D = 1
}
/// <summary>
/// Demo service exercising nested [Transactional] AOP methods over multiple databases.
/// </summary>
class UserService
{
    readonly IBaseRepository<User> m_repo1;
    readonly BaseRepository<User> m_repo2;
    readonly UserRepository m_repo3;

    public UserService(IBaseRepository<User> repo1, BaseRepository<User> repo2, UserRepository repo3)
    {
        m_repo1 = repo1;
        m_repo2 = repo2;
        m_repo3 = repo3;
    }

    public void Test01()
    {
        Console.WriteLine("aaa");
        // NOTE(review): blocking on an async method (.Wait()) — fine for this demo but a
        // deadlock risk wherever a synchronization context is present.
        Test02().Wait();
        Console.WriteLine("bbb");
    }

    // Stacked attributes: transactions on db1 (Required) and db3 (attribute default).
    [Transactional(DbEnum.db1, Propagation = Propagation.Required)] //db1
    [Transactional(DbEnum.db3)] //db3
    async public Task Test02()
    {
        Console.WriteLine("xxx");
        Test03();
        Console.WriteLine("yyy");
        await Task.CompletedTask;
    }

    [Transactional(DbEnum.db2, Propagation = Propagation.Never)] //db2 (comment fixed: previously said db1)
    public void Test03()
    {
        Console.WriteLine("zzz");
    }
}
/// <summary>
/// User repository pinned to db3; joins the per-db unit of work supplied by
/// UnitOfWorkManagerCloud via the RepositoryCloud base.
/// </summary>
class UserRepository : RepositoryCloud<User>, IBaseRepository<User>
{
    public UserRepository(UnitOfWorkManagerCloud uowm) : base(DbEnum.db3, uowm) { }
    //todo..
}
/// <summary>
/// Caches one UnitOfWorkManager per db key so repositories and the
/// [Transactional] interceptor within the same DI scope share transactions.
/// Implements IDisposable so the container actually disposes the cached
/// managers at scope end (Dispose alone is never called by a container
/// unless the interface is declared).
/// </summary>
class UnitOfWorkManagerCloud : IDisposable
{
    readonly Dictionary<string, UnitOfWorkManager> m_managers = new Dictionary<string, UnitOfWorkManager>();
    readonly FreeSqlCloud m_cloud;
    public UnitOfWorkManagerCloud(IServiceProvider serviceProvider)
    {
        m_cloud = serviceProvider.GetService<FreeSqlCloud>();
    }
    public void Dispose()
    {
        foreach (var uowm in m_managers.Values)
        {
            uowm.Dispose();
        }
        m_managers.Clear();
    }
    /// <summary>Begins (or joins, per propagation) a unit of work on the given db.</summary>
    public IUnitOfWork Begin(string db, Propagation propagation = Propagation.Required, IsolationLevel? isolationLevel = null)
    {
        return GetUnitOfWorkManager(db).Begin(propagation, isolationLevel);
    }
    /// <summary>Returns the cached manager for <paramref name="db"/>, creating it on first use.</summary>
    public UnitOfWorkManager GetUnitOfWorkManager(string db)
    {
        if (m_managers.TryGetValue(db, out var uowm) == false)
        {
            uowm = new UnitOfWorkManager(m_cloud.Use(db));
            m_managers.Add(db, uowm);
        }
        return uowm;
    }
}
// Repository bound to one db of the cloud (int primary key). The single-arg
// ctor used by DI defaults to db1; the private ctor attaches the repository
// to that db's UnitOfWorkManager.
class RepositoryCloud<T> : DefaultRepository<T, int> where T : class
{
    public RepositoryCloud(UnitOfWorkManagerCloud uomw) : this(DbEnum.db1, uomw) { } //DI
    public RepositoryCloud(DbEnum db, UnitOfWorkManagerCloud uomw) : this(uomw.GetUnitOfWorkManager(db.ToString())) { }
    RepositoryCloud(UnitOfWorkManager uomw) : base(uomw.Orm, uomw)
    {
        // Presumably enlists this repository in the manager's current
        // transaction — confirm against UnitOfWorkManager.Binding docs.
        uomw.Binding(this);
    }
}
// Generic-key variant of RepositoryCloud<T>; identical wiring, only the
// primary-key type differs.
class RepositoryCloud<T, TKey> : DefaultRepository<T, TKey> where T : class
{
    public RepositoryCloud(UnitOfWorkManagerCloud uomw) : this(DbEnum.db1, uomw) { } //DI
    public RepositoryCloud(DbEnum db, UnitOfWorkManagerCloud uomw) : this(uomw.GetUnitOfWorkManager(db.ToString())) { }
    RepositoryCloud(UnitOfWorkManager uomw) : base(uomw.Orm, uomw)
    {
        // Presumably enlists this repository in the manager's current
        // transaction — confirm against UnitOfWorkManager.Binding docs.
        uomw.Binding(this);
    }
}
/// <summary>
/// Rougamo-based AOP attribute: opens a unit of work on the given db before
/// the decorated method runs (OnEntry) and commits on success / rolls back on
/// exception when it exits (OnExit). AllowMultiple lets one method join
/// transactions on several dbs at once.
/// </summary>
[AttributeUsage(AttributeTargets.Method, AllowMultiple = true)]
public class TransactionalAttribute : Rougamo.MoAttribute
{
    public Propagation Propagation { get; set; } = Propagation.Required;
    // NOTE(review): reading IsolationLevel before it was ever assigned throws
    // (Nullable<T>.Value on null). Callers presumably only set it via the
    // attribute argument list — confirm.
    public IsolationLevel IsolationLevel { get => m_IsolationLevel.Value; set => m_IsolationLevel = value; }
    IsolationLevel? m_IsolationLevel;
    readonly DbEnum m_db;
    public TransactionalAttribute(DbEnum db)
    {
        m_db = db;
    }
    // Ambient service provider, flowed per async context; populated by the
    // request middleware via SetServiceProvider.
    static AsyncLocal<IServiceProvider> m_ServiceProvider = new AsyncLocal<IServiceProvider>();
    public static void SetServiceProvider(IServiceProvider serviceProvider) => m_ServiceProvider.Value = serviceProvider;
    IUnitOfWork _uow;
    public override void OnEntry(MethodContext context)
    {
        // Resolves the per-scope manager from the ambient provider; null
        // IsolationLevel falls through to the manager's default.
        var uowManager = m_ServiceProvider.Value.GetService<UnitOfWorkManagerCloud>();
        _uow = uowManager.Begin(m_db.ToString(), this.Propagation, this.m_IsolationLevel);
    }
    public override void OnExit(MethodContext context)
    {
        // Async methods: defer commit/rollback until the returned Task
        // completes; sync methods finish immediately.
        // NOTE(review): ContinueWith uses no explicit TaskScheduler and the
        // outcome is read from context.Exception rather than the antecedent
        // task — relies on Rougamo populating context.Exception for faulted
        // async methods; confirm against Rougamo docs.
        if (typeof(Task).IsAssignableFrom(context.RealReturnType))
            ((Task)context.ReturnValue).ContinueWith(t => _OnExit());
        else _OnExit();
        void _OnExit()
        {
            try
            {
                if (context.Exception == null) _uow.Commit();
                else _uow.Rollback();
            }
            finally
            {
                _uow.Dispose();
            }
        }
    }
}
/// <summary>
/// Demo helper: writes a fixed value (200) into the accessor's async-local
/// slot, then yields for a tenth of a second so the caller can observe how
/// the value flows across the await boundary.
/// </summary>
public static class Extension
{
    public static async Task SetValue(this AsyncLocalAccessor<int> face)
    {
        const int demoValue = 200;
        face.Value = demoValue;
        await Task.Delay(TimeSpan.FromMilliseconds(100));
    }
}
2881099/FreeSql.Cloud | 2,227 | examples/net40_tcc_saga/DB.cs | using FreeSql;
using System;
namespace net60_tcc_saga
{
public enum DbEnum { db1, db2, db3 }
/// <summary>
/// Lazily-built FreeSqlCloud holding three in-memory SQLite registrations,
/// with entity steering that pins each entity type to its home database
/// (User→db1, Goods→db2, Order→db3).
/// </summary>
public static class DB
{
    public static FreeSqlCloud<DbEnum> Cloud => cloudLazy.Value;
    readonly static Lazy<FreeSqlCloud<DbEnum>> cloudLazy = new Lazy<FreeSqlCloud<DbEnum>>(() =>
    {
        var fsql = new FreeSqlCloud<DbEnum>("app001");
        // Log only the first line of each distributed-transaction trace entry.
        fsql.DistributeTrace += log => Console.WriteLine(log.Split('\n')[0].Trim());
        // All three dbs share the same configuration; a single factory avoids
        // repeating the builder chain (each Register call still yields its
        // own independent IFreeSql instance when invoked).
        Func<IFreeSql> sqliteFactory = () => new FreeSqlBuilder()
            .UseConnectionString(DataType.Sqlite, @"Data Source=:memory:")
            .UseAutoSyncStructure(true)
            .Build();
        fsql.Register(DbEnum.db1, sqliteFactory);
        fsql.Register(DbEnum.db2, sqliteFactory);
        fsql.Register(DbEnum.db3, sqliteFactory);
        // Route each entity type to its home database.
        fsql.EntitySteering = (_, e) =>
        {
            if (e.EntityType == typeof(User)) e.DBKey = DbEnum.db1;
            else if (e.EntityType == typeof(Goods)) e.DBKey = DbEnum.db2;
            else if (e.EntityType == typeof(Order)) e.DBKey = DbEnum.db3;
            #region Alternative: read/write splitting example
            //switch (e.MethodName)
            //{
            //    case "Select":
            //        if (e.EntityType == typeof(Program)) ; // match a specific entity type
            //        if (e.DBKey == DbEnum.db1) // when the primary db was chosen
            //        {
            //            var dbkeyIndex = new Random().Next(0, e.AvailableDBKeys.Length);
            //            e.DBKey = e.AvailableDBKeys[dbkeyIndex]; // redirect to another db
            //        }
            //        break;
            //    case "Insert":
            //    case "Update":
            //    case "Delete":
            //    case "InsertOrUpdate":
            //        break;
            //}
            #endregion
        };
        return fsql;
    });
}
}
|
2881099/FreeSql.Cloud | 2,601 | examples/net40_tcc_saga/BuySagaUnit.cs | using FreeSql;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace net60_tcc_saga
{
    // State payload for saga step 1; persisted with the unit so a retry can
    // resume with the original arguments.
    class SagaUnit1State
    {
        public int UserId { get; set; }
        public int Point { get; set; }
        public Guid BuyLogId { get; set; }
        public int GoodsId { get; set; }
        public Guid OrderId { get; set; }
    }
    // Saga step 1 (db1): deduct the user's points; Cancel compensates by refunding.
    [Description("第1步:数据库db1 扣除用户积分")]
    class Saga1 : SagaUnit<SagaUnit1State>
    {
        public override void Commit()
        {
            // The Where guard keeps the deduction atomic and the balance
            // non-negative; zero affected rows means insufficient points.
            var affrows = Orm.Update<User>()
                .Set(a => a.Point - State.Point)
                .Where(a => a.Id == State.UserId && a.Point >= State.Point)
                .ExecuteAffrows();
            if (affrows <= 0) throw new Exception("扣除积分失败");
            // TODO: record a point-change log?
        }
        public override void Cancel()
        {
            Orm.Update<User>()
                .Set(a => a.Point + State.Point)
                .Where(a => a.Id == State.UserId)
                .ExecuteAffrows(); // refund the points
            // TODO: record a point-change log?
        }
    }
    // State payload for saga step 2.
    class SagaUnit2State
    {
        public int UserId { get; set; }
        public int Point { get; set; }
        public Guid BuyLogId { get; set; }
        public int GoodsId { get; set; }
        public Guid OrderId { get; set; }
    }
    // Saga step 2 (db2): decrement stock by one; Cancel restores it.
    [Description("第2步:数据库db2 扣除库存")]
    class Saga2 : SagaUnit<SagaUnit2State>
    {
        public override void Commit()
        {
            // Guard against overselling: only succeeds while Stock >= 1.
            var affrows = Orm.Update<Goods>()
                .Set(a => a.Stock - 1)
                .Where(a => a.Id == State.GoodsId && a.Stock >= 1)
                .ExecuteAffrows();
            if (affrows <= 0) throw new Exception("扣除库存失败");
        }
        public override void Cancel()
        {
            Orm.Update<Goods>()
                .Set(a => a.Stock + 1)
                .Where(a => a.Id == State.GoodsId)
                .ExecuteAffrows(); // restore the stock
        }
    }
    // State payload for saga step 3.
    class SagaUnit3State
    {
        public int UserId { get; set; }
        public int Point { get; set; }
        public Guid BuyLogId { get; set; }
        public int GoodsId { get; set; }
        public Guid OrderId { get; set; }
    }
    // Saga step 3 (db3): create the order directly in Success state.
    [Description("第3步:数据库db3 创建订单")]
    class Saga3 : SagaUnit<SagaUnit3State>
    {
        public override void Commit()
        {
            Orm.Insert(new Order { Id = State.OrderId, Status = Order.OrderStatus.Success, CreateTime = DateTime.Now })
                .ExecuteAffrows();
        }
        // Final step: nothing to compensate.
        public override void Cancel()
        {
        }
    }
}
|
2881099/FreeSql.Cloud | 2,710 | examples/net40_tcc_saga/net40_tcc_saga.csproj | <?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Import Project="$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props" Condition="Exists('$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props')" />
<PropertyGroup>
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
<Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
<ProjectGuid>{5DE53692-4AE5-4DA5-B2ED-2FA9AEA06AA0}</ProjectGuid>
<OutputType>Exe</OutputType>
<RootNamespace>net40_tcc_saga</RootNamespace>
<AssemblyName>net40_tcc_saga</AssemblyName>
<TargetFrameworkVersion>v4.0</TargetFrameworkVersion>
<FileAlignment>512</FileAlignment>
<Deterministic>true</Deterministic>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
<PlatformTarget>AnyCPU</PlatformTarget>
<DebugSymbols>true</DebugSymbols>
<DebugType>full</DebugType>
<Optimize>false</Optimize>
<OutputPath>bin\Debug\</OutputPath>
<DefineConstants>DEBUG;TRACE</DefineConstants>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
<PlatformTarget>AnyCPU</PlatformTarget>
<DebugType>pdbonly</DebugType>
<Optimize>true</Optimize>
<OutputPath>bin\Release\</OutputPath>
<DefineConstants>TRACE</DefineConstants>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
</PropertyGroup>
<ItemGroup>
<Reference Include="System" />
<Reference Include="System.Core" />
<Reference Include="System.Xml.Linq" />
<Reference Include="System.Data.DataSetExtensions" />
<Reference Include="Microsoft.CSharp" />
<Reference Include="System.Data" />
<Reference Include="System.Xml" />
</ItemGroup>
<ItemGroup>
<Compile Include="BuySagaUnit.cs" />
<Compile Include="BuyTccUnit.cs" />
<Compile Include="DB.cs" />
<Compile Include="Entity.cs" />
<Compile Include="Program.cs" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="FreeSql">
<Version>3.5.201</Version>
</PackageReference>
<PackageReference Include="FreeSql.Provider.Sqlite">
<Version>3.5.201</Version>
</PackageReference>
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\src\FreeSql.Cloud\FreeSql.Cloud.csproj">
<Project>{61c51d8c-d741-4100-bd28-7f79d3ef9142}</Project>
<Name>FreeSql.Cloud</Name>
</ProjectReference>
</ItemGroup>
<Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
</Project> |
2881099/FreeSql.Cloud | 3,009 | examples/net40_tcc_saga/BuyTccUnit.cs | using FreeSql;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace net60_tcc_saga
{
    // State payload for TCC step 1; persisted with the unit so a retry can
    // resume with the original arguments.
    class TccUnit1State
    {
        public int UserId { get; set; }
        public int Point { get; set; }
        public Guid BuyLogId { get; set; }
        public int GoodsId { get; set; }
        public Guid OrderId { get; set; }
    }
    // TCC step 1 (db1): Try reserves the points up front, Confirm is a no-op,
    // Cancel refunds.
    [Description("第1步:数据库db1 扣除用户积分")]
    class Tcc1 : TccUnit<TccUnit1State>
    {
        public override void Try()
        {
            // The Where guard keeps the deduction atomic and the balance
            // non-negative; zero affected rows means insufficient points.
            var affrows = Orm.Update<User>()
                .Set(a => a.Point - State.Point)
                .Where(a => a.Id == State.UserId && a.Point >= State.Point)
                .ExecuteAffrows();
            if (affrows <= 0) throw new Exception("扣除积分失败");
            // TODO: record a point-change log?
        }
        public override void Confirm()
        {
        }
        public override void Cancel()
        {
            Orm.Update<User>()
                .Set(a => a.Point + State.Point)
                .Where(a => a.Id == State.UserId)
                .ExecuteAffrows(); // refund the points
            // TODO: record a point-change log?
        }
    }
    // State payload for TCC step 2.
    class TccUnit2State
    {
        public int UserId { get; set; }
        public int Point { get; set; }
        public Guid BuyLogId { get; set; }
        public int GoodsId { get; set; }
        public Guid OrderId { get; set; }
    }
    // TCC step 2 (db2): Try reserves one unit of stock, Cancel restores it.
    [Description("第2步:数据库db2 扣除库存")]
    class Tcc2 : TccUnit<TccUnit2State>
    {
        public override void Try()
        {
            // Guard against overselling: only succeeds while Stock >= 1.
            var affrows = Orm.Update<Goods>()
                .Set(a => a.Stock - 1)
                .Where(a => a.Id == State.GoodsId && a.Stock >= 1)
                .ExecuteAffrows();
            if (affrows <= 0) throw new Exception("扣除库存失败");
        }
        public override void Confirm()
        {
        }
        public override void Cancel()
        {
            Orm.Update<Goods>()
                .Set(a => a.Stock + 1)
                .Where(a => a.Id == State.GoodsId)
                .ExecuteAffrows(); // restore the stock
        }
    }
    // State payload for TCC step 3.
    class TccUnit3State
    {
        public int UserId { get; set; }
        public int Point { get; set; }
        public Guid BuyLogId { get; set; }
        public int GoodsId { get; set; }
        public Guid OrderId { get; set; }
    }
    // TCC step 3 (db3): Try creates the order as Pending, Confirm flips it to
    // Success, Cancel leaves the Pending row behind (nothing to undo here).
    [Description("第3步:数据库db3 创建订单")]
    class Tcc3 : TccUnit<TccUnit3State>
    {
        public override void Try()
        {
            Orm.Insert(new Order { Id = State.OrderId, Status = Order.OrderStatus.Pending, CreateTime = DateTime.Now })
                .ExecuteAffrows();
        }
        public override void Confirm()
        {
            // Idempotent delivery: the Pending guard makes repeated Confirm
            // calls flip the status at most once.
            Orm.Update<Order>()
                .Set(a => a.Status == Order.OrderStatus.Success)
                .Where(a => a.Id == State.OrderId && a.Status == Order.OrderStatus.Pending)
                .ExecuteAffrows();
        }
        public override void Cancel()
        {
        }
    }
}
|
2881099/FreeSql.Cloud | 1,918 | examples/net40_tcc_saga/Program.cs | using FreeSql;
using System;
using System.Threading.Tasks;
namespace net60_tcc_saga
{
class Program
{
    static void Main(string[] args)
    {
        // Seed data: Stock = 0, so step 2 (Tcc2/Saga2) fails its affrows
        // guard — this intentionally exercises cancel/compensation and the
        // retry options below.
        DB.Cloud.Insert(new User { Id = 1, Name = "testuser01", Point = 10 }).ExecuteAffrows();
        DB.Cloud.Insert(new Goods { Id = 1, Title = "testgoods01", Stock = 0 }).ExecuteAffrows();
        TestTcc();
        TestSaga();
        Console.ReadKey();
        DB.Cloud.Dispose();
    }
    // Runs the three-step TCC transaction; the order id doubles as the
    // distributed-transaction id.
    static void TestTcc()
    {
        var orderId = Guid.NewGuid();
        DB.Cloud.StartTcc(orderId.ToString(), "支付购买TCC事务",
            new TccOptions
            {
                MaxRetryCount = 10,
                RetryInterval = TimeSpan.FromSeconds(10)
            })
            .Then<Tcc1>(DbEnum.db1, new TccUnit1State { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
            .Then<Tcc2>(DbEnum.db2, new TccUnit2State { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
            .Then<Tcc3>(DbEnum.db3, new TccUnit3State { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
            .Execute();
    }
    // Runs the three-step SAGA transaction with the same demo payload.
    static void TestSaga()
    {
        var orderId = Guid.NewGuid();
        DB.Cloud.StartSaga(orderId.ToString(), "支付购买SAGA事务",
            new SagaOptions
            {
                MaxRetryCount = 10,
                RetryInterval = TimeSpan.FromSeconds(10)
            })
            .Then<Saga1>(DbEnum.db1, new SagaUnit1State { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
            .Then<Saga2>(DbEnum.db2, new SagaUnit2State { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
            .Then<Saga3>(DbEnum.db3, new SagaUnit3State { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
            .Execute();
    }
}
} |
2881099/FreeSql.Cloud | 2,227 | examples/net60_tcc_saga/DB.cs | using FreeSql;
using System;
namespace net60_tcc_saga
{
public enum DbEnum { db1, db2, db3 }
/// <summary>
/// Lazily-built FreeSqlCloud with three identically-configured in-memory
/// SQLite databases; entity steering pins User→db1, Goods→db2, Order→db3.
/// (A commented read/write-splitting variant lived here originally: inspect
/// e.MethodName and redirect Select calls among e.AvailableDBKeys.)
/// </summary>
public static class DB
{
    public static FreeSqlCloud<DbEnum> Cloud => cloudLazy.Value;
    readonly static Lazy<FreeSqlCloud<DbEnum>> cloudLazy = new Lazy<FreeSqlCloud<DbEnum>>(() =>
    {
        var cloud = new FreeSqlCloud<DbEnum>("app001");
        // Log only the first line of each distributed-transaction trace entry.
        cloud.DistributeTrace += log => Console.WriteLine(log.Split('\n')[0].Trim());
        // Register the three dbs in a loop — each iteration supplies its own
        // factory producing an independent in-memory SQLite instance.
        foreach (var dbKey in new[] { DbEnum.db1, DbEnum.db2, DbEnum.db3 })
        {
            cloud.Register(dbKey, () => new FreeSqlBuilder()
                .UseConnectionString(DataType.Sqlite, @"Data Source=:memory:")
                .UseAutoSyncStructure(true)
                .Build());
        }
        // Route each entity type to its home database; unknown types keep
        // whatever key the cloud already chose.
        cloud.EntitySteering = (_, e) =>
        {
            var entityType = e.EntityType;
            if (entityType == typeof(User)) e.DBKey = DbEnum.db1;
            else if (entityType == typeof(Goods)) e.DBKey = DbEnum.db2;
            else if (entityType == typeof(Order)) e.DBKey = DbEnum.db3;
        };
        return cloud;
    });
}
}
|
2881099/FreeSql.Cloud | 2,725 | examples/net60_tcc_saga/BuySagaUnit.cs | using FreeSql;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace net60_tcc_saga
{
    // State payload for saga step 1; persisted with the unit so a retry can
    // resume with the original arguments.
    class SagaUnit1State
    {
        public int UserId { get; set; }
        public int Point { get; set; }
        public Guid BuyLogId { get; set; }
        public int GoodsId { get; set; }
        public Guid OrderId { get; set; }
    }
    // Saga step 1 (db1): deduct the user's points; Cancel compensates by refunding.
    [Description("第1步:数据库db1 扣除用户积分")]
    class Saga1 : SagaUnit<SagaUnit1State>
    {
        public override async Task Commit()
        {
            // Atomic deduction with a non-negative-balance guard; zero
            // affected rows means insufficient points.
            var affrows = await Orm.Update<User>()
                .Set(a => a.Point - State.Point)
                .Where(a => a.Id == State.UserId && a.Point >= State.Point)
                .ExecuteAffrowsAsync();
            if (affrows <= 0) throw new Exception("扣除积分失败");
            // TODO: record a point-change log?
        }
        public override async Task Cancel()
        {
            await Orm.Update<User>()
                .Set(a => a.Point + State.Point)
                .Where(a => a.Id == State.UserId)
                .ExecuteAffrowsAsync(); // refund the points
            // TODO: record a point-change log?
        }
    }
    // State payload for saga step 2.
    class SagaUnit2State
    {
        public int UserId { get; set; }
        public int Point { get; set; }
        public Guid BuyLogId { get; set; }
        public int GoodsId { get; set; }
        public Guid OrderId { get; set; }
    }
    // Saga step 2 (db2): decrement stock by one; Cancel restores it.
    [Description("第2步:数据库db2 扣除库存")]
    class Saga2 : SagaUnit<SagaUnit2State>
    {
        public override async Task Commit()
        {
            // Guard against overselling: only succeeds while Stock >= 1.
            var affrows = await Orm.Update<Goods>()
                .Set(a => a.Stock - 1)
                .Where(a => a.Id == State.GoodsId && a.Stock >= 1)
                .ExecuteAffrowsAsync();
            if (affrows <= 0) throw new Exception("扣除库存失败");
        }
        public override async Task Cancel()
        {
            await Orm.Update<Goods>()
                .Set(a => a.Stock + 1)
                .Where(a => a.Id == State.GoodsId)
                .ExecuteAffrowsAsync(); // restore the stock
        }
    }
    // State payload for saga step 3.
    class SagaUnit3State
    {
        public int UserId { get; set; }
        public int Point { get; set; }
        public Guid BuyLogId { get; set; }
        public int GoodsId { get; set; }
        public Guid OrderId { get; set; }
    }
    // Saga step 3 (db3): create the order directly in Success state.
    [Description("第3步:数据库db3 创建订单")]
    class Saga3 : SagaUnit<SagaUnit3State>
    {
        public override async Task Commit()
        {
            await Orm.Insert(new Order { Id = State.OrderId, Status = Order.OrderStatus.Success, CreateTime = DateTime.Now })
                .ExecuteAffrowsAsync();
        }
        // Final step: nothing to compensate.
        public override Task Cancel()
        {
            return Task.CompletedTask;
        }
    }
}
|
2881099/FreeSql.Cloud | 3,228 | examples/net60_tcc_saga/BuyTccUnit.cs | using FreeSql;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace net60_tcc_saga
{
    // State payload for TCC step 1; persisted with the unit so a retry can
    // resume with the original arguments.
    class TccUnit1State
    {
        public int UserId { get; set; }
        public int Point { get; set; }
        public Guid BuyLogId { get; set; }
        public int GoodsId { get; set; }
        public Guid OrderId { get; set; }
    }
    // TCC step 1 (db1): Try reserves the points up front, Confirm is a no-op,
    // Cancel refunds.
    [Description("第1步:数据库db1 扣除用户积分")]
    class Tcc1 : TccUnit<TccUnit1State>
    {
        public override async Task Try()
        {
            // Atomic deduction with a non-negative-balance guard; zero
            // affected rows means insufficient points.
            var affrows = await Orm.Update<User>()
                .Set(a => a.Point - State.Point)
                .Where(a => a.Id == State.UserId && a.Point >= State.Point)
                .ExecuteAffrowsAsync();
            if (affrows <= 0) throw new Exception("扣除积分失败");
            // TODO: record a point-change log?
        }
        public override Task Confirm()
        {
            return Task.CompletedTask;
        }
        public override async Task Cancel()
        {
            await Orm.Update<User>()
                .Set(a => a.Point + State.Point)
                .Where(a => a.Id == State.UserId)
                .ExecuteAffrowsAsync(); // refund the points
            // TODO: record a point-change log?
        }
    }
    // State payload for TCC step 2.
    class TccUnit2State
    {
        public int UserId { get; set; }
        public int Point { get; set; }
        public Guid BuyLogId { get; set; }
        public int GoodsId { get; set; }
        public Guid OrderId { get; set; }
    }
    // TCC step 2 (db2): Try reserves one unit of stock, Cancel restores it.
    [Description("第2步:数据库db2 扣除库存")]
    class Tcc2 : TccUnit<TccUnit2State>
    {
        public override async Task Try()
        {
            // Guard against overselling: only succeeds while Stock >= 1.
            var affrows = await Orm.Update<Goods>()
                .Set(a => a.Stock - 1)
                .Where(a => a.Id == State.GoodsId && a.Stock >= 1)
                .ExecuteAffrowsAsync();
            if (affrows <= 0) throw new Exception("扣除库存失败");
        }
        public override Task Confirm()
        {
            return Task.CompletedTask;
        }
        public override async Task Cancel()
        {
            await Orm.Update<Goods>()
                .Set(a => a.Stock + 1)
                .Where(a => a.Id == State.GoodsId)
                .ExecuteAffrowsAsync(); // restore the stock
        }
    }
    // State payload for TCC step 3.
    class TccUnit3State
    {
        public int UserId { get; set; }
        public int Point { get; set; }
        public Guid BuyLogId { get; set; }
        public int GoodsId { get; set; }
        public Guid OrderId { get; set; }
    }
    // TCC step 3 (db3): Try creates the order as Pending, Confirm flips it to
    // Success idempotently, Cancel has nothing to undo.
    [Description("第3步:数据库db3 创建订单")]
    class Tcc3 : TccUnit<TccUnit3State>
    {
        public override async Task Try()
        {
            await Orm.Insert(new Order { Id = State.OrderId, Status = Order.OrderStatus.Pending, CreateTime = DateTime.Now })
                .ExecuteAffrowsAsync();
        }
        public override async Task Confirm()
        {
            // Idempotent delivery: the Pending guard makes repeated Confirm
            // calls flip the status at most once.
            await Orm.Update<Order>()
                .Set(a => a.Status == Order.OrderStatus.Success)
                .Where(a => a.Id == State.OrderId && a.Status == Order.OrderStatus.Pending)
                .ExecuteAffrowsAsync();
        }
        public override Task Cancel()
        {
            return Task.CompletedTask;
        }
    }
}
|
2881099/FreeSql.Cloud | 1,970 | examples/net60_tcc_saga/Program.cs | using FreeSql;
using System;
using System.Threading.Tasks;
namespace net60_tcc_saga
{
class Program
{
    async static Task Main(string[] args)
    {
        // Seed data: Stock = 0, so step 2 (Tcc2/Saga2) fails its affrows
        // guard — this intentionally exercises cancel/compensation and the
        // retry options below.
        DB.Cloud.Insert(new User { Id = 1, Name = "testuser01", Point = 10 }).ExecuteAffrows();
        DB.Cloud.Insert(new Goods { Id = 1, Title = "testgoods01", Stock = 0 }).ExecuteAffrows();
        await TestTcc();
        await TestSaga();
        Console.ReadKey();
        DB.Cloud.Dispose();
    }
    // Runs the three-step TCC transaction; the order id doubles as the
    // distributed-transaction id.
    async static Task TestTcc()
    {
        var orderId = Guid.NewGuid();
        await DB.Cloud.StartTcc(orderId.ToString(), "支付购买TCC事务",
            new TccOptions
            {
                MaxRetryCount = 10,
                RetryInterval = TimeSpan.FromSeconds(10)
            })
            .Then<Tcc1>(DbEnum.db1, new TccUnit1State { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
            .Then<Tcc2>(DbEnum.db2, new TccUnit2State { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
            .Then<Tcc3>(DbEnum.db3, new TccUnit3State { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
            .ExecuteAsync();
    }
    // Runs the three-step SAGA transaction with the same demo payload.
    async static Task TestSaga()
    {
        var orderId = Guid.NewGuid();
        await DB.Cloud.StartSaga(orderId.ToString(), "支付购买SAGA事务",
            new SagaOptions
            {
                MaxRetryCount = 10,
                RetryInterval = TimeSpan.FromSeconds(10)
            })
            .Then<Saga1>(DbEnum.db1, new SagaUnit1State { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
            .Then<Saga2>(DbEnum.db2, new SagaUnit2State { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
            .Then<Saga3>(DbEnum.db3, new SagaUnit3State { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
            .ExecuteAsync();
    }
}
} |
2881099/FreeSql.Cloud | 2,410 | examples/ConsoleApp45/ConsoleApp45.csproj | <?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Import Project="$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props" Condition="Exists('$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props')" />
<PropertyGroup>
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
<Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
<ProjectGuid>{D6601BFC-DD3A-42EC-AC97-216A1BA34A14}</ProjectGuid>
<OutputType>Exe</OutputType>
<RootNamespace>ConsoleApp45</RootNamespace>
<AssemblyName>ConsoleApp45</AssemblyName>
<TargetFrameworkVersion>v4.5</TargetFrameworkVersion>
<FileAlignment>512</FileAlignment>
<Deterministic>true</Deterministic>
<TargetFrameworkProfile />
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
<PlatformTarget>AnyCPU</PlatformTarget>
<DebugSymbols>true</DebugSymbols>
<DebugType>full</DebugType>
<Optimize>false</Optimize>
<OutputPath>bin\Debug\</OutputPath>
<DefineConstants>DEBUG;TRACE</DefineConstants>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
<PlatformTarget>AnyCPU</PlatformTarget>
<DebugType>pdbonly</DebugType>
<Optimize>true</Optimize>
<OutputPath>bin\Release\</OutputPath>
<DefineConstants>TRACE</DefineConstants>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
</PropertyGroup>
<ItemGroup>
<Reference Include="System" />
<Reference Include="System.Core" />
<Reference Include="System.Xml.Linq" />
<Reference Include="System.Data.DataSetExtensions" />
<Reference Include="Microsoft.CSharp" />
<Reference Include="System.Data" />
<Reference Include="System.Net.Http" />
<Reference Include="System.Xml" />
</ItemGroup>
<ItemGroup>
<Compile Include="Program.cs" />
<Compile Include="Properties\AssemblyInfo.cs" />
</ItemGroup>
<ItemGroup>
<None Include="App.config" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="FreeSql.Cloud">
<Version>1.9.0</Version>
</PackageReference>
</ItemGroup>
<Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
</Project> |
2881099/FreeSql.Cloud | 2,227 | examples/netcore31_tcc_saga/DB.cs | using FreeSql;
using System;
namespace net60_tcc_saga
{
public enum DbEnum { db1, db2, db3 }
/// <summary>
/// Lazily-built FreeSqlCloud with three in-memory SQLite registrations;
/// entity steering pins User→db1, Goods→db2, Order→db3. Kept structurally
/// identical to the net40/net60 example variants.
/// </summary>
public static class DB
{
    public static FreeSqlCloud<DbEnum> Cloud => cloudLazy.Value;
    readonly static Lazy<FreeSqlCloud<DbEnum>> cloudLazy = new Lazy<FreeSqlCloud<DbEnum>>(() =>
    {
        var fsql = new FreeSqlCloud<DbEnum>("app001");
        // Log only the first line of each distributed-transaction trace entry.
        fsql.DistributeTrace += log => Console.WriteLine(log.Split('\n')[0].Trim());
        fsql.Register(DbEnum.db1, () => new FreeSqlBuilder()
            .UseConnectionString(DataType.Sqlite, @"Data Source=:memory:")
            .UseAutoSyncStructure(true)
            .Build());
        fsql.Register(DbEnum.db2, () => new FreeSqlBuilder()
            .UseConnectionString(DataType.Sqlite, @"Data Source=:memory:")
            .UseAutoSyncStructure(true)
            .Build());
        fsql.Register(DbEnum.db3, () => new FreeSqlBuilder()
            .UseConnectionString(DataType.Sqlite, @"Data Source=:memory:")
            .UseAutoSyncStructure(true)
            .Build());
        // Route each entity type to its home database.
        fsql.EntitySteering = (_, e) =>
        {
            if (e.EntityType == typeof(User)) e.DBKey = DbEnum.db1;
            else if (e.EntityType == typeof(Goods)) e.DBKey = DbEnum.db2;
            else if (e.EntityType == typeof(Order)) e.DBKey = DbEnum.db3;
            #region Alternative: read/write splitting example
            //switch (e.MethodName)
            //{
            //    case "Select":
            //        if (e.EntityType == typeof(Program)) ; // match a specific entity type
            //        if (e.DBKey == DbEnum.db1) // when the primary db was chosen
            //        {
            //            var dbkeyIndex = new Random().Next(0, e.AvailableDBKeys.Length);
            //            e.DBKey = e.AvailableDBKeys[dbkeyIndex]; // redirect to another db
            //        }
            //        break;
            //    case "Insert":
            //    case "Update":
            //    case "Delete":
            //    case "InsertOrUpdate":
            //        break;
            //}
            #endregion
        };
        return fsql;
    });
}
}
|
2881099/FreeSql.Cloud | 2,725 | examples/netcore31_tcc_saga/BuySagaUnit.cs | using FreeSql;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace net60_tcc_saga
{
    // State payload for saga step 1; persisted with the unit so a retry can
    // resume with the original arguments.
    class SagaUnit1State
    {
        public int UserId { get; set; }
        public int Point { get; set; }
        public Guid BuyLogId { get; set; }
        public int GoodsId { get; set; }
        public Guid OrderId { get; set; }
    }
    // Saga step 1 (db1): deduct the user's points; Cancel compensates by refunding.
    [Description("第1步:数据库db1 扣除用户积分")]
    class Saga1 : SagaUnit<SagaUnit1State>
    {
        public override async Task Commit()
        {
            // Atomic deduction with a non-negative-balance guard; zero
            // affected rows means insufficient points.
            var affrows = await Orm.Update<User>()
                .Set(a => a.Point - State.Point)
                .Where(a => a.Id == State.UserId && a.Point >= State.Point)
                .ExecuteAffrowsAsync();
            if (affrows <= 0) throw new Exception("扣除积分失败");
            // TODO: record a point-change log?
        }
        public override async Task Cancel()
        {
            await Orm.Update<User>()
                .Set(a => a.Point + State.Point)
                .Where(a => a.Id == State.UserId)
                .ExecuteAffrowsAsync(); // refund the points
            // TODO: record a point-change log?
        }
    }
    // State payload for saga step 2.
    class SagaUnit2State
    {
        public int UserId { get; set; }
        public int Point { get; set; }
        public Guid BuyLogId { get; set; }
        public int GoodsId { get; set; }
        public Guid OrderId { get; set; }
    }
    // Saga step 2 (db2): decrement stock by one; Cancel restores it.
    [Description("第2步:数据库db2 扣除库存")]
    class Saga2 : SagaUnit<SagaUnit2State>
    {
        public override async Task Commit()
        {
            // Guard against overselling: only succeeds while Stock >= 1.
            var affrows = await Orm.Update<Goods>()
                .Set(a => a.Stock - 1)
                .Where(a => a.Id == State.GoodsId && a.Stock >= 1)
                .ExecuteAffrowsAsync();
            if (affrows <= 0) throw new Exception("扣除库存失败");
        }
        public override async Task Cancel()
        {
            await Orm.Update<Goods>()
                .Set(a => a.Stock + 1)
                .Where(a => a.Id == State.GoodsId)
                .ExecuteAffrowsAsync(); // restore the stock
        }
    }
    // State payload for saga step 3.
    class SagaUnit3State
    {
        public int UserId { get; set; }
        public int Point { get; set; }
        public Guid BuyLogId { get; set; }
        public int GoodsId { get; set; }
        public Guid OrderId { get; set; }
    }
    // Saga step 3 (db3): create the order directly in Success state.
    [Description("第3步:数据库db3 创建订单")]
    class Saga3 : SagaUnit<SagaUnit3State>
    {
        public override async Task Commit()
        {
            await Orm.Insert(new Order { Id = State.OrderId, Status = Order.OrderStatus.Success, CreateTime = DateTime.Now })
                .ExecuteAffrowsAsync();
        }
        // Final step: nothing to compensate.
        public override Task Cancel()
        {
            return Task.CompletedTask;
        }
    }
}
|
2881099/FreeSql.Cloud | 3,228 | examples/netcore31_tcc_saga/BuyTccUnit.cs | using FreeSql;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace net60_tcc_saga
{
    // State payload for TCC step 1; persisted with the unit so a retry can
    // resume with the original arguments.
    class TccUnit1State
    {
        public int UserId { get; set; }
        public int Point { get; set; }
        public Guid BuyLogId { get; set; }
        public int GoodsId { get; set; }
        public Guid OrderId { get; set; }
    }
    // TCC step 1 (db1): Try reserves the points up front, Confirm is a no-op,
    // Cancel refunds.
    [Description("第1步:数据库db1 扣除用户积分")]
    class Tcc1 : TccUnit<TccUnit1State>
    {
        public override async Task Try()
        {
            // Atomic deduction with a non-negative-balance guard; zero
            // affected rows means insufficient points.
            var affrows = await Orm.Update<User>()
                .Set(a => a.Point - State.Point)
                .Where(a => a.Id == State.UserId && a.Point >= State.Point)
                .ExecuteAffrowsAsync();
            if (affrows <= 0) throw new Exception("扣除积分失败");
            // TODO: record a point-change log?
        }
        public override Task Confirm()
        {
            return Task.CompletedTask;
        }
        public override async Task Cancel()
        {
            await Orm.Update<User>()
                .Set(a => a.Point + State.Point)
                .Where(a => a.Id == State.UserId)
                .ExecuteAffrowsAsync(); // refund the points
            // TODO: record a point-change log?
        }
    }
    // State payload for TCC step 2.
    class TccUnit2State
    {
        public int UserId { get; set; }
        public int Point { get; set; }
        public Guid BuyLogId { get; set; }
        public int GoodsId { get; set; }
        public Guid OrderId { get; set; }
    }
    // TCC step 2 (db2): Try reserves one unit of stock, Cancel restores it.
    [Description("第2步:数据库db2 扣除库存")]
    class Tcc2 : TccUnit<TccUnit2State>
    {
        public override async Task Try()
        {
            // Guard against overselling: only succeeds while Stock >= 1.
            var affrows = await Orm.Update<Goods>()
                .Set(a => a.Stock - 1)
                .Where(a => a.Id == State.GoodsId && a.Stock >= 1)
                .ExecuteAffrowsAsync();
            if (affrows <= 0) throw new Exception("扣除库存失败");
        }
        public override Task Confirm()
        {
            return Task.CompletedTask;
        }
        public override async Task Cancel()
        {
            await Orm.Update<Goods>()
                .Set(a => a.Stock + 1)
                .Where(a => a.Id == State.GoodsId)
                .ExecuteAffrowsAsync(); // restore the stock
        }
    }
    // State payload for TCC step 3.
    class TccUnit3State
    {
        public int UserId { get; set; }
        public int Point { get; set; }
        public Guid BuyLogId { get; set; }
        public int GoodsId { get; set; }
        public Guid OrderId { get; set; }
    }
    // TCC step 3 (db3): Try creates the order as Pending, Confirm flips it to
    // Success idempotently, Cancel has nothing to undo.
    [Description("第3步:数据库db3 创建订单")]
    class Tcc3 : TccUnit<TccUnit3State>
    {
        public override async Task Try()
        {
            await Orm.Insert(new Order { Id = State.OrderId, Status = Order.OrderStatus.Pending, CreateTime = DateTime.Now })
                .ExecuteAffrowsAsync();
        }
        public override async Task Confirm()
        {
            // Idempotent delivery: the Pending guard makes repeated Confirm
            // calls flip the status at most once.
            await Orm.Update<Order>()
                .Set(a => a.Status == Order.OrderStatus.Success)
                .Where(a => a.Id == State.OrderId && a.Status == Order.OrderStatus.Pending)
                .ExecuteAffrowsAsync();
        }
        public override Task Cancel()
        {
            return Task.CompletedTask;
        }
    }
}
|
2881099/FreeSql.Cloud | 5,497 | examples/netcore31_tcc_saga/Program.cs | using FreeSql;
using System;
using System.Threading.Tasks;
namespace net60_tcc_saga
{
class Program
{
async static Task Main(string[] args)
{
DB.Cloud.Insert(new User { Id = 1, Name = "testuser01", Point = 10 }).ExecuteAffrows();
DB.Cloud.Insert(new Goods { Id = 1, Title = "testgoods01", Stock = 0 }).ExecuteAffrows();
await TestTcc();
await TestSaga();
//await TestHttpTcc();
//await TestHttpSaga();
Console.ReadKey();
DB.Cloud.Dispose();
}
async static Task TestTcc()
{
var orderId = Guid.NewGuid();
await DB.Cloud.StartTcc(orderId.ToString(), "支付购买TCC事务",
new TccOptions
{
MaxRetryCount = 10,
RetryInterval = TimeSpan.FromSeconds(10)
})
.Then<Tcc1>(DbEnum.db1, new TccUnit1State { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
.Then<Tcc2>(DbEnum.db2, new TccUnit2State { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
.Then<Tcc3>(DbEnum.db3, new TccUnit3State { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
.ExecuteAsync();
}
async static Task TestSaga()
{
var orderId = Guid.NewGuid();
await DB.Cloud.StartSaga(orderId.ToString(), "支付购买SAGA事务",
new SagaOptions
{
MaxRetryCount = 10,
RetryInterval = TimeSpan.FromSeconds(10)
})
.Then<Saga1>(DbEnum.db1, new SagaUnit1State { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
.Then<Saga2>(DbEnum.db2, new SagaUnit2State { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
.Then<Saga3>(DbEnum.db3, new SagaUnit3State { UserId = 1, Point = 10, GoodsId = 1, OrderId = orderId })
.ExecuteAsync();
}
async static Task TestHttpSaga()
{
var orderId = Guid.NewGuid();
await DB.Cloud.StartSaga(orderId.ToString(), "支付购买webapi(saga)",
new SagaOptions
{
MaxRetryCount = 10,
RetryInterval = TimeSpan.FromSeconds(10)
})
.Then<HttpSaga>(default, new HttpUnitState
{
Url = "https://192.168.1.100/saga/UserPoint",
Data = "UserId=1&Point=10&GoodsId=1&OrderId=" + orderId
})
.Then<HttpSaga>(default, new HttpUnitState
{
Url = "https://192.168.1.100/saga/GoodsStock",
Data = "UserId=1&Point=10&GoodsId=1&OrderId=" + orderId
})
.Then<HttpSaga>(default, new HttpUnitState
{
Url = "https://192.168.1.100/saga/OrderNew",
Data = "UserId=1&Point=10&GoodsId=1&OrderId=" + orderId
})
.ExecuteAsync();
}
class HttpSaga : SagaUnit<HttpUnitState>
{
public override Task Commit()
{
//Console.WriteLine("请求 webapi:" + State.Url + "/Commit" + State.Data);
return Task.CompletedTask;
}
public override Task Cancel()
{
//Console.WriteLine("请求 webapi:" + State.Url + "/Cancel" + State.Data);
return Task.CompletedTask;
}
}
async static Task TestHttpTcc()
{
    // Three HTTP-backed TCC units share the same payload; only the endpoint differs.
    var orderId = Guid.NewGuid();
    var payload = "UserId=1&Point=10&GoodsId=1&OrderId=" + orderId;
    var retryPolicy = new TccOptions
    {
        MaxRetryCount = 10,
        RetryInterval = TimeSpan.FromSeconds(10)
    };
    await DB.Cloud
        .StartTcc(orderId.ToString(), "支付购买webapi", retryPolicy)
        .Then<HttpTcc>(default, new HttpUnitState { Url = "https://192.168.1.100/tcc/UserPoint", Data = payload })
        .Then<HttpTcc>(default, new HttpUnitState { Url = "https://192.168.1.100/tcc/GoodsStock", Data = payload })
        .Then<HttpTcc>(default, new HttpUnitState { Url = "https://192.168.1.100/tcc/OrderNew", Data = payload })
        .ExecuteAsync();
}
class HttpTcc : TccUnit<HttpUnitState>
{
    // Reserve step: a real implementation would call State.Url + "/Try" with State.Data.
    public override Task Try() => Task.CompletedTask;

    // Confirm step: a real implementation would call State.Url + "/Confirm" with State.Data.
    public override Task Confirm() => Task.CompletedTask;

    // Cancel step: a real implementation would call State.Url + "/Cancel" with State.Data.
    public override Task Cancel() => Task.CompletedTask;
}
// Shared state payload for the HTTP-based TCC/SAGA units: where to call and what to send.
class HttpUnitState
{
// Base webapi url of the unit; the stage suffix (/Try, /Confirm, /Cancel, /Commit) is appended by the unit — see HttpTcc/HttpSaga.
public string Url { get; set; }
// Form/query payload, e.g. "UserId=1&Point=10&GoodsId=1&OrderId=...".
public string Data { get; set; }
}
}
} |
2881099/FreeSql.Cloud | 939 | examples/ConsoleApp45/Properties/AssemblyInfo.cs | using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
// 有关程序集的一般信息由以下
// 控制。更改这些特性值可修改
// 与程序集关联的信息。
[assembly: AssemblyTitle("ConsoleApp45")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("")]
[assembly: AssemblyProduct("ConsoleApp45")]
[assembly: AssemblyCopyright("Copyright © 2024")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
// 将 ComVisible 设置为 false 会使此程序集中的类型
//对 COM 组件不可见。如果需要从 COM 访问此程序集中的类型
//请将此类型的 ComVisible 特性设置为 true。
[assembly: ComVisible(false)]
// 如果此项目向 COM 公开,则下列 GUID 用于类型库的 ID
[assembly: Guid("d6601bfc-dd3a-42ec-ac97-216a1ba34a14")]
// 程序集的版本信息由下列四个值组成:
//
// 主版本
// 次版本
// 生成号
// 修订号
//
//可以指定所有这些值,也可以使用“生成号”和“修订号”的默认值
//通过使用 "*",如下所示:
// [assembly: AssemblyVersion("1.0.*")]
[assembly: AssemblyVersion("1.0.0.0")]
[assembly: AssemblyFileVersion("1.0.0.0")]
|
2881099/FreeSql.Cloud | 1,540 | src/FreeSql.Cloud/FreeSql.Cloud.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFrameworks>netstandard20;net461;net40</TargetFrameworks>
<Version>2.0.1</Version>
<GeneratePackageOnBuild>true</GeneratePackageOnBuild>
<Authors>FreeSql;ncc;YeXiangQin</Authors>
<Description>提供跨数据库访问,分布式事务TCC、SAGA解决方案,支持 .NET Core 2.1+, .NET Framework 4.0+.</Description>
<PackageProjectUrl>https://github.com/2881099/FreeSql.Cloud</PackageProjectUrl>
<RepositoryUrl>https://github.com/2881099/FreeSql.Cloud</RepositoryUrl>
<RepositoryType>git</RepositoryType>
<PackageLicenseExpression>MIT</PackageLicenseExpression>
<PackageTags>FreeSql;TCC;SAGA;Distributed;Transaction</PackageTags>
<PackageId>$(AssemblyName)</PackageId>
<Title>$(AssemblyName)</Title>
<IsPackable>true</IsPackable>
<GenerateAssemblyInfo>true</GenerateAssemblyInfo>
<SignAssembly>true</SignAssembly>
<AssemblyOriginatorKeyFile>key.snk</AssemblyOriginatorKeyFile>
<DelaySign>false</DelaySign>
<PackageReadmeFile>readme.md</PackageReadmeFile>
</PropertyGroup>
<ItemGroup>
<None Include="../../readme.md" Pack="true" PackagePath="\" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="FreeScheduler" Version="2.0.33" />
<PackageReference Include="FreeSql.DbContext" Version="3.5.201" />
<PackageReference Include="IdleBus" Version="1.5.3" />
<PackageReference Include="Newtonsoft.Json" Version="13.0.1" />
</ItemGroup>
<PropertyGroup Condition="'$(TargetFramework)' == 'net40'">
<DefineConstants>net40</DefineConstants>
</PropertyGroup>
</Project>
|
2881099/FreeSql.Cloud | 12,935 | src/FreeSql.Cloud/FreeSqlCloud.cs | using FreeSql.Cloud.Abstract;
using FreeSql.Cloud.Model;
using FreeSql.Cloud.Saga;
using FreeSql.Cloud.Tcc;
using FreeSql.Internal;
using System;
using System.Collections.Generic;
using System.Data;
namespace FreeSql
{
//public class FreeSqlCloud : FreeSqlCloud<string> { }
public partial class FreeSqlCloud<TDBKey> : FreeSqlCloudBase, IFreeSql
{
#if !net40
// Current ambient dbkey as an invariant-culture string (for the non-generic base class).
internal override string GetDBKey() => _dbkey.ToInvariantCultureToString();
// String-keyed convenience overload: converts the key text to TDBKey, then delegates to Change(TDBKey).
public override IFreeSql Change(DBKeyString dbkey) => Change((TDBKey)typeof(TDBKey).FromObject(dbkey?.ToString()));
#endif
// String-keyed convenience overload: converts the key text to TDBKey, then delegates to Use(TDBKey).
public override IFreeSql Use(DBKeyString dbkey) => Use((TDBKey)typeof(TDBKey).FromObject(dbkey?.ToString()));
// Distributed-transaction identity; null disables TCC/SAGA support (see ctor and Register).
public string DistributeKey { get; }
// Optional trace sink; distributed-transaction events are reported through _distributedTraceCall when set.
public Action<string> DistributeTrace;
#region EntitySteering
/// <summary>
/// Entity-type steering configuration, e.g. User -&gt; db2; afterwards fsqlc.Select&lt;User&gt;() can be used
/// directly instead of fsqlc.Change("db2").Select&lt;User&gt;()
/// </summary>
public Action<object, EntitySteeringEventArgs> EntitySteering;
public class EntitySteeringEventArgs
{
// CRUD entry point that triggered the steering, e.g. "Select", "Insert" (set via nameof in FreeSqlCloud).
public string MethodName { get; internal set; }
public Type EntityType { get; internal set; }
/// <summary>
/// Target DBKeys currently available (registered keys whose master connection pool is reachable).
/// </summary>
public TDBKey[] AvailableDBKeys { get; internal set; }
internal bool _dbkeyChanged; // set when the handler assigns DBKey, so the caller knows to honor it
internal TDBKey _dbkey;
/// <summary>
/// Steering target DBKey; overwrite this value to steer to a different target DBKey.
/// </summary>
public TDBKey DBKey
{
get => _dbkey;
set
{
_dbkeyChanged = true;
_dbkey = value;
}
}
}
#endregion
// First registered key; used as the default/fallback and hosts the TCC/SAGA bookkeeping tables.
internal TDBKey _dbkeyMaster;
// Ambient (async-flow local) current key, updated by Change()/Register().
internal AsyncLocalAccessor<TDBKey> _dbkeyCurrent;
// Effective current key: falls back to the master key when the string-typed key is still null.
internal TDBKey _dbkey
{
get
{
var val = _dbkeyCurrent.Value;
if (typeof(TDBKey) == typeof(string) && val == null) return _dbkeyMaster;
return val;
}
}
internal IFreeSql _ormMaster => _ib.Get(_dbkeyMaster);
internal IFreeSql _ormCurrent => _ib.Get(_dbkey);
// Lazy registry of IFreeSql instances keyed by TDBKey, with idle disposal.
internal IdleBus<TDBKey, IFreeSql> _ib;
// Retry scheduler for pending TCC/SAGA transactions; created in Register when DistributeKey is set.
internal FreeScheduler.Scheduler _scheduler;
internal bool _distributeTraceEnable => DistributeTrace != null;
// Formats a trace line (UTC timestamp + distribute key) and forwards it to DistributeTrace.
internal void _distributedTraceCall(string log)
{
DistributeTrace?.Invoke($"{DateTime.UtcNow.ToString("yyyy-MM-dd HH:mm:ss")} 【{(DistributeKey ?? "FreeSql.Cloud")}】{log}");
}
public FreeSqlCloud() : this(null) { }
// distributeKey: when non-blank, enables TCC/SAGA persistence; trimmed, blank normalized to null.
public FreeSqlCloud(string distributeKey)
{
DistributeKey = distributeKey?.Trim();
if (string.IsNullOrWhiteSpace(DistributeKey)) DistributeKey = null;
_ib = new IdleBus<TDBKey, IFreeSql>(TimeSpan.FromMinutes(3));
_ib.Notice += (_, __) => { };
_dbkeyCurrent = new AsyncLocalAccessor<TDBKey>(() =>
{
if (typeof(TDBKey) == typeof(string) && _dbkeyMaster == null) return (TDBKey)typeof(TDBKey).FromObject("");
return _dbkeyMaster;
});
}
#if !net40
/// <summary>
/// Switch database (effective for subsequent operations on the same thread, or after an async await).<para></para>
/// Note: for a single operation use Use(dbkey) instead.
/// </summary>
/// <param name="dbkey"></param>
/// <returns></returns>
public IFreeSql Change(TDBKey dbkey)
{
var oldkey = _dbkey;
if (_distributeTraceEnable && object.Equals(dbkey, oldkey) == false) _distributedTraceCall($"数据库切换[Change] {oldkey} -> {dbkey}");
_dbkeyCurrent.Value = dbkey;
// Disposing the returned snapshot restores the previous ambient key.
return new FreeSqlCloundSnapshot<TDBKey>(this, dbkey, () => _dbkeyCurrent.Value = oldkey);
}
#endif
/// <summary>
/// Temporarily use a database (single use; the ambient key is left untouched).
/// </summary>
/// <param name="dbkey"></param>
/// <returns></returns>
public IFreeSql Use(TDBKey dbkey)
{
var oldkey = _dbkey;
if (_distributeTraceEnable && object.Equals(dbkey, oldkey) == false) _distributedTraceCall($"数据库使用[Use] {dbkey}");
return new FreeSqlCloundSnapshot<TDBKey>(this, dbkey, null);
}
// Resolves the IFreeSql for a snapshot wrapper without touching the ambient key.
internal IFreeSql GetBySnapshot(TDBKey dbkey)
{
return _ib.Get(dbkey);
}
public bool RemoveRegister(TDBKey dbkey, bool now) => _ib.TryRemove(dbkey, now);
public bool ExistsRegister(TDBKey dbkey) => _ib.Exists(dbkey);
/// <summary>
/// Registers a database under <paramref name="dbkey"/>. The FIRST registered key becomes the master:
/// it stores the TCC/SAGA bookkeeping tables and resumes transactions left pending by a previous process.
/// </summary>
/// <param name="idle">Idle disposal timeout; null or non-positive defaults to 3 minutes.</param>
public FreeSqlCloud<TDBKey> Register(TDBKey dbkey, Func<IFreeSql> create, TimeSpan? idle = null)
{
if (idle == null || idle <= TimeSpan.Zero) idle = TimeSpan.FromMinutes(3);
if (_ib.TryRegister(dbkey, create, idle.Value))
{
if (!string.IsNullOrWhiteSpace(DistributeKey))
{
var orm = _ib.Get(dbkey);
orm.Aop.ConfigEntity += (_, e) =>
{
if (e.EntityType == typeof(UnitInvokedInfo)) e.ModifyResult.Name = $"unit_invoked_{DistributeKey}";
};
orm.CodeFirst.SyncStructure<UnitInvokedInfo>(); // idempotency check table for StartTcc(tid).Then<TccUnit1>(DbEnum.db2, null)
//orm.CodeFirst.ConfigEntity<SagaUnitInvokeInfo>(a => a.Name($"saga_{DistributeKey}_unit_invoke"));
//orm.CodeFirst.SyncStructure<SagaUnitInvokeInfo>();
}
if (_ib.GetKeys().Length == 1)
{
// First registration: adopt this key as master and, when distributed transactions are enabled,
// create the bookkeeping tables and reload unfinished TCC/SAGA transactions into the scheduler.
_dbkeyMaster = dbkey;
_dbkeyCurrent.Value = dbkey;
if (!string.IsNullOrWhiteSpace(DistributeKey))
{
if (_distributeTraceEnable) _distributedTraceCall($"{dbkey} 注册成功, 并存储 TCC/SAGA 事务相关数据");
_scheduler = new FreeSchedulerBuilder()
.OnExecuting(task => { })
.Build();
_ormMaster.Aop.ConfigEntity += (_, e) =>
{
if (e.EntityType == typeof(TccMasterInfo)) e.ModifyResult.Name = $"tcc_{DistributeKey}";
if (e.EntityType == typeof(TccUnitInfo)) e.ModifyResult.Name = $"tcc_{DistributeKey}_unit";
if (e.EntityType == typeof(SagaMasterInfo)) e.ModifyResult.Name = $"saga_{DistributeKey}";
if (e.EntityType == typeof(SagaUnitInfo)) e.ModifyResult.Name = $"saga_{DistributeKey}_unit";
};
_ormMaster.CodeFirst.SyncStructure<TccMasterInfo>();
_ormMaster.CodeFirst.SyncStructure<TccUnitInfo>();
_ormMaster.CodeFirst.SyncStructure<SagaMasterInfo>();
_ormMaster.CodeFirst.SyncStructure<SagaUnitInfo>();
#region Load unfinished historical TCC transactions
var tccPendings = _ormMaster.Select<TccMasterInfo>()
.Where(a => a.Status == TccMasterStatus.Pending && a.RetryCount < a.MaxRetryCount)
.OrderBy(a => a.CreateTime)
.ToList();
foreach (var pending in tccPendings)
_scheduler.AddTempTask(TimeSpan.FromSeconds(pending.RetryInterval), TccMaster<TDBKey>.GetTempTask(this, pending.Tid, pending.Title, pending.RetryInterval));
if (_distributeTraceEnable) _distributedTraceCall($"成功加载历史未完成 TCC 事务 {tccPendings.Count} 个");
#endregion
#region Load unfinished historical SAGA transactions
var sagaPendings = _ormMaster.Select<SagaMasterInfo>()
.Where(a => a.Status == SagaMasterStatus.Pending && a.RetryCount < a.MaxRetryCount)
.OrderBy(a => a.CreateTime)
.ToList();
foreach (var pending in sagaPendings)
_scheduler.AddTempTask(TimeSpan.FromSeconds(pending.RetryInterval), SagaMaster<TDBKey>.GetTempTask(this, pending.Tid, pending.Title, pending.RetryInterval));
if (_distributeTraceEnable) _distributedTraceCall($"成功加载历史未完成 SAGA 事务 {sagaPendings.Count} 个");
#endregion
}
}
}
return this;
}
/// <summary>
/// Begin a TCC distributed transaction. Requires DistributeKey (see ctor); refuses when more than
/// 100,000 transactions are still pending.
/// </summary>
public TccMaster<TDBKey> StartTcc(string tid, string title, TccOptions options = null)
{
if (string.IsNullOrWhiteSpace(DistributeKey)) throw new Exception("未开启 TCC 事务,请检查 ctor 构造方法");
if (_scheduler.QuantityTempTask > 10_0000)
{
if (_distributeTraceEnable) _distributedTraceCall($"TCC({tid}, {title}) 系统繁忙创建失败, 当前未完成事务 {_scheduler.QuantityTempTask} 个");
throw new Exception($"TCC({tid}, {title}) 系统繁忙创建失败, 当前未完成事务 {_scheduler.QuantityTempTask} 个");
}
return new TccMaster<TDBKey>(this, tid, title, options);
}
/// <summary>
/// Begin a SAGA distributed transaction. Requires DistributeKey (see ctor); refuses when more than
/// 100,000 transactions are still pending.
/// </summary>
public SagaMaster<TDBKey> StartSaga(string tid, string title, SagaOptions options = null)
{
    // Bug fix: the guard message previously said "TCC" (copy-paste from StartTcc); this is the SAGA entry point.
    if (string.IsNullOrWhiteSpace(DistributeKey)) throw new Exception("未开启 SAGA 事务,请检查 ctor 构造方法");
    if (_scheduler.QuantityTempTask > 10_0000)
    {
        if (_distributeTraceEnable) _distributedTraceCall($"SAGA({tid}, {title}) 系统繁忙创建失败, 当前未完成事务 {_scheduler.QuantityTempTask} 个");
        throw new Exception($"SAGA({tid}, {title}) 系统繁忙创建失败, 当前未完成事务 {_scheduler.QuantityTempTask} 个");
    }
    return new SagaMaster<TDBKey>(this, tid, title, options);
}
// IFreeSql surface: each access resolves against the current ambient dbkey.
public IAdo Ado => _ormCurrent.Ado;
public IAop Aop => _ormCurrent.Aop;
public ICodeFirst CodeFirst => _ormCurrent.CodeFirst;
public IDbFirst DbFirst => _ormCurrent.DbFirst;
public GlobalFilter GlobalFilter => _ormCurrent.GlobalFilter;
// Disposes the scheduler (pending TCC/SAGA retries) and every registered IFreeSql instance.
public void Dispose()
{
if (_distributeTraceEnable && _scheduler != null) _distributedTraceCall($"准备释放, 当前未完成事务 {_scheduler.QuantityTempTask} 个")
;
_scheduler?.Dispose();
_ib.Dispose();
if (_distributeTraceEnable) _distributedTraceCall($"成功释放");
}
public void Transaction(Action handler) => _ormCurrent.Transaction(handler);
public void Transaction(IsolationLevel isolationLevel, Action handler) => _ormCurrent.Transaction(isolationLevel, handler);
// Runs the EntitySteering hook (if configured) and returns the dbkey this CRUD call should target.
internal TDBKey GetEntitySteeringDBKey(string methodName, Type entityType, TDBKey defaultValue)
{
if (EntitySteering != null)
{
var args = new EntitySteeringEventArgs
{
MethodName = methodName,
EntityType = entityType,
AvailableDBKeys = _ib.GetKeys(a => a == null || a.Ado.MasterPool.IsAvailable),
_dbkey = defaultValue
};
EntitySteering(this, args);
if (args._dbkeyChanged) return args.DBKey;
}
return defaultValue;
}
IFreeSql GetCrudOrm(string methodName, Type entityType) => _ib.Get(GetEntitySteeringDBKey(methodName, entityType, _dbkey));
// CRUD entry points below route through GetCrudOrm so EntitySteering can redirect per entity type.
public ISelect<T1> Select<T1>() where T1 : class
{
return GetCrudOrm(nameof(Select), typeof(T1)).Select<T1>();
}
public ISelect<T1> Select<T1>(object dywhere) where T1 : class => Select<T1>().WhereDynamic(dywhere);
public IDelete<T1> Delete<T1>() where T1 : class
{
return GetCrudOrm(nameof(Delete), typeof(T1)).Delete<T1>();
}
public IDelete<T1> Delete<T1>(object dywhere) where T1 : class => Delete<T1>().WhereDynamic(dywhere);
public IUpdate<T1> Update<T1>() where T1 : class
{
return GetCrudOrm(nameof(Update), typeof(T1)).Update<T1>();
}
public IUpdate<T1> Update<T1>(object dywhere) where T1 : class => Update<T1>().WhereDynamic(dywhere);
public IInsert<T1> Insert<T1>() where T1 : class
{
return GetCrudOrm(nameof(Insert), typeof(T1)).Insert<T1>();
}
public IInsert<T1> Insert<T1>(T1 source) where T1 : class => Insert<T1>().AppendData(source);
public IInsert<T1> Insert<T1>(T1[] source) where T1 : class => Insert<T1>().AppendData(source);
public IInsert<T1> Insert<T1>(List<T1> source) where T1 : class => Insert<T1>().AppendData(source);
public IInsert<T1> Insert<T1>(IEnumerable<T1> source) where T1 : class => Insert<T1>().AppendData(source);
public IInsertOrUpdate<T1> InsertOrUpdate<T1>() where T1 : class
{
return GetCrudOrm(nameof(InsertOrUpdate), typeof(T1)).InsertOrUpdate<T1>();
}
}
}
|
2881099/FreeSql.Cloud | 20,184 | src/FreeSql.Cloud/FreesqlCloudInternalExtensions.cs | using System;
using System.Collections;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Globalization;
using System.Linq;
using System.Linq.Expressions;
using System.Numerics;
using System.Reflection;
using System.Text;
static class FreesqlCloudInternalExtensions
{
#region ExpressionTree
// 1 = expression-tree setters supported; permanently flips to 0 after the first compile failure.
static int SetSetPropertyOrFieldValueSupportExpressionTreeFlag = 1;
// Per-type, per-member cache of compiled setter delegates.
static ConcurrentDictionary<Type, ConcurrentDictionary<string, Action<object, string, object>>> _dicSetPropertyOrFieldValue = new ConcurrentDictionary<Type, ConcurrentDictionary<string, Action<object, string, object>>>();
/// <summary>
/// Sets a property or field (name matched case-insensitively) on <paramref name="entity"/> using a
/// cached compiled expression-tree setter; falls back to plain reflection if expression trees fail.
/// </summary>
public static void SetPropertyOrFieldValue(this Type entityType, object entity, string propertyName, object value)
{
if (entity == null) return;
if (entityType == null) entityType = entity.GetType();
if (SetSetPropertyOrFieldValueSupportExpressionTreeFlag == 0)
{
// Reflection fallback path: throws when the member does not exist.
if (GetPropertiesDictIgnoreCase(entityType).TryGetValue(propertyName, out var prop))
{
prop.SetValue(entity, value, null);
return;
}
if (GetFieldsDictIgnoreCase(entityType).TryGetValue(propertyName, out var field))
{
field.SetValue(entity, value);
return;
}
throw new Exception($"The property({propertyName}) was not found in the type({entityType.DisplayCsharp()})");
}
Action<object, string, object> func = null;
try
{
func = _dicSetPropertyOrFieldValue
.GetOrAdd(entityType, et => new ConcurrentDictionary<string, Action<object, string, object>>())
.GetOrAdd(propertyName, pn =>
{
var t = entityType;
MemberInfo memberinfo = entityType.GetPropertyOrFieldIgnoreCase(pn);
var parm1 = Expression.Parameter(typeof(object));
var parm2 = Expression.Parameter(typeof(string)); // member name parameter; unused by the compiled body
var parm3 = Expression.Parameter(typeof(object));
var var1Parm = Expression.Variable(t);
var exps = new List<Expression>(new Expression[] {
Expression.Assign(var1Parm, Expression.TypeAs(parm1, t))
});
if (memberinfo != null)
{
exps.Add(
Expression.Assign(
Expression.MakeMemberAccess(var1Parm, memberinfo),
Expression.Convert(
parm3,
memberinfo.GetPropertyOrFieldType()
)
)
);
}
// NOTE(review): when the member is NOT found, the compiled delegate is a silent no-op,
// while the reflection path above throws — confirm this asymmetry is intended.
return Expression.Lambda<Action<object, string, object>>(Expression.Block(new[] { var1Parm }, exps), new[] { parm1, parm2, parm3 }).Compile();
});
}
catch
{
// Expression trees unsupported on this runtime: disable them globally and retry via reflection.
System.Threading.Interlocked.Exchange(ref SetSetPropertyOrFieldValueSupportExpressionTreeFlag, 0);
SetPropertyOrFieldValue(entityType, entity, propertyName, value);
return;
}
func(entity, propertyName, value);
}
#endregion
#region 常用缓存的反射方法
static ConcurrentDictionary<Type, Dictionary<string, PropertyInfo>> _dicGetPropertiesDictIgnoreCase = new ConcurrentDictionary<Type, Dictionary<string, PropertyInfo>>();
/// <summary>
/// Case-insensitive public-property lookup for <paramref name="that"/>, cached per type.
/// Base-class properties are enumerated first (#164) so a property re-declared by a derived type wins.
/// Returns null when <paramref name="that"/> is null.
/// </summary>
public static Dictionary<string, PropertyInfo> GetPropertiesDictIgnoreCase(this Type that) => that == null ? null : _dicGetPropertiesDictIgnoreCase.GetOrAdd(that, tp =>
{
    var props = that.GetProperties().GroupBy(p => p.DeclaringType).Reverse().SelectMany(p => p); // base-class members first (#164)
    var dict = new Dictionary<string, PropertyInfo>(StringComparer.CurrentCultureIgnoreCase);
    foreach (var prop in props)
    {
        if (dict.TryGetValue(prop.Name, out var existsProp))
        {
            // Bug fix: the original compared a Type to a PropertyInfo (existsProp.DeclaringType != prop),
            // which is always true. Compare the declaring types so only a member from a *different*
            // (derived) type replaces the earlier entry; duplicates within one type keep the first hit.
            if (existsProp.DeclaringType != prop.DeclaringType) dict[prop.Name] = prop;
            continue;
        }
        dict.Add(prop.Name, prop);
    }
    return dict;
});
static ConcurrentDictionary<Type, Dictionary<string, FieldInfo>> _dicGetFieldsDictIgnoreCase = new ConcurrentDictionary<Type, Dictionary<string, FieldInfo>>();
/// <summary>
/// Case-insensitive public-field lookup for <paramref name="that"/>, cached per type.
/// Base-class fields are enumerated first (#164), so a re-declared field from a derived type wins.
/// Returns null when <paramref name="that"/> is null.
/// </summary>
public static Dictionary<string, FieldInfo> GetFieldsDictIgnoreCase(this Type that)
{
    if (that == null) return null;
    return _dicGetFieldsDictIgnoreCase.GetOrAdd(that, _ =>
    {
        var result = new Dictionary<string, FieldInfo>(StringComparer.CurrentCultureIgnoreCase);
        // Indexer assignment is add-or-overwrite — equivalent to the original ContainsKey branch.
        foreach (var field in that.GetFields().GroupBy(f => f.DeclaringType).Reverse().SelectMany(g => g))
            result[field.Name] = field;
        return result;
    });
}
/// <summary>
/// Finds a public property or field by name (case-insensitive); properties take precedence over fields.
/// Returns null when no member matches.
/// </summary>
public static MemberInfo GetPropertyOrFieldIgnoreCase(this Type that, string name)
{
    if (that.GetPropertiesDictIgnoreCase().TryGetValue(name, out var prop)) return prop;
    return that.GetFieldsDictIgnoreCase().TryGetValue(name, out var field) ? (MemberInfo)field : null;
}
/// <summary>
/// Returns the declared type of a property or field member; null for any other member kind.
/// </summary>
public static Type GetPropertyOrFieldType(this MemberInfo that)
{
    var asProperty = that as PropertyInfo;
    if (asProperty != null) return asProperty.PropertyType;
    return (that as FieldInfo)?.FieldType;
}
/// <summary>
/// Returns the text of a [Description("...")] attribute on <paramref name="that"/>, or null when
/// absent or empty. The attribute is matched by type NAME (not identity) and its value read via
/// reflection, so it works across attribute assembly versions.
/// </summary>
public static string GetDescription(this Type that)
{
object[] attrs = null;
try
{
attrs = that.GetCustomAttributes(false).ToArray(); // .NET Core reflection can throw on assembly version conflicts; treat as "no attributes"
}
catch { }
var dyattr = attrs?.Where(a => {
return ((a as Attribute)?.TypeId as Type)?.Name == "DescriptionAttribute";
}).FirstOrDefault();
if (dyattr != null)
{
// Read the first string-typed property (normally DescriptionAttribute.Description).
var valueProp = dyattr.GetType().GetProperties().Where(a => a.PropertyType == typeof(string)).FirstOrDefault();
var comment = valueProp?.GetValue(dyattr, null)?.ToString();
if (string.IsNullOrEmpty(comment) == false)
return comment;
}
return null;
}
#endregion
#region 类型转换
/// <summary>
/// Formats any value with the invariant culture; strings pass through unchanged, null yields "".
/// </summary>
internal static string ToInvariantCultureToString(this object obj)
{
    var text = obj as string;
    if (text != null) return text;
    return string.Format(CultureInfo.InvariantCulture, @"{0}", obj);
}
/// <summary>
/// Maps a flat [name, value, name, value, ...] array onto a new instance of <typeparamref name="T"/>.
/// "-" in member names is normalized to "_"; null values leave the member at its default.
/// Throws ArgumentException on odd length or unknown member names.
/// </summary>
public static T MapToClass<T>(this object[] list, Encoding encoding)
{
if (list == null) return default(T);
if (list.Length % 2 != 0) throw new ArgumentException(nameof(list)); // NOTE(review): message is just the parameter name — consider a descriptive message like the siblings below
var ttype = typeof(T);
var ret = (T)ttype.CreateInstanceGetDefaultValue();
for (var a = 0; a < list.Length; a += 2)
{
var name = list[a].ToString().Replace("-", "_");
var prop = ttype.GetPropertyOrFieldIgnoreCase(name);
if (prop == null) throw new ArgumentException($"{typeof(T).DisplayCsharp()} undefined Property {list[a]}");
if (list[a + 1] == null) continue;
ttype.SetPropertyOrFieldValue(ret, prop.Name, prop.GetPropertyOrFieldType().FromObject(list[a + 1], encoding));
}
return ret;
}
/// <summary>
/// Maps a flat [key, value, ...] array to a dictionary; the FIRST occurrence of a duplicate key wins.
/// Null values map to default(T); others are converted via FromObject when not already T.
/// </summary>
public static Dictionary<string, T> MapToHash<T>(this object[] list, Encoding encoding)
{
if (list == null) return null;
if (list.Length % 2 != 0) throw new ArgumentException($"Array {nameof(list)} length is not even");
var dic = new Dictionary<string, T>();
for (var a = 0; a < list.Length; a += 2)
{
var key = list[a].ToInvariantCultureToString();
if (dic.ContainsKey(key)) continue;
var val = list[a + 1];
if (val == null) dic.Add(key, default(T));
else dic.Add(key, val is T conval ? conval : (T)typeof(T).FromObject(val, encoding));
}
return dic;
}
/// <summary>
/// Like MapToHash, but preserves order and duplicate keys by returning key/value pairs.
/// </summary>
public static List<KeyValuePair<string, T>> MapToKvList<T>(this object[] list, Encoding encoding)
{
if (list == null) return null;
if (list.Length % 2 != 0) throw new ArgumentException($"Array {nameof(list)} length is not even");
var ret = new List<KeyValuePair<string, T>>();
for (var a = 0; a < list.Length; a += 2)
{
var key = list[a].ToInvariantCultureToString();
var val = list[a + 1];
if (val == null) ret.Add(new KeyValuePair<string, T>(key, default(T)));
else ret.Add(new KeyValuePair<string, T>(key, val is T conval ? conval : (T)typeof(T).FromObject(val, encoding)));
}
return ret;
}
/// <summary>
/// Folds a flat [a, b, a, b, ...] array into a list by applying <paramref name="selector"/> to each
/// pair; null results are skipped. Throws ArgumentException when the length is odd.
/// </summary>
public static List<T> MapToList<T>(this object[] list, Func<object, object, T> selector)
{
    if (list == null) return null;
    if (list.Length % 2 != 0) throw new ArgumentException($"Array {nameof(list)} length is not even");
    var output = new List<T>();
    var index = 0;
    while (index < list.Length)
    {
        var mapped = selector(list[index], list[index + 1]);
        if (mapped != null) output.Add(mapped);
        index += 2;
    }
    return output;
}
// Convenience wrapper over FromObject for a compile-time-known target type.
internal static T ConvertTo<T>(this object value) => (T)typeof(T).FromObject(value);
// Cache of string -> target-type parser delegates, keyed by target type.
static ConcurrentDictionary<Type, Func<string, object>> _dicFromObject = new ConcurrentDictionary<Type, Func<string, object>>();
/// <summary>
/// Best-effort general-purpose conversion of <paramref name="value"/> to <paramref name="targetType"/>:
/// byte[]/Guid round-trips, string targets, element-wise array conversion, then a cached per-type
/// string parser (invariant culture for numbers/dates, case-insensitive for enums).
/// Throws NotSupportedException when no conversion is known for a non-null value.
/// </summary>
public static object FromObject(this Type targetType, object value, Encoding encoding = null)
{
if (targetType == typeof(object)) return value;
if (encoding == null) encoding = Encoding.UTF8;
var valueIsNull = value == null;
var valueType = valueIsNull ? typeof(string) : value.GetType();
if (valueType == targetType) return value;
if (valueType == typeof(byte[])) //byte[] -> guid
{
if (targetType == typeof(Guid))
{
var bytes = value as byte[];
return Guid.TryParse(BitConverter.ToString(bytes, 0, Math.Min(bytes.Length, 36)).Replace("-", ""), out var tryguid) ? tryguid : Guid.Empty;
}
if (targetType == typeof(Guid?))
{
var bytes = value as byte[];
return Guid.TryParse(BitConverter.ToString(bytes, 0, Math.Min(bytes.Length, 36)).Replace("-", ""), out var tryguid) ? (Guid?)tryguid : null;
}
}
if (targetType == typeof(string))
{
if (valueIsNull) return null;
if (valueType == typeof(byte[])) return encoding.GetString(value as byte[]);
return value.ToInvariantCultureToString();
}
else if (targetType == typeof(byte[])) //guid -> byte[]
{
if (valueIsNull) return null;
if (valueType == typeof(Guid) || valueType == typeof(Guid?))
{
// Encode the "N"-format hex pairs of the Guid into 16 raw bytes.
var bytes = new byte[16];
var guidN = ((Guid)value).ToString("N");
for (var a = 0; a < guidN.Length; a += 2)
bytes[a / 2] = byte.Parse($"{guidN[a]}{guidN[a + 1]}", NumberStyles.HexNumber);
return bytes;
}
return encoding.GetBytes(value.ToInvariantCultureToString());
}
else if (targetType.IsArray)
{
// Element-wise recursive conversion for array sources.
if (value is Array valueArr)
{
var targetElementType = targetType.GetElementType();
var sourceArrLen = valueArr.Length;
var target = Array.CreateInstance(targetElementType, sourceArrLen);
for (var a = 0; a < sourceArrLen; a++) target.SetValue(targetElementType.FromObject(valueArr.GetValue(a), encoding), a);
return target;
}
//if (value is IList valueList)
//{
//    var targetElementType = targetType.GetElementType();
//    var sourceArrLen = valueList.Count;
//    var target = Array.CreateInstance(targetElementType, sourceArrLen);
//    for (var a = 0; a < sourceArrLen; a++) target.SetValue(targetElementType.FromObject(valueList[a], encoding), a);
//    return target;
//}
}
// Fallback: parse from the value's invariant string representation; parser cached per target type.
var func = _dicFromObject.GetOrAdd(targetType, tt =>
{
if (tt == typeof(object)) return vs => vs;
if (tt == typeof(string)) return vs => vs;
if (tt == typeof(char[])) return vs => vs == null ? null : vs.ToCharArray();
if (tt == typeof(char)) return vs => vs == null ? default(char) : vs.ToCharArray(0, 1).FirstOrDefault();
if (tt == typeof(bool)) return vs =>
{
if (vs == null) return false;
switch (vs.ToLower())
{
case "true":
case "1":
return true;
}
return false;
};
// NOTE(review): bool? returns false (not null) for null input — asymmetric with the other
// nullable branches below; confirm this is intended.
if (tt == typeof(bool?)) return vs =>
{
if (vs == null) return false;
switch (vs.ToLower())
{
case "true":
case "1":
return true;
case "false":
case "0":
return false;
}
return null;
};
if (tt == typeof(byte)) return vs => vs == null ? 0 : (byte.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? tryval : 0);
if (tt == typeof(byte?)) return vs => vs == null ? null : (byte.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? (byte?)tryval : null);
if (tt == typeof(decimal)) return vs => vs == null ? 0 : (decimal.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? tryval : 0);
if (tt == typeof(decimal?)) return vs => vs == null ? null : (decimal.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? (decimal?)tryval : null);
if (tt == typeof(double)) return vs => vs == null ? 0 : (double.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? tryval : 0);
if (tt == typeof(double?)) return vs => vs == null ? null : (double.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? (double?)tryval : null);
if (tt == typeof(float)) return vs => vs == null ? 0 : (float.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? tryval : 0);
if (tt == typeof(float?)) return vs => vs == null ? null : (float.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? (float?)tryval : null);
if (tt == typeof(int)) return vs => vs == null ? 0 : (int.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? tryval : 0);
if (tt == typeof(int?)) return vs => vs == null ? null : (int.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? (int?)tryval : null);
if (tt == typeof(long)) return vs => vs == null ? 0 : (long.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? tryval : 0);
if (tt == typeof(long?)) return vs => vs == null ? null : (long.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? (long?)tryval : null);
if (tt == typeof(sbyte)) return vs => vs == null ? 0 : (sbyte.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? tryval : 0);
if (tt == typeof(sbyte?)) return vs => vs == null ? null : (sbyte.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? (sbyte?)tryval : null);
if (tt == typeof(short)) return vs => vs == null ? 0 : (short.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? tryval : 0);
if (tt == typeof(short?)) return vs => vs == null ? null : (short.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? (short?)tryval : null);
if (tt == typeof(uint)) return vs => vs == null ? 0 : (uint.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? tryval : 0);
if (tt == typeof(uint?)) return vs => vs == null ? null : (uint.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? (uint?)tryval : null);
if (tt == typeof(ulong)) return vs => vs == null ? 0 : (ulong.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? tryval : 0);
if (tt == typeof(ulong?)) return vs => vs == null ? null : (ulong.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? (ulong?)tryval : null);
if (tt == typeof(ushort)) return vs => vs == null ? 0 : (ushort.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? tryval : 0);
if (tt == typeof(ushort?)) return vs => vs == null ? null : (ushort.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? (ushort?)tryval : null);
if (tt == typeof(DateTime)) return vs => vs == null ? DateTime.MinValue : (DateTime.TryParse(vs, out var tryval) ? tryval : DateTime.MinValue);
if (tt == typeof(DateTime?)) return vs => vs == null ? null : (DateTime.TryParse(vs, out var tryval) ? (DateTime?)tryval : null);
if (tt == typeof(DateTimeOffset)) return vs => vs == null ? DateTimeOffset.MinValue : (DateTimeOffset.TryParse(vs, out var tryval) ? tryval : DateTimeOffset.MinValue);
if (tt == typeof(DateTimeOffset?)) return vs => vs == null ? null : (DateTimeOffset.TryParse(vs, out var tryval) ? (DateTimeOffset?)tryval : null);
if (tt == typeof(TimeSpan)) return vs => vs == null ? TimeSpan.Zero : (TimeSpan.TryParse(vs, out var tryval) ? tryval : TimeSpan.Zero);
if (tt == typeof(TimeSpan?)) return vs => vs == null ? null : (TimeSpan.TryParse(vs, out var tryval) ? (TimeSpan?)tryval : null);
if (tt == typeof(Guid)) return vs => vs == null ? Guid.Empty : (Guid.TryParse(vs, out var tryval) ? tryval : Guid.Empty);
if (tt == typeof(Guid?)) return vs => vs == null ? null : (Guid.TryParse(vs, out var tryval) ? (Guid?)tryval : null);
if (tt == typeof(BigInteger)) return vs => vs == null ? 0 : (BigInteger.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? tryval : 0);
if (tt == typeof(BigInteger?)) return vs => vs == null ? null : (BigInteger.TryParse(vs, NumberStyles.Any, CultureInfo.InvariantCulture.NumberFormat, out var tryval) ? (BigInteger?)tryval : null);
if (tt.NullableTypeOrThis().IsEnum)
{
// Enums: empty/whitespace -> default value; otherwise case-insensitive name parse (throws on mismatch).
var tttype = tt.NullableTypeOrThis();
var ttdefval = tt.CreateInstanceGetDefaultValue();
return vs =>
{
if (string.IsNullOrWhiteSpace(vs)) return ttdefval;
return Enum.Parse(tttype, vs, true);
};
}
// Unknown target type: only null converts (to null); anything else is unsupported.
var localTargetType = targetType;
var localValueType = valueType;
return vs =>
{
if (vs == null) return null;
throw new NotSupportedException($"convert failed {localValueType.DisplayCsharp()} -> {localTargetType.DisplayCsharp()}");
};
});
if (valueIsNull) return func(null);
if (valueType == typeof(byte[])) return func(encoding.GetString(value as byte[]));
// Enum sources feeding an integer target are converted through their underlying numeric value.
var valueType2 = valueType.NullableTypeOrThis();
if (valueType2.IsEnum && targetType.IsIntegerType()) return func(Convert.ChangeType(value, valueType2.GetEnumUnderlyingType()).ToInvariantCultureToString());
return func(value.ToInvariantCultureToString());
}
#endregion
}
|
2881099/FreeSql.Cloud | 3,051 | src/FreeSql.Cloud/FreeSqlCloundSnapshot.cs | using FreeSql.Internal;
using FreeSql.Internal.CommonProvider;
using FreeSql.Internal.Model;
using FreeSql.Internal.ObjectPool;
using System;
using System.Collections.Generic;
using System.Data;
using System.Data.Common;
namespace FreeSql
{
// IFreeSql facade pinned to a single dbkey. Returned by FreeSqlCloud.Change/Use; CRUD entry points
// still honor EntitySteering, everything else resolves against the pinned key.
class FreeSqlCloundSnapshot<TDBKey> : IFreeSql
{
readonly FreeSqlCloud<TDBKey> _fsqlc;
readonly TDBKey _current;
// Restores the previous ambient key for Change(); null for Use() (nothing to restore).
readonly Action _dispose;
public FreeSqlCloundSnapshot(FreeSqlCloud<TDBKey> fsqlc, TDBKey current, Action dispose)
{
_fsqlc = fsqlc;
_current = current;
_dispose = dispose;
}
public IAdo Ado => _fsqlc.GetBySnapshot(_current).Ado;
public IAop Aop => _fsqlc.GetBySnapshot(_current).Aop;
public ICodeFirst CodeFirst => _fsqlc.GetBySnapshot(_current).CodeFirst;
public IDbFirst DbFirst => _fsqlc.GetBySnapshot(_current).DbFirst;
public GlobalFilter GlobalFilter => _fsqlc.GetBySnapshot(_current).GlobalFilter;
public void Dispose()
{
// Example:
// using (_fsqlc.Change("db2"))
// {
// }
_dispose?.Invoke();
}
public void Transaction(Action handler) => _fsqlc.GetBySnapshot(_current).Transaction(handler);
public void Transaction(IsolationLevel isolationLevel, Action handler) => _fsqlc.GetBySnapshot(_current).Transaction(isolationLevel, handler);
public ISelect<T1> Select<T1>() where T1 : class => _fsqlc.GetBySnapshot(_fsqlc.GetEntitySteeringDBKey(nameof(Select), typeof(T1), _current)).Select<T1>();
public ISelect<T1> Select<T1>(object dywhere) where T1 : class => Select<T1>().WhereDynamic(dywhere);
public IDelete<T1> Delete<T1>() where T1 : class => _fsqlc.GetBySnapshot(_fsqlc.GetEntitySteeringDBKey(nameof(Delete), typeof(T1), _current)).Delete<T1>();
public IDelete<T1> Delete<T1>(object dywhere) where T1 : class => Delete<T1>().WhereDynamic(dywhere);
public IUpdate<T1> Update<T1>() where T1 : class => _fsqlc.GetBySnapshot(_fsqlc.GetEntitySteeringDBKey(nameof(Update), typeof(T1), _current)).Update<T1>();
public IUpdate<T1> Update<T1>(object dywhere) where T1 : class => Update<T1>().WhereDynamic(dywhere);
public IInsert<T1> Insert<T1>() where T1 : class => _fsqlc.GetBySnapshot(_fsqlc.GetEntitySteeringDBKey(nameof(Insert), typeof(T1), _current)).Insert<T1>();
public IInsert<T1> Insert<T1>(T1 source) where T1 : class => Insert<T1>().AppendData(source);
public IInsert<T1> Insert<T1>(T1[] source) where T1 : class => Insert<T1>().AppendData(source);
public IInsert<T1> Insert<T1>(List<T1> source) where T1 : class => Insert<T1>().AppendData(source);
public IInsert<T1> Insert<T1>(IEnumerable<T1> source) where T1 : class => Insert<T1>().AppendData(source);
public IInsertOrUpdate<T1> InsertOrUpdate<T1>() where T1 : class => _fsqlc.GetBySnapshot(_fsqlc.GetEntitySteeringDBKey(nameof(InsertOrUpdate), typeof(T1), _current)).InsertOrUpdate<T1>();
}
}
|
2881099/FreeSql.Cloud | 4,468 | src/FreeSql.Cloud/FreeSqlTransaction.cs | using FreeSql.Internal;
using FreeSql.Internal.CommonProvider;
using FreeSql.Internal.Model;
using FreeSql.Internal.ObjectPool;
using System;
using System.Collections.Generic;
using System.Data;
using System.Data.Common;
namespace FreeSql
{
class FreeSqlTransaction : IFreeSql
{
readonly IFreeSql _orm;
readonly Func<DbTransaction> _resolveTran;
FreeSqlTransaction(IFreeSql fsql, Func<DbTransaction> resolveTran)
{
_orm = fsql;
_resolveTran = resolveTran;
Ado = new ScopeTransactionAdo(_orm.Ado as AdoProvider, resolveTran);
}
public static FreeSqlTransaction Create(IFreeSql fsql, Func<DbTransaction> resolveTran)
{
if (fsql == null) return null;
var scopedfsql = fsql as FreeSqlTransaction;
if (scopedfsql == null) return new FreeSqlTransaction(fsql, resolveTran);
return Create(scopedfsql._orm, resolveTran);
}
class ScopeTransactionAdo : AdoProvider
{
AdoProvider _ado;
public ScopeTransactionAdo(AdoProvider ado, Func<DbTransaction> resolveTran) : base(ado.DataType, null, null)
{
_ado = ado;
base.ResolveTransaction = resolveTran;
base.ConnectionString = ado.ConnectionString;
base.SlaveConnectionStrings = ado.SlaveConnectionStrings;
base.Identifier = ado.Identifier;
base.MasterPool = ado.MasterPool;
base._util = ado._util;
}
public override object AddslashesProcessParam(object param, Type mapType, ColumnInfo mapColumn) => _ado.AddslashesProcessParam(param, mapType, mapColumn);
public override DbCommand CreateCommand() => _ado.CreateCommand();
public override DbParameter[] GetDbParamtersByObject(string sql, object obj) => _ado.GetDbParamtersByObject(sql, obj);
public override void ReturnConnection(IObjectPool<DbConnection> pool, Object<DbConnection> conn, Exception ex) => _ado.ReturnConnection(pool, conn, ex);
}
public IAdo Ado { get; private set; }
public IAop Aop => throw new NotSupportedException("IFreeSql 对象被重写,支持事务且只能使用 CRUD 方法");
public ICodeFirst CodeFirst => throw new NotSupportedException("IFreeSql 对象被重写,支持事务且只能使用 CRUD 方法");
public IDbFirst DbFirst => throw new NotSupportedException("IFreeSql 对象被重写,支持事务且只能使用 CRUD 方法");
public GlobalFilter GlobalFilter => throw new NotSupportedException("IFreeSql 对象被重写,支持事务且只能使用 CRUD 方法");
public void Dispose() { }
public void Transaction(Action handler) => throw new NotSupportedException("IFreeSql 对象被重写,支持事务且只能使用 CRUD 方法");
public void Transaction(IsolationLevel isolationLevel, Action handler) => throw new NotSupportedException("IFreeSql 对象被重写,支持事务且只能使用 CRUD 方法");
public ISelect<T1> Select<T1>() where T1 : class
{
return _orm.Select<T1>().WithTransaction(_resolveTran?.Invoke());
}
public ISelect<T1> Select<T1>(object dywhere) where T1 : class => Select<T1>().WhereDynamic(dywhere);
public IDelete<T1> Delete<T1>() where T1 : class
{
return _orm.Delete<T1>().WithTransaction(_resolveTran?.Invoke());
}
public IDelete<T1> Delete<T1>(object dywhere) where T1 : class => Delete<T1>().WhereDynamic(dywhere);
public IUpdate<T1> Update<T1>() where T1 : class
{
return _orm.Update<T1>().WithTransaction(_resolveTran?.Invoke());
}
public IUpdate<T1> Update<T1>(object dywhere) where T1 : class => Update<T1>().WhereDynamic(dywhere);
public IInsert<T1> Insert<T1>() where T1 : class
{
return _orm.Insert<T1>().WithTransaction(_resolveTran?.Invoke());
}
public IInsert<T1> Insert<T1>(T1 source) where T1 : class => Insert<T1>().AppendData(source);
public IInsert<T1> Insert<T1>(T1[] source) where T1 : class => Insert<T1>().AppendData(source);
public IInsert<T1> Insert<T1>(List<T1> source) where T1 : class => Insert<T1>().AppendData(source);
public IInsert<T1> Insert<T1>(IEnumerable<T1> source) where T1 : class => Insert<T1>().AppendData(source);
public IInsertOrUpdate<T1> InsertOrUpdate<T1>() where T1 : class
{
return _orm.InsertOrUpdate<T1>().WithTransaction(_resolveTran?.Invoke());
}
}
}
|
2881099/FreeSql.Cloud | 2,305 | src/FreeSql.Cloud/Tcc/TccEntity.cs | using FreeSql.DataAnnotations;
using System;
using System.Data;
namespace FreeSql.Cloud.Tcc
{
    /// <summary>
    /// Persisted master record of one TCC distributed transaction (one row per transaction).
    /// </summary>
    [Index("{tablename}_idx1", "status")]
    public class TccMasterInfo
    {
        /// <summary>Transaction id (primary key).</summary>
        [Column(Name = "tid", IsPrimary = true, StringLength = 128)]
        public string Tid { get; set; }

        /// <summary>Human-readable title used in trace output.</summary>
        [Column(Name = "title")]
        public string Title { get; set; }

        /// <summary>Total number of units registered via Then().</summary>
        [Column(Name = "total")]
        public int Total { get; set; }

        // ServerTime=Utc: the database server stamps this column; CanUpdate=false keeps it immutable.
        [Column(Name = "create_time", ServerTime = DateTimeKind.Utc, CanUpdate = false)]
        public DateTime CreateTime { get; set; } = DateTime.UtcNow;

        /// <summary>When the transaction reached a final status.</summary>
        [Column(Name = "finish_time")]
        public DateTime FinishTime { get; set; }

        // Stored as its string name (MapType) for readability in the database.
        [Column(Name = "status", MapType = typeof(string), StringLength = 10)]
        public TccMasterStatus Status { get; set; }

        /// <summary>Maximum retries before the transaction is flagged for manual operation.</summary>
        [Column(Name = "max_retry_count")]
        public int MaxRetryCount { get; set; } = 30;

        /// <summary>Seconds between retries.</summary>
        [Column(Name = "retry_interval")]
        public int RetryInterval { get; set; } = 60;

        /// <summary>Retries performed so far.</summary>
        [Column(Name = "retry_count")]
        public int RetryCount { get; set; }

        /// <summary>Timestamp of the most recent retry.</summary>
        [Column(Name = "retry_time")]
        public DateTime RetryTime { get; set; }
    }

    /// <summary>Lifecycle of a TCC transaction; ManualOperation means retries were exhausted.</summary>
    public enum TccMasterStatus { Pending, Confirmed, Canceled, ManualOperation }
    /// <summary>
    /// Persisted record of one unit inside a TCC transaction (composite key tid + index).
    /// </summary>
    public class TccUnitInfo
    {
        /// <summary>Owning transaction id (part of the primary key).</summary>
        [Column(Name = "tid", IsPrimary = true, StringLength = 128)]
        public string Tid { get; set; }

        /// <summary>1-based position of the unit within the transaction (part of the primary key).</summary>
        [Column(Name = "index", IsPrimary = true)]
        public int Index { get; set; }

        /// <summary>Description taken from the unit type's DescriptionAttribute.</summary>
        [Column(Name = "description")]
        public string Description { get; set; }

        // Stored as its string name (MapType) for readability in the database.
        [Column(Name = "stage", MapType = typeof(string), StringLength = 8)]
        public TccUnitStage Stage { get; set; }

        /// <summary>AssemblyQualifiedName of the ITccUnit implementation, used to re-create it on retry.</summary>
        [Column(Name = "type_name")]
        public string TypeName { get; set; }

        // StringLength = -1 maps to an unlimited-length text column; holds the JSON-serialized state.
        [Column(Name = "state", StringLength = -1)]
        public string State { get; set; }

        /// <summary>AssemblyQualifiedName of the state object's type, used to deserialize State.</summary>
        [Column(Name = "state_type_name")]
        public string StateTypeName { get; set; }

        // ServerTime=Utc: stamped by the database server; CanUpdate=false keeps it immutable.
        [Column(Name = "create_time", ServerTime = DateTimeKind.Utc, CanUpdate = false)]
        public DateTime CreateTime { get; set; } = DateTime.UtcNow;

        /// <summary>Key of the database this unit runs against (string form of TDBKey).</summary>
        [Column(Name = "db_key", StringLength = 128)]
        public string DbKey { get; set; }
    }

    /// <summary>Current stage of a unit: Try (reserved), Confirm or Cancel (finalized).</summary>
    public enum TccUnitStage { Try, Confirm, Cancel }
}
|
2881099/FreeSql.Cloud | 18,313 | src/FreeSql.Cloud/Tcc/TccMaster.cs | using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Linq;
using System.Reflection;
using System.Threading.Tasks;
namespace FreeSql.Cloud.Tcc
{
    /// <summary>
    /// Orchestrates a TCC (Try/Confirm/Cancel) distributed transaction across the databases
    /// registered in FreeSqlCloud&lt;TDBKey&gt;. Units are registered with Then() and executed by
    /// Execute()/ExecuteAsync(); unfinished transactions are retried by a scheduler task.
    /// </summary>
    public partial class TccMaster<TDBKey>
    {
        FreeSqlCloud<TDBKey> _cloud;
        string _tid;     // transaction id, primary key of TccMasterInfo
        string _title;
        TccOptions _options;
        // Unit metadata / instances collected by Then(), executed in registration order.
        List<TccUnitInfo> _thenUnitInfos = new List<TccUnitInfo>();
        List<ITccUnit> _thenUnits = new List<ITccUnit>();

        internal TccMaster(FreeSqlCloud<TDBKey> cloud, string tid, string title, TccOptions options)
        {
            if (string.IsNullOrWhiteSpace(tid)) throw new ArgumentNullException(nameof(tid));
            _cloud = cloud;
            _tid = tid;
            _title = title;
            if (options == null) options = new TccOptions();
            // Defensive copy so later mutation by the caller cannot affect this transaction.
            _options = new TccOptions
            {
                MaxRetryCount = options.MaxRetryCount,
                RetryInterval = options.RetryInterval
            };
        }

        /// <summary>
        /// Registers a distributed transaction unit.<para></para>
        /// * Try/Confirm/Cancel share one transaction through the unit's Orm property;<para></para>
        /// * Confirm/Cancel are de-duplicated internally (idempotent on retry).
        /// </summary>
        /// <typeparam name="TUnit"></typeparam>
        /// <param name="dbkey">Target database; Try/Confirm/Cancel run in its transaction via the Orm property, idempotency handled internally</param>
        /// <param name="state">Stateless payload serialized to JSON and handed back to the unit on retry</param>
        /// <returns></returns>
        public TccMaster<TDBKey> Then<TUnit>(TDBKey dbkey, object state = null) where TUnit : ITccUnit => Then(typeof(TUnit), dbkey, true, state);

        TccMaster<TDBKey> Then(Type tccUnitType, TDBKey dbkey, bool isdbkey, object state)
        {
            if (tccUnitType == null) throw new ArgumentNullException(nameof(tccUnitType));
            // The unit must derive from TccUnit<TState> whose TState matches the supplied state
            // (or the unit's own declared TState when state is null).
            var unitTypeBase = typeof(TccUnit<>);
            if (state == null && tccUnitType.BaseType.GetGenericTypeDefinition() == typeof(TccUnit<>)) unitTypeBase = unitTypeBase.MakeGenericType(tccUnitType.BaseType.GetGenericArguments()[0]);
            else unitTypeBase = unitTypeBase.MakeGenericType(state.GetType());
            if (unitTypeBase.IsAssignableFrom(tccUnitType) == false) throw new ArgumentException($"{tccUnitType.DisplayCsharp(false)} 必须继承 {unitTypeBase.DisplayCsharp(false)}");
            // NOTE(review): '&&' here looks like it should be '||' — a type with a single
            // parameterized constructor slips through this guard; confirm intended semantics.
            var unitCtors = tccUnitType.GetConstructors();
            if (unitCtors.Length != 1 && unitCtors[0].GetParameters().Length > 0) throw new ArgumentException($"{tccUnitType.FullName} 不能使用构造函数");
            var unitTypeConved = Type.GetType(tccUnitType.AssemblyQualifiedName);
            if (unitTypeConved == null) throw new ArgumentException($"{tccUnitType.FullName} 无效");
            var unit = unitTypeConved.CreateInstanceGetDefaultValue() as ITccUnit;
            (unit as ITccUnitSetter)?.SetState(state);
            _thenUnits.Add(unit);
            _thenUnitInfos.Add(new TccUnitInfo
            {
                Description = unitTypeConved.GetDescription(),
                Index = _thenUnitInfos.Count + 1,
                Stage = TccUnitStage.Try,
                State = state == null ? null : Newtonsoft.Json.JsonConvert.SerializeObject(state),
                StateTypeName = state?.GetType().AssemblyQualifiedName,
                Tid = _tid,
                TypeName = tccUnitType.AssemblyQualifiedName,
            });
            if (isdbkey) _thenUnitInfos.Last().DbKey = dbkey.ToString();
            return this;
        }

        /// <summary>
        /// Executes the TCC transaction<para></para>
        /// true: transaction finished and CONFIRM succeeded<para></para>
        /// false: transaction finished but was CANCELed<para></para>
        /// null: waiting for eventual consistency (scheduler will retry)
        /// </summary>
        /// <returns></returns>
#if net40
        public bool? Execute()
#else
        async public Task<bool?> ExecuteAsync()
#endif
        {
            if (_cloud._ib.Quantity == 0) throw new ArgumentException($"必须注册可用的数据库");
            var units = _thenUnits.ToArray();
            var masterInfo = new TccMasterInfo
            {
                Tid = _tid,
                Title = _title,
                Total = _thenUnitInfos.Count,
                Status = TccMasterStatus.Pending,
                RetryCount = 0,
                MaxRetryCount = _options.MaxRetryCount,
                RetryInterval = (int)_options.RetryInterval.TotalSeconds,
            };
            // Persist the master row first so the transaction is recoverable from storage.
#if net40
            _cloud._ormMaster.Insert(masterInfo).ExecuteAffrows();
#else
            await _cloud._ormMaster.Insert(masterInfo).ExecuteAffrowsAsync();
#endif
            if (_cloud._distributeTraceEnable) _cloud._distributedTraceCall($"TCC ({masterInfo.Tid}, {masterInfo.Title}) Created successful, retry count: {_options.MaxRetryCount}, interval: {_options.RetryInterval.TotalSeconds}S");
            var unitInfos = new List<TccUnitInfo>();
            Exception unitException = null;
            // TRY phase: run each unit in order; the unit row insert and the unit's Try()
            // share one transaction on the master database. First failure stops the loop.
            for (var idx = 0; idx < _thenUnitInfos.Count; idx++)
            {
                try
                {
                    var ormMaster = _cloud._ormMaster;
#if net40
                    using (var conn = ormMaster.Ado.MasterPool.Get())
#else
                    using (var conn = await ormMaster.Ado.MasterPool.GetAsync())
#endif
                    {
                        var tran = conn.Value.BeginTransaction();
                        var tranIsCommited = false;
                        try
                        {
                            (units[idx] as ITccUnitSetter)?.SetUnit(_thenUnitInfos[idx]);
                            var tranOrm = FreeSqlTransaction.Create(ormMaster, () => tran);
#if net40
                            tranOrm.Insert(_thenUnitInfos[idx]).ExecuteAffrows();
                            InvokeUnit(_cloud, _thenUnitInfos[idx], units[idx], InvokeUnitMethod.Try, tranOrm);
#else
                            await tranOrm.Insert(_thenUnitInfos[idx]).ExecuteAffrowsAsync();
                            await InvokeUnitAsync(_cloud, _thenUnitInfos[idx], units[idx], InvokeUnitMethod.Try, tranOrm);
#endif
                            tran.Commit();
                            tranIsCommited = true;
                            unitInfos.Add(_thenUnitInfos[idx]);
                        }
                        finally
                        {
                            if (tranIsCommited == false)
                                tran.Rollback();
                        }
                    }
                    if (_cloud._distributeTraceEnable) _cloud._distributedTraceCall($"TCC ({masterInfo.Tid}, {masterInfo.Title}) Unit{_thenUnitInfos[idx].Index}{(string.IsNullOrWhiteSpace(_thenUnitInfos[idx].Description) ? "" : $"({_thenUnitInfos[idx].Description})")} TRY successful\r\n State: {_thenUnitInfos[idx].State}\r\n Type: {_thenUnitInfos[idx].TypeName}");
                }
                catch (Exception ex)
                {
                    // Unwrap up to two levels of InnerException for a meaningful trace message.
                    unitException = ex.InnerException?.InnerException ?? ex.InnerException ?? ex;
                    if (_cloud._distributeTraceEnable) _cloud._distributedTraceCall($"TCC ({masterInfo.Tid}, {masterInfo.Title}) Unit{_thenUnitInfos[idx].Index}{(string.IsNullOrWhiteSpace(_thenUnitInfos[idx].Description) ? "" : $"({_thenUnitInfos[idx].Description})")} TRY failed, ready to CANCEL, -ERR {unitException.Message}\r\n State: {_thenUnitInfos[idx].State}\r\n Type: {_thenUnitInfos[idx].TypeName}");
                    break;
                }
            }
            // If every TRY succeeded this CONFIRMs; otherwise it CANCELs the units that did run.
#if net40
            return ConfimCancel(_cloud, masterInfo, unitInfos, units, true);
#else
            return await ConfimCancelAsync(_cloud, masterInfo, unitInfos, units, true);
#endif
        }

        // Restores the unit's deserialized state from the persisted row (no-op when
        // type name or state is missing, or the type can no longer be resolved).
        static void SetTccState(ITccUnit unit, TccUnitInfo unitInfo)
        {
            if (string.IsNullOrWhiteSpace(unitInfo.StateTypeName)) return;
            if (unitInfo.State == null) return;
            var stateType = Type.GetType(unitInfo.StateTypeName);
            if (stateType == null) return;
            (unit as ITccUnitSetter)?.SetState(Newtonsoft.Json.JsonConvert.DeserializeObject(unitInfo.State, stateType));
        }

        // Retry entry point: reloads a pending transaction from storage, re-creates its
        // units from their persisted type names, then continues CONFIRM/CANCEL.
#if net40
        static void ConfimCancel(FreeSqlCloud<TDBKey> cloud, string tid, bool retry)
        {
            var masterInfo = cloud._ormMaster.Select<TccMasterInfo>().Where(a => a.Tid == tid && a.Status == TccMasterStatus.Pending && a.RetryCount <= a.MaxRetryCount).First();
            if (masterInfo == null) return;
            var unitInfos = cloud._ormMaster.Select<TccUnitInfo>().Where(a => a.Tid == tid).OrderBy(a => a.Index).ToList();
            var units = LocalGetUnits();
            ConfimCancel(cloud, masterInfo, unitInfos, units, retry);
#else
        async static Task ConfimCancelAsync(FreeSqlCloud<TDBKey> cloud, string tid, bool retry)
        {
            var masterInfo = await cloud._ormMaster.Select<TccMasterInfo>().Where(a => a.Tid == tid && a.Status == TccMasterStatus.Pending && a.RetryCount <= a.MaxRetryCount).FirstAsync();
            if (masterInfo == null) return;
            var unitInfos = await cloud._ormMaster.Select<TccUnitInfo>().Where(a => a.Tid == tid).OrderBy(a => a.Index).ToListAsync();
            var units = LocalGetUnits();
            await ConfimCancelAsync(cloud, masterInfo, unitInfos, units, retry);
#endif
            // Re-instantiates each unit from its stored AssemblyQualifiedName.
            ITccUnit[] LocalGetUnits() => unitInfos.Select(unitInfo =>
            {
                try
                {
                    var unitTypeDefault = Type.GetType(unitInfo.TypeName).CreateInstanceGetDefaultValue() as ITccUnit;
                    if (unitTypeDefault == null)
                    {
                        if (cloud._distributeTraceEnable) cloud._distributedTraceCall($"TCC ({masterInfo.Tid}, {masterInfo.Title}) Data error, cannot create as ITccUnit, {unitInfo.TypeName}");
                        throw new ArgumentException($"TCC ({masterInfo.Tid}, {masterInfo.Title}) Data error, cannot create as ITccUnit, {unitInfo.TypeName}");
                    }
                    return unitTypeDefault;
                }
                catch
                {
                    if (cloud._distributeTraceEnable) cloud._distributedTraceCall($"TCC ({masterInfo.Tid}, {masterInfo.Title}) Data error, cannot create as ITccUnit, {unitInfo.TypeName}");
                    throw new ArgumentException($"TCC ({masterInfo.Tid}, {masterInfo.Title}) Data error, cannot create as ITccUnit, {unitInfo.TypeName}");
                }
            })
            .ToArray();
        }

        // Core CONFIRM/CANCEL pass. CONFIRM only when every unit's TRY was persisted
        // (unitInfos.Count == Total); otherwise CANCEL. Units are processed in reverse order.
#if net40
        static bool? ConfimCancel(FreeSqlCloud<TDBKey> cloud, TccMasterInfo masterInfo, List<TccUnitInfo> unitInfos, ITccUnit[] units, bool retry)
#else
        async static Task<bool?> ConfimCancelAsync(FreeSqlCloud<TDBKey> cloud, TccMasterInfo masterInfo, List<TccUnitInfo> unitInfos, ITccUnit[] units, bool retry)
#endif
        {
            var isConfirm = unitInfos.Count == masterInfo.Total;
            var successCount = 0;
            for (var idx = masterInfo.Total - 1; idx >= 0; idx--)
            {
                // Only units still at the Try stage need work; others already finalized.
                var unitInfo = unitInfos.Where(tt => tt.Index == idx + 1 && tt.Stage == TccUnitStage.Try).FirstOrDefault();
                try
                {
                    if (unitInfo != null)
                    {
                        if ((units[idx] as ITccUnitSetter)?.StateIsValued != true)
                            SetTccState(units[idx], unitInfo);
                        var ormMaster = cloud._ormMaster;
#if net40
                        using (var conn = ormMaster.Ado.MasterPool.Get())
#else
                        using (var conn = await ormMaster.Ado.MasterPool.GetAsync())
#endif
                        {
                            var tran = conn.Value.BeginTransaction();
                            var tranIsCommited = false;
                            try
                            {
                                var tranOrm = FreeSqlTransaction.Create(ormMaster, () => tran);
                                (units[idx] as ITccUnitSetter)?.SetUnit(unitInfo);
                                // Stage transition guarded by the Where clause: only one concurrent
                                // worker can move Try -> Confirm/Cancel and thus invoke the unit.
                                var update = tranOrm.Update<TccUnitInfo>()
                                    .Where(a => a.Tid == masterInfo.Tid && a.Index == idx + 1 && a.Stage == TccUnitStage.Try)
                                    .Set(a => a.Stage, isConfirm ? TccUnitStage.Confirm : TccUnitStage.Cancel);
#if net40
                                if (update.ExecuteAffrows() == 1)
                                {
                                    if (isConfirm) InvokeUnit(cloud, unitInfo, units[idx], InvokeUnitMethod.Confirm, tranOrm);
                                    else InvokeUnit(cloud, unitInfo, units[idx], InvokeUnitMethod.Cancel, tranOrm);
                                }
#else
                                if (await update.ExecuteAffrowsAsync() == 1)
                                {
                                    if (isConfirm) await InvokeUnitAsync(cloud, unitInfo, units[idx], InvokeUnitMethod.Confirm, tranOrm);
                                    else await InvokeUnitAsync(cloud, unitInfo, units[idx], InvokeUnitMethod.Cancel, tranOrm);
                                }
#endif
                                tran.Commit();
                                tranIsCommited = true;
                            }
                            finally
                            {
                                if (tranIsCommited == false)
                                    tran.Rollback();
                            }
                        }
                        if (cloud._distributeTraceEnable) cloud._distributedTraceCall($"TCC ({masterInfo.Tid}, {masterInfo.Title}) Unit{unitInfo.Index}{(string.IsNullOrWhiteSpace(unitInfo.Description) ? "" : $"({unitInfo.Description})")} {(isConfirm ? "CONFIRM" : "CANCEL")} successful{(masterInfo.RetryCount > 0 ? $" after {masterInfo.RetryCount} retries" : "")}\r\n State: {unitInfo.State}\r\n Type: {unitInfo.TypeName}");
                    }
                    successCount++;
                }
                catch (Exception ex)
                {
                    if (unitInfo != null)
                        if (cloud._distributeTraceEnable) cloud._distributedTraceCall($"TCC ({masterInfo.Tid}, {masterInfo.Title}) Unit{unitInfo.Index}{(string.IsNullOrWhiteSpace(unitInfo.Description) ? "" : $"({unitInfo.Description})")} {(isConfirm ? "CONFIRM" : "CANCEL")} failed{(masterInfo.RetryCount > 0 ? $" after {masterInfo.RetryCount} retries" : "")}, -ERR {ex.Message}\r\n State: {unitInfo.State}\r\n Type: {unitInfo.TypeName}");
                }
            }
            if (successCount == masterInfo.Total)
            {
                // All units finalized: close the master row.
                // FreeSql Set() idiom: 'a.RetryCount + 1' increments, 'a.RetryTime == x' assigns.
                var update = cloud._ormMaster.Update<TccMasterInfo>()
                    .Where(a => a.Tid == masterInfo.Tid && a.Status == TccMasterStatus.Pending)
                    .Set(a => a.RetryCount + 1)
                    .Set(a => a.RetryTime == DateTime.UtcNow)
                    .Set(a => a.Status, isConfirm ? TccMasterStatus.Confirmed : TccMasterStatus.Canceled)
                    .Set(a => a.FinishTime == DateTime.UtcNow);
#if net40
                update.ExecuteAffrows();
#else
                await update.ExecuteAffrowsAsync();
#endif
                if (cloud._distributeTraceEnable) cloud._distributedTraceCall($"TCC ({masterInfo.Tid}, {masterInfo.Title}) Completed, all units {(isConfirm ? "CONFIRM" : "CANCEL")} successfully{(masterInfo.RetryCount > 0 ? $" after {masterInfo.RetryCount} retries" : "")}");
                return isConfirm;
            }
            else
            {
                // Not all units finalized: bump the retry counter while under the limit.
                var update = cloud._ormMaster.Update<TccMasterInfo>()
                    .Where(a => a.Tid == masterInfo.Tid && a.Status == TccMasterStatus.Pending && a.RetryCount < a.MaxRetryCount)
                    .Set(a => a.RetryCount + 1)
                    .Set(a => a.RetryTime == DateTime.UtcNow);
#if net40
                var affrows = update.ExecuteAffrows();
#else
                var affrows = await update.ExecuteAffrowsAsync();
#endif
                if (affrows == 1)
                {
                    if (retry)
                    {
                        //if (cloud.TccTraceEnable) cloud.OnTccTrace($"TCC ({tcc.Tid}, {tcc.Title}) Not completed, waiting to try again, current tasks {cloud._scheduler.QuantityTempTask}");
                        cloud._scheduler.AddTempTask(TimeSpan.FromSeconds(masterInfo.RetryInterval), GetTempTask(cloud, masterInfo.Tid, masterInfo.Title, masterInfo.RetryInterval));
                    }
                }
                else
                {
                    // Retry limit reached: flag the transaction for manual operation.
                    update = cloud._ormMaster.Update<TccMasterInfo>()
                        .Where(a => a.Tid == masterInfo.Tid && a.Status == TccMasterStatus.Pending)
                        .Set(a => a.Status, TccMasterStatus.ManualOperation);
#if net40
                    update.ExecuteAffrows();
#else
                    await update.ExecuteAffrowsAsync();
#endif
                    if (cloud._distributeTraceEnable) cloud._distributedTraceCall($"TCC ({masterInfo.Tid}, {masterInfo.Title}) Not completed, waiting for manual operation 【人工干预】");
                }
                return null;
            }
        }

        // Builds the scheduler callback that re-runs CONFIRM/CANCEL for tid; on failure it
        // records the attempt and schedules itself again after retryInterval seconds.
        internal static Action GetTempTask(FreeSqlCloud<TDBKey> cloud, string tid, string title, int retryInterval)
        {
            return () =>
            {
                try
                {
#if net40
                    ConfimCancel(cloud, tid, true);
#else
                    ConfimCancelAsync(cloud, tid, true).Wait();
#endif
                }
                catch
                {
                    try
                    {
                        cloud._ormMaster.Update<TccMasterInfo>()
                            .Where(a => a.Tid == tid && a.Status == TccMasterStatus.Pending)
                            .Set(a => a.RetryCount + 1)
                            .Set(a => a.RetryTime == DateTime.UtcNow)
                            .ExecuteAffrows();
                    }
                    catch { }
                    //if (cloud.TccTraceEnable) cloud.OnTccTrace($"TCC ({tid}, {title}) Not completed, waiting to try again, current tasks {cloud._scheduler.QuantityTempTask}");
                    cloud._scheduler.AddTempTask(TimeSpan.FromSeconds(retryInterval), GetTempTask(cloud, tid, title, retryInterval));
                }
            };
        }
    }
}
|
2881099/FreeSql.Cloud | 2,372 | src/FreeSql.Cloud/Tcc/TccUnit.cs | using FreeSql.Cloud.Tcc;
using System;
using System.Data.Common;
using System.Threading.Tasks;
namespace FreeSql
{
    /// <summary>
    /// Retry policy for a TCC distributed transaction.
    /// </summary>
    public class TccOptions
    {
        /// <summary>
        /// Maximum number of retries before the transaction is flagged for manual operation
        /// </summary>
        public int MaxRetryCount { get; set; } = 10;
        /// <summary>
        /// Interval between retries
        /// </summary>
        public TimeSpan RetryInterval { get; set; } = TimeSpan.FromSeconds(60);
    }
    /// <summary>
    /// Base class for a TCC transaction unit with a strongly-typed state payload.
    /// </summary>
    public abstract class TccUnit<TState> : ITccUnit, ITccUnitSetter
    {
        /// <summary>
        /// Persisted data of this TccUnit
        /// </summary>
        protected TccUnitInfo Unit { get; private set; }
        /// <summary>
        /// Requires the DBKey to be set by StartTcc's Then method<para></para>
        /// Try/Confirm/Cancel run inside the transaction of that DBKey<para></para>
        /// Use the Orm property to stay inside that transaction; it is a re-implemented IFreeSql
        /// </summary>
        protected IFreeSql Orm { get; private set; }
        /// <summary>
        /// Requires the State to be set by StartTcc's Then method<para></para>
        /// Try/Confirm/Cancel should work statelessly from State<para></para>
        /// because they eventually run detached from the original execution context
        /// </summary>
        protected TState State { get; private set; }
#if net40
        public abstract void Try();
        public abstract void Confirm();
        public abstract void Cancel();
#else
        public abstract Task Try();
        public abstract Task Confirm();
        public abstract Task Cancel();
#endif
        // Explicit ITccUnitSetter members: framework-only injection of runtime context.
        ITccUnitSetter ITccUnitSetter.SetUnit(TccUnitInfo value)
        {
            Unit = value;
            return this;
        }
        ITccUnitSetter ITccUnitSetter.SetOrm(IFreeSql value)
        {
            Orm = value;
            return this;
        }
        ITccUnitSetter ITccUnitSetter.SetState(object value)
        {
            State = (TState)value;
            _StateIsValued = true;
            return this;
        }
        bool _StateIsValued; // true once SetState has been called (even with a null value)
        bool ITccUnitSetter.StateIsValued => _StateIsValued;
    }
}
namespace FreeSql.Cloud.Tcc
{
    /// <summary>
    /// One TCC transaction unit: Try reserves, Confirm applies, Cancel compensates.
    /// </summary>
    public interface ITccUnit
    {
#if net40
        void Try();
        void Confirm();
        void Cancel();
#else
        Task Try();
        Task Confirm();
        Task Cancel();
#endif
    }
    /// <summary>
    /// Framework-only mutators that inject the persisted unit row, the transactional
    /// Orm and the deserialized state into an ITccUnit instance.
    /// </summary>
    public interface ITccUnitSetter
    {
        ITccUnitSetter SetUnit(TccUnitInfo value);
        ITccUnitSetter SetOrm(IFreeSql value);
        ITccUnitSetter SetState(object value);
        // True once SetState has been called, so retries do not overwrite a live state.
        bool StateIsValued { get; }
    }
}
|
2881099/FreeSql.Cloud | 3,959 | src/FreeSql.Cloud/Tcc/TccMaster_InvokeUnit.cs | using FreeSql.Cloud.Model;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Linq;
using System.Reflection;
using System.Threading.Tasks;
namespace FreeSql.Cloud.Tcc
{
    partial class TccMaster<TDBKey>
    {
        enum InvokeUnitMethod { Try, Confirm, Cancel }

        // Invokes one lifecycle method of a unit on the correct database:
        // - no DbKey: run directly (idempotency left to the caller's transaction);
        // - DbKey == master key: reuse the already-open master transaction;
        // - other DbKey: open a dedicated transaction on that database.
        // Confirm/Cancel are made idempotent by inserting a UnitInvokedInfo marker row
        // with a unique id inside the same transaction as the unit's work.
#if net40
        static void InvokeUnit(FreeSqlCloud<TDBKey> cloud, TccUnitInfo unitInfo, ITccUnit unit, InvokeUnitMethod method, IFreeSql masterTranOrm)
        {
            void LocalInvokeUnit(IFreeSql orm)
#else
        async static Task InvokeUnitAsync(FreeSqlCloud<TDBKey> cloud, TccUnitInfo unitInfo, ITccUnit unit, InvokeUnitMethod method, IFreeSql masterTranOrm)
        {
            async Task LocalInvokeUnitAsync(IFreeSql orm)
#endif
            {
                if (orm != null)
                {
                    try
                    {
                        switch (method)
                        {
                            case InvokeUnitMethod.Confirm:
                            case InvokeUnitMethod.Cancel:
                                var insert = orm.Insert(new UnitInvokedInfo { Id = $"TCC:{unitInfo.Tid},{unitInfo.Index},{method}" });
#if net40
                                insert.ExecuteAffrows();
#else
                                await insert.ExecuteAffrowsAsync();
#endif
                                break;
                        }
                    }
                    catch
                    {
                        return; // idempotency via unique constraint: this method already ran
                    }
                }
#if net40
                switch (method)
                {
                    case InvokeUnitMethod.Try: unit.Try(); break;
                    case InvokeUnitMethod.Confirm: unit.Confirm(); break;
                    case InvokeUnitMethod.Cancel: unit.Cancel(); break;
                }
#else
                switch (method)
                {
                    case InvokeUnitMethod.Try: await unit.Try(); break;
                    case InvokeUnitMethod.Confirm: await unit.Confirm(); break;
                    case InvokeUnitMethod.Cancel: await unit.Cancel(); break;
                }
#endif
            }
            // No target database recorded: invoke without the idempotency marker.
            if (string.IsNullOrWhiteSpace(unitInfo.DbKey))
            {
#if net40
                LocalInvokeUnit(null);
#else
                await LocalInvokeUnitAsync(null);
#endif
                return;
            }
            var dbkey = (TDBKey)typeof(TDBKey).FromObject(unitInfo.DbKey);
            var unitSetter = unit as ITccUnitSetter;
            // Unit targets the master database: reuse the caller's open transaction.
            if (object.Equals(cloud._dbkeyMaster, dbkey))
            {
                try
                {
                    unitSetter?.SetOrm(masterTranOrm);
#if net40
                    LocalInvokeUnit(masterTranOrm);
#else
                    await LocalInvokeUnitAsync(masterTranOrm);
#endif
                }
                finally
                {
                    unitSetter?.SetOrm(null);
                }
                return;
            }
            // Unit targets another database: open its own connection and transaction there.
            var unitFsql = cloud.Use(dbkey);
#if net40
            using (var conn = unitFsql.Ado.MasterPool.Get())
#else
            using (var conn = await unitFsql.Ado.MasterPool.GetAsync())
#endif
            {
                var tran = conn.Value.BeginTransaction();
                var TranIsCommited = false;
                try
                {
                    var tranOrm = FreeSqlTransaction.Create(unitFsql, () => tran);
                    unitSetter?.SetOrm(tranOrm);
#if net40
                    LocalInvokeUnit(tranOrm);
#else
                    await LocalInvokeUnitAsync(tranOrm);
#endif
                    tran.Commit();
                    TranIsCommited = true;
                }
                finally
                {
                    unitSetter?.SetOrm(null);
                    if (TranIsCommited == false)
                        tran.Rollback();
                }
            }
        }
    }
}
|
2881099/FreeSql.Cloud | 2,044 | src/FreeSql.Cloud/Abstract/FreeSqlCloudBase.cs | using System;
using System.Collections.Generic;
using System.Text;
using System.Threading;
namespace FreeSql.Cloud.Abstract
{
    /// <summary>
    /// Non-generic base of FreeSqlCloud, exposing string-keyed access to the registered databases.
    /// </summary>
    public abstract class FreeSqlCloudBase
    {
#if !net40
        // Key of the currently selected database — presumably for the current async flow; confirm in FreeSqlCloud.
        internal abstract string GetDBKey();
        // Switches to dbkey and returns its IFreeSql — NOTE(review): likely affects the ambient context; confirm.
        public abstract IFreeSql Change(DBKeyString dbkey);
#endif
        // Returns the IFreeSql registered under dbkey.
        public abstract IFreeSql Use(DBKeyString dbkey);
    }
public class DBKeyString
{
string _dbkey;
public override string ToString() => _dbkey;
public static implicit operator DBKeyString(string dbkey) => string.IsNullOrWhiteSpace(dbkey) ? null : new DBKeyString { _dbkey = dbkey };
public static implicit operator string(DBKeyString dbkey) => dbkey?.ToString();
}
    /// <summary>
    /// Wraps AsyncLocal&lt;T&gt; with a lazily evaluated default: reads return DefaultValue()
    /// until a value has been explicitly assigned in the current flow. On net40, where
    /// AsyncLocal does not exist, a single-slot fake (no flow semantics) is used instead.
    /// </summary>
    public class AsyncLocalAccessor<T>
    {
        Func<T> _defaultValue;
        public AsyncLocalAccessor(Func<T> defaultValue)
        {
            _defaultValue = defaultValue;
            // Seed the current context so reads have a holder to consult.
            if (_asyncLocal.Value == null) _asyncLocal.Value = new ValueHolder { DefaultValue = _defaultValue };
        }
        public T Value
        {
            get
            {
                if (_asyncLocal.Value != null) return _asyncLocal.Value.GetValue();
                // No holder in this context (a flow the constructor never touched).
                return default;
            }
            set
            {
                if (_asyncLocal.Value == null) _asyncLocal.Value = new ValueHolder { DefaultValue = _defaultValue };
                _asyncLocal.Value.SetValue(value);
            }
        }
        // Returns the explicitly assigned value once one exists; otherwise defers to DefaultValue().
        class ValueHolder
        {
            T _rawValue;
            bool _rawValueChanged = false; // true after the first SetValue call
            public Func<T> DefaultValue { get; set; }
            public T GetValue() => _rawValueChanged ? _rawValue : DefaultValue();
            public void SetValue(T value)
            {
                _rawValueChanged = true;
                _rawValue = value;
            }
        }
#if !net40
        AsyncLocal<ValueHolder> _asyncLocal = new AsyncLocal<ValueHolder>();
#else
        // net40 fallback: one shared slot, no per-async-flow isolation.
        AsyncLocalFake<ValueHolder> _asyncLocal = new AsyncLocalFake<ValueHolder>();
        class AsyncLocalFake<T2>
        {
            public T2 Value { get; set; }
        }
#endif
    }
}
|
2881099/FreeSql.Cloud | 2,270 | src/FreeSql.Cloud/Saga/SagaUnit.cs | using FreeSql.Cloud.Saga;
using System;
using System.Data.Common;
using System.Threading.Tasks;
namespace FreeSql
{
    /// <summary>
    /// Retry policy for a SAGA distributed transaction.
    /// </summary>
    public class SagaOptions
    {
        /// <summary>
        /// Maximum number of retries before the saga is flagged for manual operation
        /// </summary>
        public int MaxRetryCount { get; set; } = 10;
        /// <summary>
        /// Interval between retries
        /// </summary>
        public TimeSpan RetryInterval { get; set; } = TimeSpan.FromSeconds(60);
    }
    /// <summary>
    /// Base class for a SAGA transaction unit with a strongly-typed state payload.
    /// </summary>
    public abstract class SagaUnit<TState> : ISagaUnit, ISagaUnitSetter
    {
        /// <summary>
        /// Persisted data of this SagaUnit
        /// </summary>
        protected SagaUnitInfo Unit { get; private set; }
        /// <summary>
        /// Requires the DBKey to be set by StartSaga's Then method<para></para>
        /// Commit/Cancel run inside the transaction of that DBKey<para></para>
        /// Use the Orm property to stay inside that transaction; it is a re-implemented IFreeSql
        /// </summary>
        protected IFreeSql Orm { get; private set; }
        /// <summary>
        /// Requires the State to be set by StartSaga's Then method<para></para>
        /// Commit/Cancel should work statelessly from State<para></para>
        /// because they eventually run detached from the original execution context
        /// </summary>
        protected TState State { get; private set; }
#if net40
        public abstract void Commit();
        public abstract void Cancel();
#else
        public abstract Task Commit();
        public abstract Task Cancel();
#endif
        // Explicit ISagaUnitSetter members: framework-only injection of runtime context.
        ISagaUnitSetter ISagaUnitSetter.SetUnit(SagaUnitInfo value)
        {
            Unit = value;
            return this;
        }
        ISagaUnitSetter ISagaUnitSetter.SetOrm(IFreeSql value)
        {
            Orm = value;
            return this;
        }
        ISagaUnitSetter ISagaUnitSetter.SetState(object value)
        {
            State = (TState)value;
            _StateIsValued = true;
            return this;
        }
        bool _StateIsValued; // true once SetState has been called (even with a null value)
        bool ISagaUnitSetter.StateIsValued => _StateIsValued;
    }
}
namespace FreeSql.Cloud.Saga
{
    /// <summary>
    /// One SAGA transaction unit: Commit performs the local step, Cancel compensates it.
    /// </summary>
    public interface ISagaUnit
    {
#if net40
        void Commit();
        void Cancel();
#else
        Task Commit();
        Task Cancel();
#endif
    }
    /// <summary>
    /// Framework-only mutators that inject the persisted unit row, the transactional
    /// Orm and the deserialized state into an ISagaUnit instance.
    /// </summary>
    public interface ISagaUnitSetter
    {
        ISagaUnitSetter SetUnit(SagaUnitInfo value);
        ISagaUnitSetter SetOrm(IFreeSql value);
        ISagaUnitSetter SetState(object value);
        // True once SetState has been called, so retries do not overwrite a live state.
        bool StateIsValued { get; }
    }
}
|
2881099/FreeSql.Cloud | 18,268 | src/FreeSql.Cloud/Saga/SagaMaster.cs | using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Linq;
using System.Reflection;
using System.Threading.Tasks;
namespace FreeSql.Cloud.Saga
{
public partial class SagaMaster<TDBKey>
{
        FreeSqlCloud<TDBKey> _cloud;
        string _tid;     // saga transaction id, primary key of SagaMasterInfo
        string _title;
        SagaOptions _options;
        // Unit metadata / instances collected by Then(), committed in registration order.
        List<SagaUnitInfo> _thenUnitInfos = new List<SagaUnitInfo>();
        List<ISagaUnit> _thenUnits = new List<ISagaUnit>();

        internal SagaMaster(FreeSqlCloud<TDBKey> cloud, string tid, string title, SagaOptions options)
        {
            if (string.IsNullOrWhiteSpace(tid)) throw new ArgumentNullException(nameof(tid));
            _cloud = cloud;
            _tid = tid;
            _title = title;
            if (options == null) options = new SagaOptions();
            // Defensive copy so later mutation by the caller cannot affect this saga.
            _options = new SagaOptions
            {
                MaxRetryCount = options.MaxRetryCount,
                RetryInterval = options.RetryInterval
            };
        }
        /// <summary>
        /// Registers a distributed transaction unit.<para></para>
        /// * Commit/Cancel share one transaction through the unit's Orm property;<para></para>
        /// * Cancel is de-duplicated internally (idempotent on retry).
        /// </summary>
        /// <typeparam name="TUnit"></typeparam>
        /// <param name="dbkey">Target database; Commit/Cancel run in its transaction via the Orm property, idempotency handled internally</param>
        /// <param name="state">Stateless payload serialized to JSON and handed back to the unit on retry</param>
        /// <returns></returns>
        public SagaMaster<TDBKey> Then<TUnit>(TDBKey dbkey, object state = null) where TUnit : ISagaUnit => Then(typeof(TUnit), dbkey, true, state);

        SagaMaster<TDBKey> Then(Type sagaUnitType, TDBKey dbkey, bool isdbkey, object state)
        {
            if (sagaUnitType == null) throw new ArgumentNullException(nameof(sagaUnitType));
            // The unit must derive from SagaUnit<TState> whose TState matches the supplied state
            // (or the unit's own declared TState when state is null).
            var unitTypeBase = typeof(SagaUnit<>);
            if (state == null && sagaUnitType.BaseType.GetGenericTypeDefinition() == typeof(SagaUnit<>)) unitTypeBase = unitTypeBase.MakeGenericType(sagaUnitType.BaseType.GetGenericArguments()[0]);
            else unitTypeBase = unitTypeBase.MakeGenericType(state.GetType());
            if (unitTypeBase.IsAssignableFrom(sagaUnitType) == false) throw new ArgumentException($"{sagaUnitType.DisplayCsharp(false)} 必须继承 {unitTypeBase.DisplayCsharp(false)}");
            // NOTE(review): '&&' here looks like it should be '||' — a type with a single
            // parameterized constructor slips through this guard; confirm intended semantics.
            var unitCtors = sagaUnitType.GetConstructors();
            if (unitCtors.Length != 1 && unitCtors[0].GetParameters().Length > 0) throw new ArgumentException($"{sagaUnitType.FullName} 不能使用构造函数");
            var unitTypeConved = Type.GetType(sagaUnitType.AssemblyQualifiedName);
            if (unitTypeConved == null) throw new ArgumentException($"{sagaUnitType.FullName} 无效");
            var unit = unitTypeConved.CreateInstanceGetDefaultValue() as ISagaUnit;
            (unit as ISagaUnitSetter)?.SetState(state);
            _thenUnits.Add(unit);
            _thenUnitInfos.Add(new SagaUnitInfo
            {
                Description = unitTypeConved.GetDescription(),
                Index = _thenUnitInfos.Count + 1,
                Stage = SagaUnitStage.Commit,
                State = state == null ? null : Newtonsoft.Json.JsonConvert.SerializeObject(state),
                StateTypeName = state?.GetType().AssemblyQualifiedName,
                Tid = _tid,
                TypeName = sagaUnitType.AssemblyQualifiedName,
            });
            if (isdbkey) _thenUnitInfos.Last().DbKey = dbkey.ToString();
            return this;
        }
        /// <summary>
        /// Executes the SAGA transaction<para></para>
        /// true: transaction finished and COMMIT succeeded<para></para>
        /// false: transaction finished but was CANCELed<para></para>
        /// null: waiting for eventual consistency (scheduler will retry)
        /// </summary>
        /// <returns></returns>
#if net40
        public bool? Execute()
#else
        async public Task<bool?> ExecuteAsync()
#endif
        {
            if (_cloud._ib.Quantity == 0) throw new ArgumentException($"必须注册可用的数据库");
            var units = _thenUnits.ToArray();
            var masterInfo = new SagaMasterInfo
            {
                Tid = _tid,
                Title = _title,
                Total = _thenUnitInfos.Count,
                Status = SagaMasterStatus.Pending,
                RetryCount = 0,
                MaxRetryCount = _options.MaxRetryCount,
                RetryInterval = (int)_options.RetryInterval.TotalSeconds,
            };
            // Persist the master row first so the saga is recoverable from storage.
#if net40
            _cloud._ormMaster.Insert(masterInfo).ExecuteAffrows();
#else
            await _cloud._ormMaster.Insert(masterInfo).ExecuteAffrowsAsync();
#endif
            if (_cloud._distributeTraceEnable) _cloud._distributedTraceCall($"SAGA({masterInfo.Tid}, {masterInfo.Title}) Created successful, retry count: {_options.MaxRetryCount}, interval: {_options.RetryInterval.TotalSeconds}S");
            var unitInfos = new List<SagaUnitInfo>();
            Exception unitException = null;
            // COMMIT phase: run each unit in order; the unit row insert and the unit's Commit()
            // share one transaction on the master database. First failure stops the loop.
            for (var idx = 0; idx < _thenUnitInfos.Count; idx++)
            {
                try
                {
                    var ormMaster = _cloud._ormMaster;
#if net40
                    using (var conn = ormMaster.Ado.MasterPool.Get())
#else
                    using (var conn = await ormMaster.Ado.MasterPool.GetAsync())
#endif
                    {
                        var tran = conn.Value.BeginTransaction();
                        var tranIsCommited = false;
                        try
                        {
                            (units[idx] as ISagaUnitSetter)?.SetUnit(_thenUnitInfos[idx]);
                            var tranOrm = FreeSqlTransaction.Create(ormMaster, () => tran);
#if net40
                            tranOrm.Insert(_thenUnitInfos[idx]).ExecuteAffrows();
                            InvokeUnit(_cloud, _thenUnitInfos[idx], units[idx], InvokeUnitMethod.Commit, tranOrm);
#else
                            await tranOrm.Insert(_thenUnitInfos[idx]).ExecuteAffrowsAsync();
                            await InvokeUnitAsync(_cloud, _thenUnitInfos[idx], units[idx], InvokeUnitMethod.Commit, tranOrm);
#endif
                            tran.Commit();
                            tranIsCommited = true;
                            unitInfos.Add(_thenUnitInfos[idx]);
                        }
                        finally
                        {
                            if (tranIsCommited == false)
                                tran.Rollback();
                        }
                    }
                    if (_cloud._distributeTraceEnable) _cloud._distributedTraceCall($"SAGA({masterInfo.Tid}, {masterInfo.Title}) Unit{_thenUnitInfos[idx].Index}{(string.IsNullOrWhiteSpace(_thenUnitInfos[idx].Description) ? "" : $"({_thenUnitInfos[idx].Description})")} COMMIT successful\r\n State: {_thenUnitInfos[idx].State}\r\n Type: {_thenUnitInfos[idx].TypeName}");
                }
                catch (Exception ex)
                {
                    // Unwrap up to two levels of InnerException for a meaningful trace message.
                    unitException = ex.InnerException?.InnerException ?? ex.InnerException ?? ex;
                    if (_cloud._distributeTraceEnable) _cloud._distributedTraceCall($"SAGA({masterInfo.Tid}, {masterInfo.Title}) Unit{_thenUnitInfos[idx].Index}{(string.IsNullOrWhiteSpace(_thenUnitInfos[idx].Description) ? "" : $"({_thenUnitInfos[idx].Description})")} COMMIT failed, ready to CANCEL, -ERR {unitException.Message}\r\n State: {_thenUnitInfos[idx].State}\r\n Type: {_thenUnitInfos[idx].TypeName}");
                    break;
                }
            }
            // If every COMMIT succeeded the saga completes; otherwise compensate (CANCEL).
#if net40
            return Cancel(_cloud, masterInfo, unitInfos, units, true);
#else
            return await CancelAsync(_cloud, masterInfo, unitInfos, units, true);
#endif
        }
// Restores a unit's persisted JSON state onto the freshly created unit instance.
// Silently does nothing when no state was persisted or its CLR type can no longer
// be resolved from the stored assembly-qualified name.
static void SetSagaState(ISagaUnit unit, SagaUnitInfo unitInfo)
{
if (string.IsNullOrWhiteSpace(unitInfo.StateTypeName) || unitInfo.State == null) return;
var resolvedType = Type.GetType(unitInfo.StateTypeName);
if (resolvedType != null)
{
var restoredState = Newtonsoft.Json.JsonConvert.DeserializeObject(unitInfo.State, resolvedType);
(unit as ISagaUnitSetter)?.SetState(restoredState);
}
}
// Reloads a still-pending saga by tid from the master database and drives it toward
// completion (compensation / retry). Used by the scheduler's retry task.
// NOTE: the #if/#else branches differ only in sync vs. async ORM calls; both method
// bodies deliberately share the LocalGetUnits tail after #endif and the closing brace.
#if net40
static void Cancel(FreeSqlCloud<TDBKey> cloud, string tid, bool retry)
{
var masterInfo = cloud._ormMaster.Select<SagaMasterInfo>().Where(a => a.Tid == tid && a.Status == SagaMasterStatus.Pending && a.RetryCount <= a.MaxRetryCount).First();
if (masterInfo == null) return;
var unitInfos = cloud._ormMaster.Select<SagaUnitInfo>().Where(a => a.Tid == tid).OrderBy(a => a.Index).ToList();
var units = LocalGetUnits();
Cancel(cloud, masterInfo, unitInfos, units, retry);
#else
async static Task CancelAsync(FreeSqlCloud<TDBKey> cloud, string tid, bool retry)
{
// Only sagas that are Pending and still within their retry budget are processed.
var masterInfo = await cloud._ormMaster.Select<SagaMasterInfo>().Where(a => a.Tid == tid && a.Status == SagaMasterStatus.Pending && a.RetryCount <= a.MaxRetryCount).FirstAsync();
if (masterInfo == null) return;
var unitInfos = await cloud._ormMaster.Select<SagaUnitInfo>().Where(a => a.Tid == tid).OrderBy(a => a.Index).ToListAsync();
var units = LocalGetUnits();
await CancelAsync(cloud, masterInfo, unitInfos, units, retry);
#endif
// Rebuilds the ISagaUnit instances from their persisted type names; throws when a
// stored type can no longer be resolved or does not implement ISagaUnit.
ISagaUnit[] LocalGetUnits() => unitInfos.Select(unitInfo =>
{
try
{
var unitTypeDefault = Type.GetType(unitInfo.TypeName).CreateInstanceGetDefaultValue() as ISagaUnit;
if (unitTypeDefault == null)
{
if (cloud._distributeTraceEnable) cloud._distributedTraceCall($"SAGA({masterInfo.Tid}, {masterInfo.Title}) Data error, cannot create as ISagaUnit, {unitInfo.TypeName}");
throw new ArgumentException($"SAGA({masterInfo.Tid}, {masterInfo.Title}) Data error, cannot create as ISagaUnit, {unitInfo.TypeName}");
}
return unitTypeDefault;
}
catch
{
// Also covers Type.GetType returning null (NullReferenceException above).
if (cloud._distributeTraceEnable) cloud._distributedTraceCall($"SAGA({masterInfo.Tid}, {masterInfo.Title}) Data error, cannot create as ISagaUnit, {unitInfo.TypeName}");
throw new ArgumentException($"SAGA({masterInfo.Tid}, {masterInfo.Title}) Data error, cannot create as ISagaUnit, {unitInfo.TypeName}");
}
})
.ToArray();
}
// Finalizes a saga: when every unit committed, marks the master Commited (returns true);
// otherwise compensates committed units in reverse order and, once all are compensated,
// marks the master Canceled (returns false). If neither state is reached yet, schedules
// a retry or flags the saga for manual operation and returns null.
#if net40
static bool? Cancel(FreeSqlCloud<TDBKey> cloud, SagaMasterInfo masterInfo, List<SagaUnitInfo> unitInfos, ISagaUnit[] units, bool retry)
#else
async static Task<bool?> CancelAsync(FreeSqlCloud<TDBKey> cloud, SagaMasterInfo masterInfo, List<SagaUnitInfo> unitInfos, ISagaUnit[] units, bool retry)
#endif
{
// All units committed exactly when one unit record exists per planned unit.
var isCommited = unitInfos.Count == masterInfo.Total;
var isCanceled = false;
if (isCommited == false)
{
var cancelCount = 0;
// Compensate in reverse registration order (last committed unit first).
for (var idx = masterInfo.Total - 1; idx >= 0; idx--)
{
// Only units still in the Commit stage need a CANCEL; others count as done.
var unitInfo = unitInfos.Where(tt => tt.Index == idx + 1 && tt.Stage == SagaUnitStage.Commit).FirstOrDefault();
try
{
if (unitInfo != null)
{
// Restore persisted state unless the unit instance already carries one.
if ((units[idx] as ISagaUnitSetter)?.StateIsValued != true)
SetSagaState(units[idx], unitInfo);
var ormMaster = cloud._ormMaster;
#if net40
using (var conn = ormMaster.Ado.MasterPool.Get())
#else
using (var conn = await ormMaster.Ado.MasterPool.GetAsync())
#endif
{
var tran = conn.Value.BeginTransaction();
var tranIsCommited = false;
try
{
var tranOrm = FreeSqlTransaction.Create(ormMaster, () => tran);
(units[idx] as ISagaUnitSetter)?.SetUnit(unitInfo);
// Flip the stage Commit -> Cancel inside the same transaction as the
// unit's Cancel() call; the affected-rows check guards against a
// concurrent worker compensating the same unit.
var update = tranOrm.Update<SagaUnitInfo>()
.Where(a => a.Tid == masterInfo.Tid && a.Index == idx + 1 && a.Stage == SagaUnitStage.Commit)
.Set(a => a.Stage, SagaUnitStage.Cancel);
#if net40
if (update.ExecuteAffrows() == 1)
InvokeUnit(cloud, unitInfo, units[idx], InvokeUnitMethod.Cancel, tranOrm);
#else
if (await update.ExecuteAffrowsAsync() == 1)
await InvokeUnitAsync(cloud, unitInfo, units[idx], InvokeUnitMethod.Cancel, tranOrm);
#endif
tran.Commit();
tranIsCommited = true;
}
finally
{
if (tranIsCommited == false)
tran.Rollback();
}
}
if (cloud._distributeTraceEnable) cloud._distributedTraceCall($"SAGA({masterInfo.Tid}, {masterInfo.Title}) Unit{unitInfo.Index}{(string.IsNullOrWhiteSpace(unitInfo.Description) ? "" : $"({unitInfo.Description})")} {(isCommited ? "COMMIT" : "CANCEL")} successful{(masterInfo.RetryCount > 0 ? $" after {masterInfo.RetryCount} retries" : "")}\r\n State: {unitInfo.State}\r\n Type: {unitInfo.TypeName}");
}
cancelCount++;
}
catch (Exception ex)
{
// A failed compensation is only traced; the saga stays Pending and is retried.
if (unitInfo != null)
if (cloud._distributeTraceEnable) cloud._distributedTraceCall($"SAGA({masterInfo.Tid}, {masterInfo.Title}) Unit{unitInfo.Index}{(string.IsNullOrWhiteSpace(unitInfo.Description) ? "" : $"({unitInfo.Description})")} {(isCommited ? "COMMIT" : "CANCEL")} failed{(masterInfo.RetryCount > 0 ? $" after {masterInfo.RetryCount} retries" : "")}, -ERR {ex.Message}\r\n State: {unitInfo.State}\r\n Type: {unitInfo.TypeName}");
}
}
isCanceled = cancelCount == masterInfo.Total;
}
if (isCommited || isCanceled)
{
// Final state reached: stamp the master record (FreeSql expression-set syntax:
// `a.RetryCount + 1` increments, `a.X == value` assigns).
var update = cloud._ormMaster.Update<SagaMasterInfo>()
.Where(a => a.Tid == masterInfo.Tid && a.Status == SagaMasterStatus.Pending)
.Set(a => a.RetryCount + 1)
.Set(a => a.RetryTime == DateTime.UtcNow)
.Set(a => a.Status, isCommited ? SagaMasterStatus.Commited : SagaMasterStatus.Canceled)
.Set(a => a.FinishTime == DateTime.UtcNow);
#if net40
update.ExecuteAffrows();
#else
await update.ExecuteAffrowsAsync();
#endif
if (cloud._distributeTraceEnable) cloud._distributedTraceCall($"SAGA({masterInfo.Tid}, {masterInfo.Title}) Completed, all units {(isCommited ? "COMMIT" : "CANCEL")} successfully{(masterInfo.RetryCount > 0 ? $" after {masterInfo.RetryCount} retries" : "")}");
return isCommited;
}
else
{
// Not final yet: consume one retry if the budget allows.
var update = cloud._ormMaster.Update<SagaMasterInfo>()
.Where(a => a.Tid == masterInfo.Tid && a.Status == SagaMasterStatus.Pending && a.RetryCount < a.MaxRetryCount)
.Set(a => a.RetryCount + 1)
.Set(a => a.RetryTime == DateTime.UtcNow);
#if net40
var affrows = update.ExecuteAffrows();
#else
var affrows = await update.ExecuteAffrowsAsync();
#endif
if (affrows == 1)
{
if (retry)
{
//if (cloud._distributeTraceEnable) cloud._distributedTraceCall($"SAGA({saga.Tid}, {saga.Title}) Not completed, waiting to try again, current tasks {cloud._scheduler.QuantityTempTask}");
cloud._scheduler.AddTempTask(TimeSpan.FromSeconds(masterInfo.RetryInterval), GetTempTask(cloud, masterInfo.Tid, masterInfo.Title, masterInfo.RetryInterval));
}
}
else
{
// Retry budget exhausted: hand the saga over to manual operation.
update = cloud._ormMaster.Update<SagaMasterInfo>()
.Where(a => a.Tid == masterInfo.Tid && a.Status == SagaMasterStatus.Pending)
.Set(a => a.Status, SagaMasterStatus.ManualOperation);
#if net40
update.ExecuteAffrows();
#else
await update.ExecuteAffrowsAsync();
#endif
if (cloud._distributeTraceEnable) cloud._distributedTraceCall($"SAGA({masterInfo.Tid}, {masterInfo.Title}) Not completed, waiting for manual operation 【人工干预】");
}
return null;
}
}
// Builds the scheduler callback that retries a pending saga. If the retry itself
// throws, the retry counter is still advanced (best effort) and the task reschedules
// itself after retryInterval seconds.
internal static Action GetTempTask(FreeSqlCloud<TDBKey> cloud, string tid, string title, int retryInterval)
{
return () =>
{
try
{
#if net40
Cancel(cloud, tid, true);
#else
CancelAsync(cloud, tid, true).Wait();
#endif
}
catch
{
try
{
// Best-effort bookkeeping so the retry budget still decreases on failure.
cloud._ormMaster.Update<SagaMasterInfo>()
.Where(a => a.Tid == tid && a.Status == SagaMasterStatus.Pending)
.Set(a => a.RetryCount + 1)
.Set(a => a.RetryTime == DateTime.UtcNow)
.ExecuteAffrows();
}
catch { }
//if (cloud._distributeTraceEnable) cloud._distributedTraceCall($"SAGA({tid}, {title}) Not completed, waiting to try again, current tasks {cloud._scheduler.QuantityTempTask}");
cloud._scheduler.AddTempTask(TimeSpan.FromSeconds(retryInterval), GetTempTask(cloud, tid, title, retryInterval));
}
};
}
}
}
|
2881099/FreeSql.Cloud | 3,818 | src/FreeSql.Cloud/Saga/SagaMaster_InvokeUnit.cs | using FreeSql.Cloud.Model;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Linq;
using System.Reflection;
using System.Threading.Tasks;
namespace FreeSql.Cloud.Saga
{
partial class SagaMaster<TDBKey>
{
// Which saga callback to invoke on a unit.
enum InvokeUnitMethod { Commit, Cancel }
// Invokes a unit's Commit/Cancel exactly once, using an idempotency record
// (UnitInvokedInfo, guarded by its unique primary key) written in the same transaction
// as the unit's own database work. The #if/#else signatures differ only in sync vs.
// async ORM calls and share the body below.
#if net40
static void InvokeUnit(FreeSqlCloud<TDBKey> cloud, SagaUnitInfo unitInfo, ISagaUnit unit, InvokeUnitMethod method, IFreeSql masterTranOrm)
{
void LocalInvokeUnit(IFreeSql orm)
#else
async static Task InvokeUnitAsync(FreeSqlCloud<TDBKey> cloud, SagaUnitInfo unitInfo, ISagaUnit unit, InvokeUnitMethod method, IFreeSql masterTranOrm)
{
async Task LocalInvokeUnitAsync(IFreeSql orm)
#endif
{
if (orm != null)
{
try
{
switch (method)
{
case InvokeUnitMethod.Commit:
case InvokeUnitMethod.Cancel:
// Idempotency marker keyed by (tid, unit index, method).
var insert = orm.Insert(new UnitInvokedInfo { Id = $"SAGA:{unitInfo.Tid},{unitInfo.Index},{method}" });
#if net40
insert.ExecuteAffrows();
#else
await insert.ExecuteAffrowsAsync();
#endif
break;
}
}
catch
{
return; // unique-constraint violation => already executed, skip (idempotency)
}
}
#if net40
switch (method)
{
case InvokeUnitMethod.Commit: unit.Commit(); break;
case InvokeUnitMethod.Cancel: unit.Cancel(); break;
}
#else
switch (method)
{
case InvokeUnitMethod.Commit: await unit.Commit(); break;
case InvokeUnitMethod.Cancel: await unit.Cancel(); break;
}
#endif
}
// No DbKey: the unit is database-agnostic, invoke without an idempotency record.
if (string.IsNullOrWhiteSpace(unitInfo.DbKey))
{
#if net40
LocalInvokeUnit(null);
#else
await LocalInvokeUnitAsync(null);
#endif
return;
}
var dbkey = (TDBKey)typeof(TDBKey).FromObject(unitInfo.DbKey);
var unitSetter = unit as ISagaUnitSetter;
if (object.Equals(cloud._dbkeyMaster, dbkey))
{
// Unit targets the master database: reuse the caller's open transaction.
try
{
unitSetter?.SetOrm(masterTranOrm);
#if net40
LocalInvokeUnit(masterTranOrm);
#else
await LocalInvokeUnitAsync(masterTranOrm);
#endif
}
finally
{
unitSetter?.SetOrm(null);
}
return;
}
// Unit targets another database: open a dedicated local transaction there.
var unitFsql = cloud.Use(dbkey);
#if net40
using (var conn = unitFsql.Ado.MasterPool.Get())
#else
using (var conn = await unitFsql.Ado.MasterPool.GetAsync())
#endif
{
var tran = conn.Value.BeginTransaction();
var TranIsCommited = false;
try
{
var tranOrm = FreeSqlTransaction.Create(unitFsql, () => tran);
unitSetter?.SetOrm(tranOrm);
#if net40
LocalInvokeUnit(tranOrm);
#else
await LocalInvokeUnitAsync(tranOrm);
#endif
tran.Commit();
TranIsCommited = true;
}
finally
{
unitSetter?.SetOrm(null);
if (TranIsCommited == false)
tran.Rollback();
}
}
}
}
}
|
2881099/FreeSql.Cloud | 2,305 | src/FreeSql.Cloud/Saga/SagaEntity.cs | using FreeSql.DataAnnotations;
using System;
using System.Data;
namespace FreeSql.Cloud.Saga
{
/// <summary>
/// Persisted master record of a SAGA transaction; one row per saga.
/// </summary>
[Index("{tablename}_idx1", "status")]
public class SagaMasterInfo
{
/// <summary>Saga transaction id (primary key).</summary>
[Column(Name = "tid", IsPrimary = true, StringLength = 128)]
public string Tid { get; set; }
/// <summary>Human-readable title used in trace messages.</summary>
[Column(Name = "title")]
public string Title { get; set; }
/// <summary>Total number of units registered for this saga.</summary>
[Column(Name = "total")]
public int Total { get; set; }
/// <summary>Creation time (server-side UTC; never updated).</summary>
[Column(Name = "create_time", ServerTime = DateTimeKind.Utc, CanUpdate = false)]
public DateTime CreateTime { get; set; } = DateTime.UtcNow;
/// <summary>Time the saga reached a final status.</summary>
[Column(Name = "finish_time")]
public DateTime FinishTime { get; set; }
/// <summary>Current lifecycle status (stored as a string).</summary>
[Column(Name = "status", MapType = typeof(string), StringLength = 10)]
public SagaMasterStatus Status { get; set; }
/// <summary>Maximum number of automatic retries before manual operation.</summary>
[Column(Name = "max_retry_count")]
public int MaxRetryCount { get; set; } = 30;
/// <summary>Seconds between automatic retries.</summary>
[Column(Name = "retry_interval")]
public int RetryInterval { get; set; } = 60;
/// <summary>Retries performed so far.</summary>
[Column(Name = "retry_count")]
public int RetryCount { get; set; }
/// <summary>Time of the most recent retry.</summary>
[Column(Name = "retry_time")]
public DateTime RetryTime { get; set; }
}
/// <summary>Lifecycle states of a saga master record.</summary>
public enum SagaMasterStatus { Pending, Commited, Canceled, ManualOperation }
/// <summary>
/// Persisted record of one unit (step) of a SAGA transaction;
/// keyed by (Tid, Index).
/// </summary>
public class SagaUnitInfo
{
/// <summary>Owning saga transaction id (composite primary key part).</summary>
[Column(Name = "tid", IsPrimary = true, StringLength = 128)]
public string Tid { get; set; }
/// <summary>1-based position of the unit within the saga (composite primary key part).</summary>
[Column(Name = "index", IsPrimary = true)]
public int Index { get; set; }
/// <summary>Optional human-readable description used in trace messages.</summary>
[Column(Name = "description")]
public string Description { get; set; }
/// <summary>Current stage of the unit (stored as a string).</summary>
[Column(Name = "stage", MapType = typeof(string), StringLength = 8)]
public SagaUnitStage Stage { get; set; }
/// <summary>Assembly-qualified name of the ISagaUnit implementation, used to recreate it on retry.</summary>
[Column(Name = "type_name")]
public string TypeName { get; set; }
/// <summary>JSON-serialized unit state (unlimited length).</summary>
[Column(Name = "state", StringLength = - 1)]
public string State { get; set; }
/// <summary>Assembly-qualified name of the state's CLR type, used to deserialize State.</summary>
[Column(Name = "state_type_name")]
public string StateTypeName { get; set; }
/// <summary>Creation time (server-side UTC; never updated).</summary>
[Column(Name = "create_time", ServerTime = DateTimeKind.Utc, CanUpdate = false)]
public DateTime CreateTime { get; set; } = DateTime.UtcNow;
/// <summary>Target database key for the unit; empty when database-agnostic.</summary>
[Column(Name = "db_key", StringLength = 128)]
public string DbKey { get; set; }
}
/// <summary>Stages of a saga unit: committed forward work, or compensated.</summary>
public enum SagaUnitStage { Commit, Cancel }
}
|
2881099/FreeSql.Cloud | 11,417 | src/FreeSql.Cloud/RepositoryCloud/RepositoryCloud.cs | using FreeSql;
using FreeSql.Cloud.Abstract;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Linq.Expressions;
using System.Threading;
using System.Threading.Tasks;
#if !net40
public static class FreesqlCloudGlobalExtensions
{
/// <summary>
/// Creates a special repository object that always follows the FreeSqlCloud Change
/// method, switching to the corresponding database at call time.<para></para>
/// _<para></para>
/// Note: repositories created any other way fix their IFreeSql at initialization
/// and cannot follow database switching.
/// </summary>
/// <typeparam name="TEntity"></typeparam>
/// <param name="that"></param>
/// <returns></returns>
public static IBaseRepository<TEntity> GetCloudRepository<TEntity>(this FreeSqlCloudBase that) where TEntity : class
=> new RepositoryCloud<TEntity>(that);
}
namespace FreeSql
{
/// <summary>
/// Repository implementation that follows database switching.<para></para>
/// _<para></para>
/// UnitOfWorkManagerCloud.Begin (takes priority)<para></para>
/// or FreeSqlCloud.Change both switch this RepositoryCloud to the corresponding database.<para></para>
/// _<para></para>
/// The default FreeSql.Repository object fixes its IFreeSql at initialization (it cannot follow switching).
/// </summary>
/// <typeparam name="TEntity"></typeparam>
class RepositoryCloud<TEntity> : IBaseRepository<TEntity> where TEntity : class
{
// Factory hook so derived types can customize how the per-database repository is created.
protected virtual IBaseRepository<TEntity> CreateRepository(IFreeSql fsql)
{
return fsql.GetRepository<TEntity>();
}
internal readonly FreeSqlCloudBase _cloud;
internal readonly UnitOfWorkManagerCloud _uowManager;
// One inner repository per database key, created lazily on first use.
internal readonly Dictionary<string, IBaseRepository<TEntity>> _repos = new Dictionary<string, IBaseRepository<TEntity>>();
/// <summary>
/// Follows cloud.Change switching.
/// </summary>
/// <param name="cloud"></param>
public RepositoryCloud(FreeSqlCloudBase cloud)
{
_cloud = cloud;
}
/// <summary>
/// Follows uowManager.Begin unit-of-work switching (takes priority)<para></para>
/// or<para></para>
/// follows cloud.Change switching.
/// </summary>
/// <param name="cloud"></param>
/// <param name="uowManager"></param>
public RepositoryCloud(FreeSqlCloudBase cloud, UnitOfWorkManagerCloud uowManager)
{
_cloud = uowManager?.Cloud ?? cloud;
_uowManager = uowManager;
}
public void Dispose()
{
ForEachRepos(repo => repo.Dispose());
_repos.Clear();
}
// Applies an action to every inner repository created so far (all database keys).
protected void ForEachRepos(Action<IBaseRepository<TEntity>> action)
{
foreach (var repo in _repos.Values) action(repo);
}
IBaseRepository<TEntity> _firstRepository;
// Resolves the inner repository for the current database key (unit-of-work key first,
// then the cloud's current key), creating and configuring it on first access.
protected IBaseRepository<TEntity> CurrentRepository
{
get
{
if (_uowManager == null && _cloud == null) return _repos.Values.First();
var dbkey = _uowManager != null ? _uowManager.GetDBKey() : _cloud.GetDBKey();
if (_repos.TryGetValue(dbkey, out var repo) == false)
{
_repos.Add(dbkey, repo = CreateRepository(_cloud.Use(dbkey)));
if (_uowManager != null) _uowManager.GetUnitOfWorkManager(dbkey).Binding(repo);
if (_firstRepository == null) _firstRepository = repo;
else
{
// Copy settings applied before this repo existed so all inner repos stay consistent.
repo.DbContextOptions = _firstRepository.DbContextOptions;
if (_asTypeEntityType != null) repo.AsType(_asTypeEntityType);
if (_asTablePriv != null) repo.AsTable(_asTablePriv);
}
}
return repo;
}
}
// Reads forward to the current repo; writes fan out to every repo created so far.
public DbContextOptions DbContextOptions
{
get => CurrentRepository.DbContextOptions;
set => ForEachRepos(repo => repo.DbContextOptions = value);
}
Type _asTypeEntityType;
// Remembered so repos created later receive the same AsType mapping.
public void AsType(Type entityType)
{
_asTypeEntityType = entityType;
ForEachRepos(repo => repo.AsType(entityType));
}
internal Func<Type, string, string> _asTablePriv;
// Remembered so repos created later receive the same AsTable rule.
public void AsTable(Func<string, string> rule)
{
if (rule == null)
_asTablePriv = null;
else
_asTablePriv = (a, b) => a == EntityType ? rule(b) : null;
ForEachRepos(repo => repo.AsTable(_asTablePriv));
}
public void AsTable(Func<Type, string, string> rule)
{
_asTablePriv = rule;
ForEachRepos(repo => repo.AsTable(_asTablePriv));
}
public IUnitOfWork UnitOfWork
{
get => CurrentRepository.UnitOfWork;
set => CurrentRepository.UnitOfWork = value;
}
// --- Every member below simply forwards to the repository bound to the current database key. ---
public IFreeSql Orm => CurrentRepository.Orm;
public Type EntityType => CurrentRepository.EntityType;
public RepositoryDataFilter DataFilter => CurrentRepository.DataFilter;
public ISelect<TEntity> Select => CurrentRepository.Select;
public IUpdate<TEntity> UpdateDiy => CurrentRepository.UpdateDiy;
public ISelect<TEntity> Where(Expression<Func<TEntity, bool>> exp) => CurrentRepository.Where(exp);
public ISelect<TEntity> WhereIf(bool condition, Expression<Func<TEntity, bool>> exp) => CurrentRepository.WhereIf(condition, exp);
public void Attach(TEntity entity) => CurrentRepository.Attach(entity);
public void Attach(IEnumerable<TEntity> entity) => CurrentRepository.Attach(entity);
public IBaseRepository<TEntity> AttachOnlyPrimary(TEntity data) => CurrentRepository.AttachOnlyPrimary(data);
public Dictionary<string, object[]> CompareState(TEntity newdata) => CurrentRepository.CompareState(newdata);
public void FlushState() => CurrentRepository.FlushState();
public void BeginEdit(List<TEntity> data) => CurrentRepository.BeginEdit(data);
public int EndEdit(List<TEntity> data = null) => CurrentRepository.EndEdit(data);
public TEntity Insert(TEntity entity) => CurrentRepository.Insert(entity);
public List<TEntity> Insert(IEnumerable<TEntity> entitys) => CurrentRepository.Insert(entitys);
public TEntity InsertOrUpdate(TEntity entity) => CurrentRepository.InsertOrUpdate(entity);
public void SaveMany(TEntity entity, string propertyName) => CurrentRepository.SaveMany(entity, propertyName);
public int Update(TEntity entity) => CurrentRepository.Update(entity);
public int Update(IEnumerable<TEntity> entitys) => CurrentRepository.Update(entitys);
public int Delete(TEntity entity) => CurrentRepository.Delete(entity);
public int Delete(IEnumerable<TEntity> entitys) => CurrentRepository.Delete(entitys);
public int Delete(Expression<Func<TEntity, bool>> predicate) => CurrentRepository.Delete(predicate);
public List<object> DeleteCascadeByDatabase(Expression<Func<TEntity, bool>> predicate) => CurrentRepository.DeleteCascadeByDatabase(predicate);
public Task<TEntity> InsertAsync(TEntity entity, CancellationToken cancellationToken = default) => CurrentRepository.InsertAsync(entity, cancellationToken);
public Task<List<TEntity>> InsertAsync(IEnumerable<TEntity> entitys, CancellationToken cancellationToken = default) => CurrentRepository.InsertAsync(entitys, cancellationToken);
public Task<TEntity> InsertOrUpdateAsync(TEntity entity, CancellationToken cancellationToken = default) => CurrentRepository.InsertOrUpdateAsync(entity, cancellationToken);
public Task SaveManyAsync(TEntity entity, string propertyName, CancellationToken cancellationToken = default) => CurrentRepository.SaveManyAsync(entity, propertyName, cancellationToken);
public Task<int> UpdateAsync(TEntity entity, CancellationToken cancellationToken = default) => CurrentRepository.UpdateAsync(entity, cancellationToken);
public Task<int> UpdateAsync(IEnumerable<TEntity> entitys, CancellationToken cancellationToken = default) => CurrentRepository.UpdateAsync(entitys, cancellationToken);
public Task<int> DeleteAsync(TEntity entity, CancellationToken cancellationToken = default) => CurrentRepository.DeleteAsync(entity, cancellationToken);
public Task<int> DeleteAsync(IEnumerable<TEntity> entitys, CancellationToken cancellationToken = default) => CurrentRepository.DeleteAsync(entitys, cancellationToken);
public Task<int> DeleteAsync(Expression<Func<TEntity, bool>> predicate, CancellationToken cancellationToken = default) => CurrentRepository.DeleteAsync(predicate, cancellationToken);
public Task<List<object>> DeleteCascadeByDatabaseAsync(Expression<Func<TEntity, bool>> predicate, CancellationToken cancellationToken = default) => CurrentRepository.DeleteCascadeByDatabaseAsync(predicate, cancellationToken);
}
// class RepositoryCloud<TDBKey, TEntity, TKey> : RepositoryCloud<TDBKey, TEntity>, IBaseRepository<TEntity, TKey>
// where TEntity : class
// {
// /// <summary>
// /// 跟随 cloud.Change 切换
// /// </summary>
// /// <param name="cloud"></param>
// public RepositoryCloud(FreeSqlCloud<TDBKey> cloud) : base(cloud) { }
// /// <summary>
// /// 跟随 uowManager.Begin 工作单元切换(优先)<para></para>
// /// 或者<para></para>
// /// 跟随 cloud.Change 切换
// /// </summary>
// /// <param name="cloud"></param>
// /// <param name="uowManager"></param>
// public RepositoryCloud(FreeSqlCloud<TDBKey> cloud, UnitOfWorkManagerCloud<TDBKey> uowManager) : base(cloud, uowManager) { }
// TEntity CheckTKeyAndReturnIdEntity(TKey id)
// {
// var repo = CurrentRepository;
// var tb = repo.Orm.CodeFirst.GetTableByEntity(repo.EntityType);
// if (tb.Primarys.Length != 1) throw new Exception(DbContextStrings.EntityType_PrimaryKeyIsNotOne(repo.EntityType.Name));
// if (tb.Primarys[0].CsType.NullableTypeOrThis() != typeof(TKey).NullableTypeOrThis()) throw new Exception(DbContextStrings.EntityType_PrimaryKeyError(repo.EntityType.Name, typeof(TKey).FullName));
// var obj = tb.Type.CreateInstanceGetDefaultValue();
// repo.Orm.SetEntityValueWithPropertyName(tb.Type, obj, tb.Primarys[0].CsName, id);
// var ret = obj as TEntity;
// if (ret == null) throw new Exception(DbContextStrings.EntityType_CannotConvert(repo.EntityType.Name, typeof(TEntity).Name));
// return ret;
// }
// public virtual TEntity Get(TKey id) => Select.WhereDynamic(CheckTKeyAndReturnIdEntity(id)).ToOne();
// public virtual TEntity Find(TKey id) => Select.WhereDynamic(CheckTKeyAndReturnIdEntity(id)).ToOne();
// public virtual int Delete(TKey id) => Delete(CheckTKeyAndReturnIdEntity(id));
//#if net40
//#else
// public Task<TEntity> GetAsync(TKey id, CancellationToken cancellationToken = default) => Select.WhereDynamic(CheckTKeyAndReturnIdEntity(id)).ToOneAsync(cancellationToken);
// public Task<TEntity> FindAsync(TKey id, CancellationToken cancellationToken = default) => Select.WhereDynamic(CheckTKeyAndReturnIdEntity(id)).ToOneAsync(cancellationToken);
// public Task<int> DeleteAsync(TKey id, CancellationToken cancellationToken = default) => DeleteAsync(CheckTKeyAndReturnIdEntity(id), cancellationToken);
//#endif
// }
}
#endif |
2881099/FreeSql.Cloud | 1,720 | src/FreeSql.Cloud/RepositoryCloud/UnitOfWorkManagerCloud.cs | using FreeSql.Cloud.Abstract;
using System;
using System.Collections.Generic;
using System.Data;
using System.Threading;
namespace FreeSql
{
#if !net40
// Manages one UnitOfWorkManager per database key and tracks which key the current
// async flow has begun a unit of work on, so RepositoryCloud can follow it.
class UnitOfWorkManagerCloud
{
public FreeSqlCloudBase Cloud { get; }
// One manager per database key, created lazily.
internal readonly Dictionary<string, UnitOfWorkManager> _uowManagers = new Dictionary<string, UnitOfWorkManager>();
public UnitOfWorkManagerCloud(FreeSqlCloudBase cloud)
{
Cloud = cloud;
// Falls back to the cloud's current key when no flow-local key is set.
_dbkeyCurrent = new AsyncLocalAccessor<string>(Cloud.GetDBKey);
}
public void Dispose()
{
ForEachUowManagers(uowm => uowm.Dispose());
_uowManagers.Clear();
}
// Applies an action to every manager created so far.
protected void ForEachUowManagers(Action<UnitOfWorkManager> action)
{
foreach (var uowm in _uowManagers.Values) action(uowm);
}
// Flow-local (async-local) database key set by Begin().
internal AsyncLocalAccessor<string> _dbkeyCurrent;
internal string GetDBKey()
{
// Use the flow-local key only while its unit of work is still active;
// otherwise defer to the cloud's current key.
if (string.IsNullOrWhiteSpace(_dbkeyCurrent.Value) || GetUnitOfWorkManager(_dbkeyCurrent.Value).Current == null) return Cloud.GetDBKey();
return _dbkeyCurrent.Value;
}
public IUnitOfWork Begin(string dbkey, Propagation propagation = Propagation.Required, IsolationLevel? isolationLevel = null)
{
_dbkeyCurrent.Value = dbkey;
return GetUnitOfWorkManager(dbkey).Begin(propagation, isolationLevel);
}
public UnitOfWorkManager GetUnitOfWorkManager(string dbkey)
{
if (_uowManagers.TryGetValue(dbkey, out var uowm) == false)
_uowManagers.Add(dbkey, uowm = new UnitOfWorkManager(Cloud.Use(dbkey)));
return uowm;
}
}
#endif
}
|
2881099/FreeRedis | 1,827 | azure-pipelines.yml | jobs:
- job: build_and_run_ut_by_linux
displayName: Build and Run Unit Test By (Ubuntu:Latest)
timeoutInMinutes: 120
pool:
vmImage: ubuntu-latest
steps:
- task: DockerCompose@0
displayName: "Deploy Docker Compose"
inputs:
action: 'Run a Docker Compose command'
containerregistrytype: 'Container Registry'
dockerComposeFile: '$(Build.SourcesDirectory)/docker-compose.yml'
dockerComposeFileArgs: |
workDirectory=$(Build.SourcesDirectory)
dockerComposeCommand: up -d
- task: UseDotNet@2
displayName: "Install .NET SDK 5.0.100"
inputs:
version: 5.0.100
- task: UseDotNet@2
displayName: "Install .NET Core SDK 3.1.403"
inputs:
version: 3.1.403
- script: bash scripts/build.sh
displayName: "Build"
- script: bash scripts/test.sh
displayName: "Run Unit Test"
- task: Palmmedia.reportgenerator.reportgenerator-build-release-task.reportgenerator@4
displayName: ReportGenerator
inputs:
reports: "$(Build.SourcesDirectory)/test/Unit/*/TestResults/*/coverage.cobertura.xml"
targetdir: "$(Build.SourcesDirectory)/CodeCoverage"
reporttypes: "Cobertura"
assemblyfilters: "-xunit*"
- script: bash <(curl -s https://codecov.io/bash)
displayName: "Upload to codecov.io"
- task: DockerCompose@0
displayName : "Down Docker Compose"
inputs:
action: 'Run a Docker Compose command'
containerregistrytype: 'Container Registry'
dockerComposeFile: '$(Build.SourcesDirectory)/docker-compose.yml'
dockerComposeFileArgs: |
workDirectory=$(Build.SourcesDirectory)
dockerComposeCommand: down --remove-orphans
|
2881099/FreeRedis | 6,352 | index.md | <h1 align="center"> 🦄 FreeRedis </h1>
<div align="center">
FreeRedis is .NET redis client, supports .NETCore 2.1+, .NETFramework 4.0+, And Xamarin
[](https://www.nuget.org/packages/FreeRedis)
[](https://www.nuget.org/stats/packages/FreeRedis?groupby=Version)
[](https://raw.githubusercontent.com/2881099/FreeRedis/master/LICENSE.txt)
</div>
- 🌈 RedisClient Keep all method names consistent with redis-cli
- 🌌 Support Redis Cluster (requires redis-server 3.2 and above)
- ⛳ Support Redis Sentinel
- 🎣 Support Redis Master-Slave
- 📡 Support Redis Pub-Sub
- 📃 Support Redis Lua Scripting
- 💻 Support Pipeline
- 📰 Support Transaction
- 🌴 Support Geo type commands (requires redis-server 3.2 and above)
- 🌲 Support Streams type commands (requires redis-server 5.0 and above)
- ⚡ Support Client-side-caching (requires redis-server 6.0 and above)
- 🌳 Support Redis 6 RESP3 Protocol
QQ群:4336577(已满)、8578575(在线)、52508226(在线)
#### 🌈 Single machine redis (单机)
```csharp
public static RedisClient cli = new RedisClient("127.0.0.1:6379,password=123,defaultDatabase=13");
//cli.Serialize = obj => JsonConvert.SerializeObject(obj);
//cli.Deserialize = (json, type) => JsonConvert.DeserializeObject(json, type);
cli.Notice += (s, e) => Console.WriteLine(e.Log); //print command log
cli.Set("key1", "value1");
cli.MSet("key1", "value1", "key2", "value2");
string value1 = cli.Get("key1");
string[] vals = cli.MGet("key1", "key2");
```
> Supports strings, hashes, lists, sets, sorted sets, bitmaps, hyperloglogs, geo, streams And BloomFilter.
| Parameter | Default | Explain |
| :---------------- | --------: | :------------------- |
| protocol | RESP2 | If you use RESP3, you need redis 6.0 environment |
| user | \<empty\> | Redis server username, requires redis-server 6.0 |
| password | \<empty\> | Redis server password |
| defaultDatabase | 0 | Redis server database |
| max poolsize | 100 | Connection max pool size |
| min poolsize | 5 | Connection min pool size |
| idleTimeout | 20000 | Idle time of elements in the connection pool (MS), suitable for connecting to remote redis server |
| connectTimeout | 10000 | Connection timeout (MS) |
| receiveTimeout | 10000 | Receive timeout (MS) |
| sendTimeout | 10000 | Send timeout (MS) |
| encoding | utf-8 | string charset |
| ssl | false | Enable encrypted transmission |
| name | \<empty\> | Connection name, use client list command to view |
| prefix | \<empty\> | key前辍,所有方法都会附带此前辍,cli.Set(prefix + "key", 111); |
> IPv6: [fe80::b164:55b3:4b4f:7ce6%15]:6379
-----
#### 🎣 Master-Slave (读写分离)
```csharp
public static RedisClient cli = new RedisClient(
"127.0.0.1:6379,password=123,defaultDatabase=13",
"127.0.0.1:6380,password=123,defaultDatabase=13",
"127.0.0.1:6381,password=123,defaultDatabase=13"
);
var value = cli.Get("key1");
```
> 写入时连接 127.0.0.1:6379,读取时随机连接 6380 6381
#### ⛳ Redis Sentinel (哨兵高可用)
```csharp
public static RedisClient cli = new RedisClient(
"mymaster,password=123",
new [] { "192.169.1.10:26379", "192.169.1.11:26379", "192.169.1.12:26379" },
true //是否读写分离
);
```
#### 🌌 Redis Cluster (集群)
假如你有一个 Redis Cluster 集群,其中有三个主节点(7001-7003)、三个从节点(7004-7006),则连接此集群的代码:
```csharp
public static RedisClient cli = new RedisClient(
new ConnectionStringBuilder[] { "192.168.0.2:7001", "192.168.0.2:7001", "192.168.0.2:7003" }
);
```
-----
#### ⚡ Client-side-caching (本地缓存)
> requires redis-server 6.0 and above
```csharp
cli.UseClientSideCaching(new ClientSideCachingOptions
{
//本地缓存的容量
Capacity = 3,
//过滤哪些键能被本地缓存
KeyFilter = key => key.StartsWith("Interceptor"),
//检查长期未使用的缓存
CheckExpired = (key, dt) => DateTime.Now.Subtract(dt) > TimeSpan.FromSeconds(2)
});
```
#### 📡 Subscribe (订阅)
```csharp
using (cli.Subscribe("abc", ondata)) //wait .Dispose()
{
Console.ReadKey();
}
void ondata(string channel, string data) =>
Console.WriteLine($"{channel} -> {data}");
```
#### 📃 Scripting (脚本)
```csharp
var r1 = cli.Eval("return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}",
new[] { "key1", "key2" }, "first", "second") as object[];
var r2 = cli.Eval("return {1,2,{3,'Hello World!'}}") as object[];
cli.Eval("return redis.call('set',KEYS[1],'bar')",
new[] { Guid.NewGuid().ToString() })
```
#### 💻 Pipeline (管道)
```csharp
using (var pipe = cli.StartPipe())
{
pipe.IncrBy("key1", 10);
pipe.Set("key2", null);
pipe.Get("key1");
object[] ret = pipe.EndPipe();
Console.WriteLine(ret[0] + ", " + ret[2]);
}
// or Async Callback
using (var pipe = cli.StartPipe())
{
var tasks = new List<Task>();
long t0 = 0;
tasks.Add(pipe.IncrByAsync("key1", 10).ContinueWith(t => t0 = t.Result)); //callback
pipe.SetAsync("key2", null);
string t2 = null;
tasks.Add(pipe.GetAsync("key1").ContinueWith(t => t2 = t.Result)); //callback
pipe.EndPipe();
Task.WaitAll(tasks.ToArray()); //wait all callback
Console.WriteLine(t0 + ", " + t2);
}
```
#### 📰 Transaction (事务)
```csharp
using (var tran = cli.Multi())
{
tran.IncrBy("key1", 10);
tran.Set("key2", null);
tran.Get("key1");
object[] ret = tran.Exec();
Console.WriteLine(ret[0] + ", " + ret[2]);
}
// or Async Callback
using (var tran = cli.Multi())
{
var tasks = new List<Task>();
long t0 = 0;
tasks.Add(tran.IncrByAsync("key1", 10).ContinueWith(t => t0 = t.Result)); //callback
tran.SetAsync("key2", null);
string t2 = null;
tasks.Add(tran.GetAsync("key1").ContinueWith(t => t2 = t.Result)); //callback
tran.Exec();
Task.WaitAll(tasks.ToArray()); //wait all callback
Console.WriteLine(t0 + ", " + t2);
}
```
#### 📯 GetDatabase (切库)
```csharp
using (var db = cli.GetDatabase(10))
{
db.Set("key1", 10);
var val1 = db.Get("key1");
}
```
#### 💕 Donation (捐赠)
> Thank you for your donation
- [Alipay](https://www.cnblogs.com/FreeSql/gallery/image/338860.html)
- [WeChat](https://www.cnblogs.com/FreeSql/gallery/image/338859.html)
|
2881099/FreeRedis | 9,839 | README.md | <h1 align="center"> 🦄 FreeRedis </h1>
<div align="center">
FreeRedis is a redis client based on .NET, supports .NET Core 2.1+, .NET Framework 4.0+, Xamarin, and AOT.
[](https://www.nuget.org/packages/FreeRedis)
[](https://www.nuget.org/stats/packages/FreeRedis?groupby=Version)
[](https://raw.githubusercontent.com/2881099/FreeRedis/master/LICENSE.txt)
<p>
<span>English</span> |
<a href="README.zh-CN.md">中文</a>
</p>
</div>
- 🌈 RedisClient Keep all method names consistent with redis-cli
- 🌌 Support Redis Cluster (requires redis-server 3.2 and above)
- ⛳ Support Redis Sentinel
- 🎣 Support Redis Master-Slave
- 📡 Support Redis Pub-Sub
- 📃 Support Redis Lua Scripting
- 💻 Support Pipeline, Transaction, DelayQueue, RediSearch
- 🌴 Support Geo type commands (requires redis-server 3.2 and above)
- 🌲 Support Streams type commands (requires redis-server 5.0 and above)
- ⚡ Support Client-side-caching (requires redis-server 6.0 and above)
- 🌳 Support Redis 6 RESP3 Protocol
QQ Groups:4336577(full)、**8578575(available)**、**52508226(available)**
## 🚀 Quick start
```csharp
public static RedisClient cli = new RedisClient("127.0.0.1:6379,password=123,defaultDatabase=13");
cli.Serialize = obj => JsonConvert.SerializeObject(obj);
cli.Deserialize = (json, type) => JsonConvert.DeserializeObject(json, type);
cli.Notice += (s, e) => Console.WriteLine(e.Log); //print command log
cli.Set("key1", "value1");
cli.MSet("key1", "value1", "key2", "value2");
string value1 = cli.Get("key1");
string[] vals = cli.MGet("key1", "key2");
```
> Supports strings, hashes, lists, sets, sorted sets, bitmaps, hyperloglogs, geo, streams And BloomFilter.
| Parameter | Default | Explain |
| :---------------- | --------: | :------------------- |
| protocol | RESP2 | If you use RESP3, you need redis 6.0 environment |
| user | \<empty\> | Redis server username, requires redis-server 6.0 |
| password | \<empty\> | Redis server password |
| defaultDatabase | 0 | Redis server database |
| max poolsize | 100 | Connection max pool size |
| min poolsize | 5 | Connection min pool size |
| idleTimeout | 20000 | Idle time of elements in the connection pool (MS), suitable for connecting to remote redis server |
| connectTimeout | 10000 | Connection timeout (MS) |
| receiveTimeout | 10000 | Receive timeout (MS) |
| sendTimeout | 10000 | Send timeout (MS) |
| encoding | utf-8 | string charset |
| retry | 0 | Protocol error retry execution times |
| ssl | false | Enable encrypted transmission |
| name | \<empty\> | Connection name, use client list command to view |
| prefix | \<empty\> | The prefix of the key, all methods will have this prefix. cli.Set(prefix + "key", 111); |
| exitAutoDisposePool | true | AppDomain.CurrentDomain.ProcessExit/Console.CancelKeyPress auto disposed |
| subscribeReadbytes | false | Subscribe read bytes |
> IPv6: [fe80::b164:55b3:4b4f:7ce6%15]:6379
```csharp
//FreeRedis.DistributedCache
//services.AddSingleton<IDistributedCache>(new FreeRedis.DistributedCache(cli));
```
### 🎣 Master-Slave
```csharp
public static RedisClient cli = new RedisClient(
"127.0.0.1:6379,password=123,defaultDatabase=13",
"127.0.0.1:6380,password=123,defaultDatabase=13",
"127.0.0.1:6381,password=123,defaultDatabase=13"
);
var value = cli.Get("key1");
```
> Write data at 127.0.0.1:6379; randomly read data from port 6380 or 6381.
### ⛳ Redis Sentinel
```csharp
public static RedisClient cli = new RedisClient(
"mymaster,password=123",
new [] { "192.169.1.10:26379", "192.169.1.11:26379", "192.169.1.12:26379" },
true //This variable indicates whether to use the read-write separation mode.
);
```
### 🌌 Redis Cluster
Suppose, a Redis cluster has three master nodes (7001-7003) and three slave nodes (7004-7006), then use the following code to connect to the cluster:
```csharp
public static RedisClient cli = new RedisClient(
new ConnectionStringBuilder[] { "192.168.0.2:7001", "192.168.0.2:7002", "192.168.0.2:7003" }
);
```
### ⚡ Client-side-caching
> requires redis-server 6.0 and above
```csharp
cli.UseClientSideCaching(new ClientSideCachingOptions
{
//Client cache capacity
Capacity = 3,
//Filtering rules, which specify which keys can be cached locally
KeyFilter = key => key.StartsWith("Interceptor"),
//Check long-term unused cache
CheckExpired = (key, dt) => DateTime.Now.Subtract(dt) > TimeSpan.FromSeconds(2)
});
```
### 📡 Subscribe
```csharp
using (cli.Subscribe("abc", ondata)) //wait .Dispose()
{
Console.ReadKey();
}
void ondata(string channel, string data) =>
Console.WriteLine($"{channel} -> {data}");
```
xadd + xreadgroup:
```csharp
using (cli.SubscribeStream("stream_key", ondata)) //wait .Dispose()
{
Console.ReadKey();
}
void ondata(Dictionary<string, string> streamValue) =>
Console.WriteLine(JsonConvert.SerializeObject(streamValue));
// NoAck xpending
cli.XPending("stream_key", "FreeRedis__group", "-", "+", 10);
```
lpush + blpop:
```csharp
using (cli.SubscribeList("list_key", ondata)) //wait .Dispose()
{
Console.ReadKey();
}
void ondata(string listValue) =>
Console.WriteLine(listValue);
```
### 📃 Scripting
```csharp
var r1 = cli.Eval("return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}",
new[] { "key1", "key2" }, "first", "second") as object[];
var r2 = cli.Eval("return {1,2,{3,'Hello World!'}}") as object[];
cli.Eval("return redis.call('set',KEYS[1],'bar')",
new[] { Guid.NewGuid().ToString() })
```
### 💻 Pipeline
```csharp
using (var pipe = cli.StartPipe())
{
pipe.IncrBy("key1", 10);
pipe.Set("key2", null);
pipe.Get("key1");
object[] ret = pipe.EndPipe();
Console.WriteLine(ret[0] + ", " + ret[2]);
}
```
### 📰 Transaction
```csharp
using (var tran = cli.Multi())
{
tran.IncrBy("key1", 10);
tran.Set("key2", null);
tran.Get("key1");
object[] ret = tran.Exec();
Console.WriteLine(ret[0] + ", " + ret[2]);
}
```
### 📯 GetDatabase: switch database
```csharp
using (var db = cli.GetDatabase(10))
{
db.Set("key1", 10);
var val1 = db.Get("key1");
}
```
### 🔍 Scan
> Support cluster mode
```csharp
foreach (var keys in cli.Scan("*", 10, null))
{
Console.WriteLine(string.Join(", ", keys));
}
```
### 🍡 DelayQueue
```csharp
var delayQueue = cli.DelayQueue("TestDelayQueue");
//Add queue
delayQueue.Enqueue($"Execute in 5 seconds.", TimeSpan.FromSeconds(5));
delayQueue.Enqueue($"Execute in 10 seconds.", DateTime.Now.AddSeconds(10));
delayQueue.Enqueue($"Execute in 15 seconds.", DateTime.Now.AddSeconds(15));
delayQueue.Enqueue($"Execute in 20 seconds.", TimeSpan.FromSeconds(20));
delayQueue.Enqueue($"Execute in 25 seconds.", DateTime.Now.AddSeconds(25));
delayQueue.Enqueue($"Execute in 2024-07-02 14:30:15", DateTime.Parse("2024-07-02 14:30:15"));
//Consumption queue
await delayQueue.DequeueAsync(s =>
{
output.WriteLine($"{DateTime.Now}:{s}");
return Task.CompletedTask;
});
```
### 🐆 RediSearch
```csharp
cli.FtCreate(...).Execute();
cli.FtSearch(...).Execute();
cli.FtAggregate(...).Execute();
//... or ...
[FtDocument("index_post", Prefix = "blog:post:")]
class TestDoc
{
[FtKey]
public int Id { get; set; }
[FtTextField("title", Weight = 5.0)]
public string Title { get; set; }
[FtTextField("category")]
public string Category { get; set; }
[FtTextField("content", Weight = 1.0, NoIndex = true)]
public string Content { get; set; }
[FtTagField("tags")]
public string[] Tags { get; set; } //or string
[FtNumericField("views")]
public int Views { get; set; }
}
var repo = cli.FtDocumentRepository<TestDoc>();
repo.CreateIndex();
repo.Save(new TestDoc { Id = 1, Title = "test title1 word", Category = "class 1", Content = "test content 1 suffix", Tags = "user1,user2", Views = 101 });
repo.Save(new TestDoc { Id = 2, Title = "prefix test title2", Category = "class 2", Content = "test infix content 2", Tags = "user2,user3", Views = 201 });
repo.Save(new TestDoc { Id = 3, Title = "test title3 word", Category = "class 1", Content = "test word content 3", Tags = "user2,user5", Views = 301 });
repo.Delete(1, 2, 3);
repo.Save(new[]
{
new TestDoc { Id = 1, Title = "test title1 word", Category = "class 1", Content = "test content 1 suffix", Tags = "user1,user2", Views = 101 },
new TestDoc { Id = 2, Title = "prefix test title2", Category = "class 2", Content = "test infix content 2", Tags = "user2,user3", Views = 201 },
new TestDoc { Id = 3, Title = "test title3 word", Category = "class 1", Content = "test word content 3", Tags = "user2,user5", Views = 301 }
});
var list = repo.Search("*").InFields(a => new { a.Title }).ToList();
list = repo.Search("*").Return(a => new { a.Title, a.Tags }).ToList();
list = repo.Search("*").Return(a => new { tit1 = a.Title, tgs1 = a.Tags, a.Title, a.Tags }).ToList();
list = repo.Search(a => a.Title == "word" && a.Tags.Contains("user1")).Filter(a => a.Views, 1, 1000).ToList();
list = repo.Search("word").ToList();
list = repo.Search("@title:word").ToList();
```
## 👯 Contributors
<a href="https://github.com/2881099/FreeRedis/graphs/contributors">
<img src="https://contributors-img.web.app/image?repo=2881099/FreeRedis" />
</a>
## 💕 Donation
> Thank you for your donation
- [Alipay](https://www.cnblogs.com/FreeSql/gallery/image/338860.html)
- [WeChat](https://www.cnblogs.com/FreeSql/gallery/image/338859.html)
## 🗄 License
[MIT](LICENSE)
|
2881099/FreeRedis | 8,940 | README.zh-CN.md | <h1 align="center"> 🦄 FreeRedis </h1>
<div align="center">
基于 .NET 的 Redis 客户端,支持 .NET Core 2.1+、.NET Framework 4.0+、Xamarin 以及 AOT。
[](https://www.nuget.org/packages/FreeRedis)
[](https://www.nuget.org/stats/packages/FreeRedis?groupby=Version)
[](https://raw.githubusercontent.com/2881099/FreeRedis/master/LICENSE.txt)
<p align="center">
<a href="README.md">English</a> |
<span>中文</span>
</p>
</div>
- 🌈 所有方法名与 redis-cli 保持一致
- 🌌 支持 Redis 集群(服务端要求 3.2 及以上版本)
- ⛳ 支持 Redis 哨兵模式
- 🎣 支持主从分离(Master-Slave)
- 📡 支持发布订阅(Pub-Sub)
- 📃 支持 Redis Lua 脚本
- 💻 支持管道(Pipeline)、支持事务、延迟队列、RediSearch
- 🌴 支持 GEO 命令(服务端要求 3.2 及以上版本)
- 🌲 支持 STREAM 类型命令(服务端要求 5.0 及以上版本)
- ⚡ 支持本地缓存(Client-side-caching,服务端要求 6.0 及以上版本)
- 🌳 支持 Redis 6 的 RESP3 协议
QQ群:4336577(已满)、8578575(在线)、52508226(在线)
## 🚀 快速入门
```csharp
public static RedisClient cli = new RedisClient("127.0.0.1:6379,password=123,defaultDatabase=13");
//cli.Serialize = obj => JsonConvert.SerializeObject(obj);
//cli.Deserialize = (json, type) => JsonConvert.DeserializeObject(json, type);
cli.Notice += (s, e) => Console.WriteLine(e.Log); //打印命令日志
cli.Set("key1", "value1");
cli.MSet("key1", "value1", "key2", "value2");
string value1 = cli.Get("key1");
string[] vals = cli.MGet("key1", "key2");
```
> 支持 STRING、HASH、LIST、SET、ZSET、BITMAP、HyperLogLog、GEO、Stream 以及布隆过滤器等。
| 参数 | 默认值 | 说明 |
| :---------------- | --------: | :------------------- |
| protocol | RESP2 | 若使用 RESP3 协议,你需要 Redis 6.0 环境 |
| user | \<empty\> | Redis 服务端用户名,要求 Redis 6.0 环境 |
| password | \<empty\> | Redis 服务端密码 |
| defaultDatabase | 0 | Redis 服务端数据库 |
| max poolsize | 100 | 连接池最大连接数 |
| min poolsize | 5 | 连接池最小连接数 |
| idleTimeout | 20000 | 连接池中元素的空闲时间(单位为毫秒 ms),适用于连接到远程服务器 |
| connectTimeout | 10000 | 连接超时,单位为毫秒(ms) |
| receiveTimeout | 10000 | 接收超时,单位为毫秒(ms) |
| sendTimeout | 10000 | 发送超时,单位为毫秒(ms) |
| encoding | utf-8 | 字符串字符集 |
| retry | 0 | 协议发生错误时,重试执行的次数 |
| ssl | false | 启用加密传输 |
| name | \<empty\> | 连接名,使用 CLIENT LIST 命令查看 |
| prefix | \<empty\> | `key` 前辍,所有方法都会附带此前辍,cli.Set(prefix + "key", 111); |
| exitAutoDisposePool | true | AppDomain.CurrentDomain.ProcessExit/Console.CancelKeyPress 事件自动释放 |
| subscribeReadbytes | false | Subscribe 读取内容为 byte[] |
> IPv6: [fe80::b164:55b3:4b4f:7ce6%15]:6379
```csharp
//FreeRedis.DistributedCache
//services.AddSingleton<IDistributedCache>(new FreeRedis.DistributedCache(cli));
```
### 🎣 Master-Slave (读写分离)
```csharp
public static RedisClient cli = new RedisClient(
"127.0.0.1:6379,password=123,defaultDatabase=13",
"127.0.0.1:6380,password=123,defaultDatabase=13",
"127.0.0.1:6381,password=123,defaultDatabase=13"
);
var value = cli.Get("key1");
```
> 写入时连接 127.0.0.1:6379,读取时随机连接 6380 6381
### ⛳ Redis Sentinel (哨兵高可用)
```csharp
public static RedisClient cli = new RedisClient(
"mymaster,password=123",
new [] { "192.169.1.10:26379", "192.169.1.11:26379", "192.169.1.12:26379" },
true //是否读写分离
);
```
### 🌌 Redis Cluster (集群)
假如你有一个 Redis Cluster 集群,其中有三个主节点(7001-7003)、三个从节点(7004-7006),则连接此集群的代码:
```csharp
public static RedisClient cli = new RedisClient(
new ConnectionStringBuilder[] { "192.168.0.2:7001", "192.168.0.2:7002", "192.168.0.2:7003" }
);
```
### ⚡ Client-side-caching (本地缓存)
> 服务端要求 6.0 及以上版本
```csharp
cli.UseClientSideCaching(new ClientSideCachingOptions
{
//本地缓存的容量
Capacity = 3,
//过滤哪些键能被本地缓存
KeyFilter = key => key.StartsWith("Interceptor"),
//检查长期未使用的缓存
CheckExpired = (key, dt) => DateTime.Now.Subtract(dt) > TimeSpan.FromSeconds(2)
});
```
### 📡 Subscribe (订阅)
```csharp
using (cli.Subscribe("abc", ondata)) //wait .Dispose()
{
Console.ReadKey();
}
void ondata(string channel, string data) =>
Console.WriteLine($"{channel} -> {data}");
```
xadd + xreadgroup:
```csharp
using (cli.SubscribeStream("stream_key", ondata)) //wait .Dispose()
{
Console.ReadKey();
}
void ondata(Dictionary<string, string> streamValue) =>
Console.WriteLine(JsonConvert.SerializeObject(streamValue));
// NoAck xpending
cli.XPending("stream_key", "FreeRedis__group", "-", "+", 10);
```
lpush + blpop:
```csharp
using (cli.SubscribeList("list_key", ondata)) //wait .Dispose()
{
Console.ReadKey();
}
void ondata(string listValue) =>
Console.WriteLine(listValue);
```
### 📃 Scripting (脚本)
```csharp
var r1 = cli.Eval("return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}",
new[] { "key1", "key2" }, "first", "second") as object[];
var r2 = cli.Eval("return {1,2,{3,'Hello World!'}}") as object[];
cli.Eval("return redis.call('set',KEYS[1],'bar')",
new[] { Guid.NewGuid().ToString() })
```
### 💻 Pipeline (管道)
```csharp
using (var pipe = cli.StartPipe())
{
pipe.IncrBy("key1", 10);
pipe.Set("key2", null);
pipe.Get("key1");
object[] ret = pipe.EndPipe();
Console.WriteLine(ret[0] + ", " + ret[2]);
}
```
### 📰 Transaction (事务)
```csharp
using (var tran = cli.Multi())
{
tran.IncrBy("key1", 10);
tran.Set("key2", null);
tran.Get("key1");
object[] ret = tran.Exec();
Console.WriteLine(ret[0] + ", " + ret[2]);
}
```
### 📯 GetDatabase (切库)
```csharp
using (var db = cli.GetDatabase(10))
{
db.Set("key1", 10);
var val1 = db.Get("key1");
}
```
### 🔍 Scan (扫描)
> 支持集群模式
```csharp
foreach (var keys in cli.Scan("*", 10, null))
{
Console.WriteLine(string.Join(", ", keys));
}
```
### 🍡 DelayQueue (延时队列)
```c#
var delayQueue = cli.DelayQueue("TestDelayQueue");
//添加队列
delayQueue.Enqueue($"Execute in 5 seconds.", TimeSpan.FromSeconds(5));
delayQueue.Enqueue($"Execute in 10 seconds.", DateTime.Now.AddSeconds(10));
delayQueue.Enqueue($"Execute in 15 seconds.", DateTime.Now.AddSeconds(15));
delayQueue.Enqueue($"Execute in 20 seconds.", TimeSpan.FromSeconds(20));
delayQueue.Enqueue($"Execute in 25 seconds.", DateTime.Now.AddSeconds(25));
delayQueue.Enqueue($"Execute in 2024-07-02 14:30:15", DateTime.Parse("2024-07-02 14:30:15"));
//消费延时队列
await delayQueue.DequeueAsync(s =>
{
output.WriteLine($"{DateTime.Now}:{s}");
return Task.CompletedTask;
});
```
### 🐆 RediSearch
```csharp
cli.FtCreate(...).Execute();
cli.FtSearch(...).Execute();
cli.FtAggregate(...).Execute();
//... or ...
[FtDocument("index_post", Prefix = "blog:post:")]
class TestDoc
{
[FtKey]
public int Id { get; set; }
[FtTextField("title", Weight = 5.0)]
public string Title { get; set; }
[FtTextField("category")]
public string Category { get; set; }
[FtTextField("content", Weight = 1.0, NoIndex = true)]
public string Content { get; set; }
[FtTagField("tags")]
public string[] Tags { get; set; } //or string
[FtNumericField("views")]
public int Views { get; set; }
}
var repo = cli.FtDocumentRepository<TestDoc>();
repo.CreateIndex();
repo.Save(new TestDoc { Id = 1, Title = "test title1 word", Category = "class 1", Content = "test content 1 suffix", Tags = "user1,user2", Views = 101 });
repo.Save(new TestDoc { Id = 2, Title = "prefix test title2", Category = "class 2", Content = "test infix content 2", Tags = "user2,user3", Views = 201 });
repo.Save(new TestDoc { Id = 3, Title = "test title3 word", Category = "class 1", Content = "test word content 3", Tags = "user2,user5", Views = 301 });
repo.Delete(1, 2, 3);
repo.Save(new[]
{
new TestDoc { Id = 1, Title = "test title1 word", Category = "class 1", Content = "test content 1 suffix", Tags = "user1,user2", Views = 101 },
new TestDoc { Id = 2, Title = "prefix test title2", Category = "class 2", Content = "test infix content 2", Tags = "user2,user3", Views = 201 },
new TestDoc { Id = 3, Title = "test title3 word", Category = "class 1", Content = "test word content 3", Tags = "user2,user5", Views = 301 }
});
var list = repo.Search("*").InFields(a => new { a.Title }).ToList();
list = repo.Search("*").Return(a => new { a.Title, a.Tags }).ToList();
list = repo.Search("*").Return(a => new { tit1 = a.Title, tgs1 = a.Tags, a.Title, a.Tags }).ToList();
list = repo.Search(a => a.Title == "word" && a.Tags.Contains("user1")).Filter(a => a.Views, 1, 1000).ToList();
list = repo.Search("word").ToList();
list = repo.Search("@title:word").ToList();
```
## 👯 Contributors (贡献者)
<a href="https://github.com/2881099/FreeRedis/graphs/contributors">
<img src="https://contributors-img.web.app/image?repo=2881099/FreeRedis" />
</a>
## 💕 Donation (捐赠)
> 感谢你的打赏
- [Alipay](https://www.cnblogs.com/FreeSql/gallery/image/338860.html)
- [WeChat](https://www.cnblogs.com/FreeSql/gallery/image/338859.html)
## 🗄 License (许可证)
[MIT](LICENSE)
|
2881099/FreeRedis | 1,783 | .github/workflows/docfx.yml | name: docfx build
on:
push:
branches:
- master
jobs:
build:
name: Build
runs-on: windows-latest
steps:
# Check out the branch that triggered this workflow to the 'source' subdirectory
- name: Checkout Code
uses: actions/checkout@v2
with:
path: source
- name: install DocFX
run: "& choco install docfx -y"
# Run a build
- name: Build docs
run: "& docfx ./docfx.json"
working-directory: ./source
# Check out gh-pages branch to the 'docs' subdirectory
- name: Checkout docs
uses: actions/checkout@v2
with:
ref: gh-pages
path: docs
# Sync the site
- name: Clear docs repo
run: Get-ChildItem -Force -Exclude .git | ForEach-Object { Remove-Item -Recurse -Verbose -Force $_ }
working-directory: ./docs
- name: Sync new content
run: Copy-Item -Recurse -Verbose -Force "$env:GITHUB_WORKSPACE/source/_site/*" "$env:GITHUB_WORKSPACE/docs"
working-directory: ./docs
# update docs
- name: Commit to gh-pages and push
run: |
$ErrorActionPreference = "Continue"
git add -A
git diff HEAD --exit-code
if ($LASTEXITCODE -eq 0) {
Write-Host "No changes to commit!"
} else {
git config --global user.name "github-actions-docfx[bot]"
git config --global user.email "github-actions-bot@github.com"
git commit -m "Updated docs from commit $env:GITHUB_SHA on $env:GITHUB_REF"
git remote set-url origin https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}
git push origin gh-pages
}
working-directory: ./docs |
2881099/FreeRedis | 5,431 | test/Unit/FreeRedis.Tests/RedisSentinelClientTests.cs | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Xunit;
namespace FreeRedis.Tests.RedisSentinelClientTests
{
public class SentinelTests
{
public static RedisSentinelClient GetClient() => new RedisSentinelClient(RedisEnvironmentHelper.GetHost("redis_sentinel"));
[Fact]
public void Ping()
{
using (var cli = GetClient())
{
Assert.Equal("PONG", cli.Ping());
}
}
[Fact]
public void Info()
{
using (var cli = GetClient())
{
var rt = cli.Info();
}
}
[Fact]
public void Role()
{
using (var cli = GetClient())
{
var rt = cli.Role();
Assert.Equal(RoleType.Sentinel, rt.role);
Assert.True(rt.masters.Any());
Assert.Equal("mymaster", rt.masters.FirstOrDefault());
}
}
[Fact]
public void Masters()
{
using (var cli = GetClient())
{
var rt = cli.Masters();
Assert.True(rt.Any());
Assert.Equal("mymaster", rt[0].name);
}
}
[Fact]
public void Master()
{
using (var cli = GetClient())
{
var rt = cli.Master("mymaster");
Assert.NotNull(rt);
Assert.Equal("mymaster", rt.name);
Assert.Equal("ERR No such master with that name",
Assert.Throws<RedisServerException>(() => cli.Master("mymaster222")).Message);
}
}
[Fact(Skip = "Salves")]
public void Salves()
{
using (var cli = GetClient())
{
var rt = cli.Salves("mymaster");
Assert.True(rt.Any());
Assert.Equal("ok", rt[0].master_link_status);
}
}
[Fact(Skip = "Sentinels")]
public void Sentinels()
{
using (var cli = GetClient())
{
var rt = cli.Sentinels("mymaster");
Assert.True(rt.Any());
Assert.Equal("127.0.0.1", rt[0].ip);
}
}
[Fact]
public void GetMasterAddrByName()
{
using (var cli = GetClient())
{
var rt = cli.GetMasterAddrByName("mymaster");
Assert.False(string.IsNullOrEmpty(rt));
}
}
[Fact(Skip = "IsMasterDownByAddr")]
public void IsMasterDownByAddr()
{
using (var cli = GetClient())
{
var st = cli.Sentinels("mymaster");
Assert.True(st.Any());
var rt = cli.IsMasterDownByAddr(st[0].name, st[0].port, st[0].voted_leader_epoch, st[0].runid);
Assert.NotNull(rt);
Assert.False(rt.down_state);
Assert.Equal("*", rt.leader);
Assert.Equal(st[0].voted_leader_epoch, rt.vote_epoch);
}
}
[Fact]
public void Reset()
{
using (var cli = GetClient())
{
var rt = cli.Reset("*");
Assert.True(rt > 0);
}
}
[Fact(Skip = "Failover")]
public void Failover()
{
using (var cli = GetClient())
{
cli.Failover("mymaster");
Assert.Equal("ERR No such master with that name",
Assert.Throws<RedisServerException>(() => cli.Failover("mymaster222")).Message);
}
}
[Fact]
public void PendingScripts()
{
using (var cli = GetClient())
{
var rt = cli.PendingScripts();
}
}
[Fact]
public void FlushConfig()
{
using (var cli = GetClient())
{
cli.FlushConfig();
}
}
[Fact]
public void Remove()
{
using (var cli = GetClient())
{
//cli.Remove("mymaster");
Assert.Equal("ERR No such master with that name",
Assert.Throws<RedisServerException>(() => cli.Remove("mymaster222")).Message);
}
}
[Fact(Skip = "CkQuorum")]
public void CkQuorum()
{
using (var cli = GetClient())
{
var rt = cli.CkQuorum("mymaster");
Assert.Equal("ERR No such master with that name",
Assert.Throws<RedisServerException>(() => cli.CkQuorum("mymaster222")).Message);
}
}
[Fact]
public void Set()
{
using (var cli = GetClient())
{
cli.Set("mymaster", "down-after-milliseconds", "5000");
}
}
[Fact]
public void InfoCache()
{
using (var cli = GetClient())
{
var rt = cli.InfoCache("mymaster");
}
}
[Fact]
public void SimulateFailure()
{
using (var cli = GetClient())
{
cli.SimulateFailure(true, true);
}
}
}
}
|
2881099/FreeRedis | 3,480 | test/Unit/FreeRedis.Tests/CommandFlagsTests.cs | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Xunit;
namespace FreeRedis.Tests
{
public class CommandFlagsTests : TestBase
{
[Fact]
public void Test01()
{
var methodsCount = typeof(RedisClient).GetMethods().Count();
}
[Fact]
public void Command()
{
string UFString(string text)
{
if (text.Length <= 1) return text.ToUpper();
else return text.Substring(0, 1).ToUpper() + text.Substring(1, text.Length - 1);
}
var rt = cli.Command();
//var rt = cli.CommandInfo("mset", "mget", "set", "get", "rename");
var flags = new List<string>();
var flags7 = new List<string>();
var diccmd = new Dictionary<string, (string[], string[])>();
var sb = string.Join("\r\n\r\n", (rt).OrderBy(a1 => (a1 as object[])[0].ToString()).Select(a1 =>
{
var a = a1 as object[];
var cmd = a[0].ToString();
var plen = int.Parse(a[1].ToString());
var firstKey = int.Parse(a[3].ToString());
var lastKey = int.Parse(a[4].ToString());
var stepCount = int.Parse(a[5].ToString());
var aflags = (a[2] as object[]).Select(a => a.ToString()).ToArray();
var fopts = (a[6] as object[]).Select(a => a.ToString()).ToArray();
flags.AddRange(aflags);
flags7.AddRange(fopts);
diccmd.Add(cmd.ToUpper(), (aflags, fopts));
var parms = "";
if (plen > 1)
{
for (var x = 1; x < plen; x++)
{
if (x == firstKey) parms += "string key, ";
else parms += $"string arg{x}, ";
}
parms = parms.Remove(parms.Length - 2);
}
if (plen < 0)
{
for (var x = 1; x < -plen; x++)
{
if (x == firstKey)
{
if (firstKey != lastKey) parms += "string[] keys, ";
else parms += "string key, ";
}
else
{
if (firstKey != lastKey) parms += $"string[] arg{x}, ";
else parms += $"string arg{x}, ";
}
}
if (parms.Length > 0)
parms = parms.Remove(parms.Length - 2);
}
return $@"
//{string.Join(", ", a[2] as object[])}
//{string.Join(", ", a[6] as object[])}
public void {UFString(cmd)}({parms}) {{ }}";
}));
flags = flags.Distinct().ToList();
flags7 = flags7.Distinct().ToList();
var sboptions = new StringBuilder();
foreach (var cmd in CommandSets._allCommands)
{
if (diccmd.TryGetValue(cmd, out var tryv))
{
sboptions.Append($@"
[""{cmd}""] = new CommandSets(");
for (var x = 0; x < tryv.Item1.Length; x++)
{
if (x > 0) sboptions.Append(" | ");
sboptions.Append($"ServerFlag.{tryv.Item1[x].Replace("readonly", "@readonly")}");
}
sboptions.Append(", ");
for (var x = 0; x < tryv.Item2.Length; x++)
{
if (x > 0) sboptions.Append(" | ");
sboptions.Append($"ServerTag.{tryv.Item2[x].TrimStart('@').Replace("string", "@string")}");
}
sboptions.Append(", LocalStatus.none),");
}
else
{
sboptions.Append($@"
[""{cmd}""] = new CommandSets(ServerFlag.none, ServerTag.none, LocalStatus.none), ");
}
}
var optioncode = sboptions.ToString();
}
}
}
|
2881099/FreeRedis | 1,904 | test/Unit/FreeRedis.Tests/InterceptorTests.cs | using System;
using System.Collections.Generic;
using System.Collections.Concurrent;
using System.Text;
using Xunit;
using FreeRedis.Internal;
using System.Linq;
namespace FreeRedis.Tests
{
public class InterceptorTests
{
public static RedisClient CreateClient() => new RedisClient(RedisEnvironmentHelper.GetHost("redis_interceptor"));
[Fact]
public void Interceptor()
{
using (var cli = CreateClient())
{
cli.Interceptors.Add(() => new MemoryCacheAop());
cli.Set("Interceptor01", "123123");
var val1 = cli.Get("Interceptor01");
var val2 = cli.Get("Interceptor01");
var val3 = cli.Get("Interceptor01");
Assert.Equal("123123", val1);
Assert.Equal("123123", val2);
Assert.Equal("123123", val3);
}
}
}
class MemoryCacheAop : IInterceptor
{
static ConcurrentDictionary<string, object> _dicStrings = new ConcurrentDictionary<string, object>();
public void After(InterceptorAfterEventArgs args)
{
switch (args.Command._command)
{
case "GET":
if (_iscached == false && args.Exception == null)
_dicStrings.TryAdd(args.Command.GetKey(0), args.Value);
break;
}
}
bool _iscached = false;
public void Before(InterceptorBeforeEventArgs args)
{
switch (args.Command._command)
{
case "GET":
if (_dicStrings.TryGetValue(args.Command.GetKey(0), out var tryval))
{
args.Value = tryval;
_iscached = true;
}
break;
}
}
}
}
|
2881099/FreeRedis | 24,157 | test/Unit/FreeRedis.Tests/AutoMake.cs | using System;
using System.Collections.Generic;
using System.Text;
namespace FreeRedis.Tests
{
class AutoMake
{
//admin, noscript, loading, stale, skip_slowlog
//@admin, @slow, @dangerous
public void Acl(string arg1) { }
//write, denyoom, fast
//@write, @string, @fast
public void Append(string key, string arg2) { }
//fast
//@keyspace, @fast
public void Asking() { }
//noscript, loading, stale, skip_monitor, skip_slowlog, fast, no_auth
//@fast, @connection
public void Auth(string arg1) { }
//admin, noscript
//@admin, @slow, @dangerous
public void Bgrewriteaof() { }
//admin, noscript
//@admin, @slow, @dangerous
public void Bgsave() { }
//readonly
//@read, @bitmap, @slow
public void Bitcount(string key) { }
//write, denyoom
//@write, @bitmap, @slow
public void Bitfield(string key) { }
//readonly, fast
//@read, @bitmap, @fast
public void Bitfield_ro(string key) { }
//write, denyoom
//@write, @bitmap, @slow
public void Bitop(string[] arg1, string[] keys, string[] arg3) { }
//readonly
//@read, @bitmap, @slow
public void Bitpos(string key, string arg2) { }
//write, noscript
//@write, @list, @slow, @blocking
public void Blpop(string[] keys, string[] arg2) { }
//write, noscript
//@write, @list, @slow, @blocking
public void Brpop(string[] keys, string[] arg2) { }
//write, denyoom, noscript
//@write, @list, @slow, @blocking
public void Brpoplpush(string key, string arg2, string arg3) { }
//write, noscript, fast
//@write, @sortedset, @fast, @blocking
public void Bzpopmax(string[] keys, string[] arg2) { }
//write, noscript, fast
//@write, @sortedset, @fast, @blocking
public void Bzpopmin(string[] keys, string[] arg2) { }
//admin, noscript, random, loading, stale
//@admin, @slow, @dangerous, @connection
public void Client(string arg1) { }
//admin, random, stale
//@admin, @slow, @dangerous
public void Cluster(string arg1) { }
//random, loading, stale
//@slow, @connection
public void Command() { }
//admin, noscript, loading, stale
//@admin, @slow, @dangerous
public void Config(string arg1) { }
//readonly, fast
//@keyspace, @read, @fast
public void Dbsize() { }
//admin, noscript, loading, stale
//@admin, @slow, @dangerous
public void Debug(string arg1) { }
//write, denyoom, fast
//@write, @string, @fast
public void Decr(string key) { }
//write, denyoom, fast
//@write, @string, @fast
public void Decrby(string key, string arg2) { }
//write
//@keyspace, @write, @slow
public void Del(string[] keys) { }
//noscript, loading, stale, fast
//@fast, @transaction
public void Discard() { }
//readonly, random
//@keyspace, @read, @slow
public void Dump(string key) { }
//readonly, fast
//@read, @fast, @connection
public void Echo(string arg1) { }
//noscript, movablekeys
//@slow, @scripting
public void Eval(string arg1, string arg2) { }
//noscript, movablekeys
//@slow, @scripting
public void Evalsha(string arg1, string arg2) { }
//noscript, loading, stale, skip_monitor, skip_slowlog
//@slow, @transaction
public void Exec() { }
//readonly, fast
//@keyspace, @read, @fast
public void Exists(string[] keys) { }
//write, fast
//@keyspace, @write, @fast
public void Expire(string key, string arg2) { }
//write, fast
//@keyspace, @write, @fast
public void Expireat(string key, string arg2) { }
//write
//@keyspace, @write, @slow, @dangerous
public void Flushall() { }
//write
//@keyspace, @write, @slow, @dangerous
public void Flushdb() { }
//write, denyoom
//@write, @geo, @slow
public void Geoadd(string key, string arg2, string arg3, string arg4) { }
//readonly
//@read, @geo, @slow
public void Geodist(string key, string arg2, string arg3) { }
//readonly
//@read, @geo, @slow
public void Geohash(string key) { }
//readonly
//@read, @geo, @slow
public void Geopos(string key) { }
//write, movablekeys
//@write, @geo, @slow
public void Georadius(string key, string arg2, string arg3, string arg4, string arg5) { }
//readonly, movablekeys
//@read, @geo, @slow
public void Georadius_ro(string key, string arg2, string arg3, string arg4, string arg5) { }
//write, movablekeys
//@write, @geo, @slow
public void Georadiusbymember(string key, string arg2, string arg3, string arg4) { }
//readonly, movablekeys
//@read, @geo, @slow
public void Georadiusbymember_ro(string key, string arg2, string arg3, string arg4) { }
//readonly, fast
//@read, @string, @fast
public void Get(string key) { }
//readonly, fast
//@read, @bitmap, @fast
public void Getbit(string key, string arg2) { }
//readonly
//@read, @string, @slow
public void Getrange(string key, string arg2, string arg3) { }
//write, denyoom, fast
//@write, @string, @fast
public void Getset(string key, string arg2) { }
//write, fast
//@write, @hash, @fast
public void Hdel(string key, string arg2) { }
//noscript, loading, stale, skip_monitor, skip_slowlog, fast, no_auth
//@fast, @connection
public void Hello(string arg1) { }
//readonly, fast
//@read, @hash, @fast
public void Hexists(string key, string arg2) { }
//readonly, fast
//@read, @hash, @fast
public void Hget(string key, string arg2) { }
//readonly, random
//@read, @hash, @slow
public void Hgetall(string key) { }
//write, denyoom, fast
//@write, @hash, @fast
public void Hincrby(string key, string arg2, string arg3) { }
//write, denyoom, fast
//@write, @hash, @fast
public void Hincrbyfloat(string key, string arg2, string arg3) { }
//readonly, sort_for_script
//@read, @hash, @slow
public void Hkeys(string key) { }
//readonly, fast
//@read, @hash, @fast
public void Hlen(string key) { }
//readonly, fast
//@read, @hash, @fast
public void Hmget(string key, string arg2) { }
//write, denyoom, fast
//@write, @hash, @fast
public void Hmset(string key, string arg2, string arg3) { }
//readonly, loading, stale
//@read, @slow
//public void Host:() { }
//readonly, random
//@read, @hash, @slow
public void Hscan(string key, string arg2) { }
//write, denyoom, fast
//@write, @hash, @fast
public void Hset(string key, string arg2, string arg3) { }
//write, denyoom, fast
//@write, @hash, @fast
public void Hsetnx(string key, string arg2, string arg3) { }
//readonly, fast
//@read, @hash, @fast
public void Hstrlen(string key, string arg2) { }
//readonly, sort_for_script
//@read, @hash, @slow
public void Hvals(string key) { }
//write, denyoom, fast
//@write, @string, @fast
public void Incr(string key) { }
//write, denyoom, fast
//@write, @string, @fast
public void Incrby(string key, string arg2) { }
//write, denyoom, fast
//@write, @string, @fast
public void Incrbyfloat(string key, string arg2) { }
//random, loading, stale
//@slow, @dangerous
public void Info() { }
//readonly, sort_for_script
//@keyspace, @read, @slow, @dangerous
public void Keys(string arg1) { }
//readonly, random, loading, stale, fast
//@read, @admin, @fast, @dangerous
public void Lastsave() { }
//admin, noscript, loading, stale
//@admin, @slow, @dangerous
public void Latency(string arg1) { }
//readonly
//@read, @list, @slow
public void Lindex(string key, string arg2) { }
//write, denyoom
//@write, @list, @slow
public void Linsert(string key, string arg2, string arg3, string arg4) { }
//readonly, fast
//@read, @list, @fast
public void Llen(string key) { }
//readonly, fast
//@read, @fast
public void Lolwut() { }
//write, fast
//@write, @list, @fast
public void Lpop(string key) { }
//write, denyoom, fast
//@write, @list, @fast
public void Lpush(string key, string arg2) { }
//write, denyoom, fast
//@write, @list, @fast
public void Lpushx(string key, string arg2) { }
//readonly
//@read, @list, @slow
public void Lrange(string key, string arg2, string arg3) { }
//write
//@write, @list, @slow
public void Lrem(string key, string arg2, string arg3) { }
//write, denyoom
//@write, @list, @slow
public void Lset(string key, string arg2, string arg3) { }
//write
//@write, @list, @slow
public void Ltrim(string key, string arg2, string arg3) { }
//readonly, random, movablekeys
//@read, @slow
public void Memory(string arg1) { }
//readonly, fast
//@read, @string, @fast
public void Mget(string[] keys) { }
//write, random, movablekeys
//@keyspace, @write, @slow, @dangerous
public void Migrate(string arg1, string arg2, string arg3, string arg4, string arg5) { }
//admin, noscript
//@admin, @slow, @dangerous
public void Module(string arg1) { }
//admin, noscript, loading, stale
//@admin, @slow, @dangerous
public void Monitor() { }
//write, fast
//@keyspace, @write, @fast
public void Move(string key, string arg2) { }
//write, denyoom
//@write, @string, @slow
public void Mset(string[] keys, string[] arg2) { }
//write, denyoom
//@write, @string, @slow
public void Msetnx(string[] keys, string[] arg2) { }
//noscript, loading, stale, fast
//@fast, @transaction
public void Multi() { }
//readonly, random
//@keyspace, @read, @slow
public void Object(string arg1) { }
//write, fast
//@keyspace, @write, @fast
public void Persist(string key) { }
//write, fast
//@keyspace, @write, @fast
public void Pexpire(string key, string arg2) { }
//write, fast
//@keyspace, @write, @fast
public void Pexpireat(string key, string arg2) { }
//write, denyoom, fast
//@write, @hyperloglog, @fast
public void Pfadd(string key) { }
//readonly
//@read, @hyperloglog, @slow
public void Pfcount(string[] keys) { }
//write, admin
//@write, @admin, @slow, @dangerous
public void Pfdebug(string arg1, string arg2) { }
//write, denyoom
//@write, @hyperloglog, @slow
public void Pfmerge(string[] keys) { }
//admin
//@hyperloglog, @admin, @slow, @dangerous
public void Pfselftest() { }
//stale, fast
//@fast, @connection
public void Ping() { }
//readonly, loading, stale
//@read, @slow
public void Post() { }
//write, denyoom
//@write, @string, @slow
public void Psetex(string key, string arg2, string arg3) { }
//pubsub, noscript, loading, stale
//@pubsub, @slow
public void Psubscribe(string arg1) { }
//admin, noscript
//@admin, @slow, @dangerous
public void Psync(string arg1, string arg2) { }
//readonly, random, fast
//@keyspace, @read, @fast
public void Pttl(string key) { }
//pubsub, loading, stale, fast
//@pubsub, @fast
public void Publish(string arg1, string arg2) { }
//pubsub, random, loading, stale
//@pubsub, @slow
public void Pubsub(string arg1) { }
//pubsub, noscript, loading, stale
//@pubsub, @slow
public void Punsubscribe() { }
//readonly, random
//@keyspace, @read, @slow
public void Randomkey() { }
//fast
//@keyspace, @fast
public void Readonly() { }
//fast
//@keyspace, @fast
public void Readwrite() { }
//write
//@keyspace, @write, @slow
public void Rename(string key, string arg2) { }
//write, fast
//@keyspace, @write, @fast
public void Renamenx(string key, string arg2) { }
//admin, noscript, loading, stale
//@admin, @slow, @dangerous
public void Replconf() { }
//admin, noscript, stale
//@admin, @slow, @dangerous
public void Replicaof(string arg1, string arg2) { }
//write, denyoom
//@keyspace, @write, @slow, @dangerous
public void Restore(string key, string arg2, string arg3) { }
//write, denyoom, asking
//@keyspace, @write, @slow, @dangerous
//public void Restore-asking(string key, string arg2, string arg3) { }
//readonly, noscript, loading, stale, fast
//@read, @fast, @dangerous
public void Role() { }
//write, fast
//@write, @list, @fast
public void Rpop(string key) { }
//write, denyoom
//@write, @list, @slow
public void Rpoplpush(string key, string arg2) { }
//write, denyoom, fast
//@write, @list, @fast
public void Rpush(string key, string arg2) { }
//write, denyoom, fast
//@write, @list, @fast
public void Rpushx(string key, string arg2) { }
//write, denyoom, fast
//@write, @set, @fast
public void Sadd(string key, string arg2) { }
//admin, noscript
//@admin, @slow, @dangerous
public void Save() { }
//readonly, random
//@keyspace, @read, @slow
public void Scan(string arg1) { }
//readonly, fast
//@read, @set, @fast
public void Scard(string key) { }
//noscript
//@slow, @scripting
public void Script(string arg1) { }
//readonly, sort_for_script
//@read, @set, @slow
public void Sdiff(string[] keys) { }
//write, denyoom
//@write, @set, @slow
public void Sdiffstore(string[] keys, string[] arg2) { }
//loading, stale, fast
//@keyspace, @fast
public void Select(string arg1) { }
//write, denyoom
//@write, @string, @slow
public void Set(string key, string arg2) { }
//write, denyoom
//@write, @bitmap, @slow
public void Setbit(string key, string arg2, string arg3) { }
//write, denyoom
//@write, @string, @slow
public void Setex(string key, string arg2, string arg3) { }
//write, denyoom, fast
//@write, @string, @fast
public void Setnx(string key, string arg2) { }
//write, denyoom
//@write, @string, @slow
public void Setrange(string key, string arg2, string arg3) { }
//admin, noscript, loading, stale
//@admin, @slow, @dangerous
public void Shutdown() { }
//readonly, sort_for_script
//@read, @set, @slow
public void Sinter(string[] keys) { }
//write, denyoom
//@write, @set, @slow
public void Sinterstore(string[] keys, string[] arg2) { }
//readonly, fast
//@read, @set, @fast
public void Sismember(string key, string arg2) { }
//admin, noscript, stale
//@admin, @slow, @dangerous
public void Slaveof(string arg1, string arg2) { }
//admin, random, loading, stale
//@admin, @slow, @dangerous
public void Slowlog(string arg1) { }
//readonly, sort_for_script
//@read, @set, @slow
public void Smembers(string key) { }
//write, fast
//@write, @set, @fast
public void Smove(string key, string arg2, string arg3) { }
//write, denyoom, movablekeys
//@write, @set, @sortedset, @list, @slow, @dangerous
public void Sort(string key) { }
//write, random, fast
//@write, @set, @fast
public void Spop(string key) { }
//readonly, random
//@read, @set, @slow
public void Srandmember(string key) { }
//write, fast
//@write, @set, @fast
public void Srem(string key, string arg2) { }
//readonly, random
//@read, @set, @slow
public void Sscan(string key, string arg2) { }
//readonly, movablekeys
//@read, @string, @slow
public void Stralgo(string arg1) { }
//readonly, fast
//@read, @string, @fast
public void Strlen(string key) { }
//pubsub, noscript, loading, stale
//@pubsub, @slow
public void Subscribe(string arg1) { }
//readonly
//@read, @string, @slow
public void Substr(string key, string arg2, string arg3) { }
//readonly, sort_for_script
//@read, @set, @slow
public void Sunion(string[] keys) { }
//write, denyoom
//@write, @set, @slow
public void Sunionstore(string[] keys, string[] arg2) { }
//write, fast
//@keyspace, @write, @fast, @dangerous
public void Swapdb(string arg1, string arg2) { }
//admin, noscript
//@admin, @slow, @dangerous
public void Sync() { }
//readonly, random, loading, stale, fast
//@read, @fast
public void Time() { }
//readonly, fast
//@keyspace, @read, @fast
public void Touch(string[] keys) { }
//readonly, random, fast
//@keyspace, @read, @fast
public void Ttl(string key) { }
//readonly, fast
//@keyspace, @read, @fast
public void Type(string key) { }
//write, fast
//@keyspace, @write, @fast
public void Unlink(string[] keys) { }
//pubsub, noscript, loading, stale
//@pubsub, @slow
public void Unsubscribe() { }
//noscript, fast
//@fast, @transaction
public void Unwatch() { }
//noscript
//@keyspace, @slow
public void Wait(string arg1, string arg2) { }
//noscript, fast
//@fast, @transaction
public void Watch(string[] keys) { }
//write, random, fast
//@write, @stream, @fast
public void Xack(string key, string arg2, string arg3) { }
//write, denyoom, random, fast
//@write, @stream, @fast
public void Xadd(string key, string arg2, string arg3, string arg4) { }
//write, random, fast
//@write, @stream, @fast
public void Xclaim(string key, string arg2, string arg3, string arg4, string arg5) { }
//write, fast
//@write, @stream, @fast
public void Xdel(string key, string arg2) { }
//write, denyoom
//@write, @stream, @slow
public void Xgroup(string arg1) { }
//readonly, random
//@read, @stream, @slow
public void Xinfo(string arg1) { }
//readonly, fast
//@read, @stream, @fast
public void Xlen(string key) { }
//readonly, random
//@read, @stream, @slow
public void Xpending(string key, string arg2) { }
//readonly
//@read, @stream, @slow
public void Xrange(string key, string arg2, string arg3) { }
//readonly, movablekeys
//@read, @stream, @slow, @blocking
public void Xread(string key, string arg2, string arg3) { }
//write, movablekeys
//@write, @stream, @slow, @blocking
public void Xreadgroup(string key, string arg2, string arg3, string arg4, string arg5, string arg6) { }
//readonly
//@read, @stream, @slow
public void Xrevrange(string key, string arg2, string arg3) { }
//write, denyoom, fast
//@write, @stream, @fast
public void Xsetid(string key, string arg2) { }
//write, random
//@write, @stream, @slow
public void Xtrim(string key) { }
//write, denyoom, fast
//@write, @sortedset, @fast
public void Zadd(string key, string arg2, string arg3) { }
//readonly, fast
//@read, @sortedset, @fast
public void Zcard(string key) { }
//readonly, fast
//@read, @sortedset, @fast
public void Zcount(string key, string arg2, string arg3) { }
//write, denyoom, fast
//@write, @sortedset, @fast
public void Zincrby(string key, string arg2, string arg3) { }
//write, denyoom, movablekeys
//@write, @sortedset, @slow
public void Zinterstore(string arg1, string arg2, string arg3) { }
//readonly, fast
//@read, @sortedset, @fast
public void Zlexcount(string key, string arg2, string arg3) { }
//write, fast
//@write, @sortedset, @fast
public void Zpopmax(string key) { }
//write, fast
//@write, @sortedset, @fast
public void Zpopmin(string key) { }
//readonly
//@read, @sortedset, @slow
public void Zrange(string key, string arg2, string arg3) { }
//readonly
//@read, @sortedset, @slow
public void Zrangebylex(string key, string arg2, string arg3) { }
//readonly
//@read, @sortedset, @slow
public void Zrangebyscore(string key, string arg2, string arg3) { }
//readonly, fast
//@read, @sortedset, @fast
public void Zrank(string key, string arg2) { }
//write, fast
//@write, @sortedset, @fast
public void Zrem(string key, string arg2) { }
//write
//@write, @sortedset, @slow
public void Zremrangebylex(string key, string arg2, string arg3) { }
//write
//@write, @sortedset, @slow
public void Zremrangebyrank(string key, string arg2, string arg3) { }
//write
//@write, @sortedset, @slow
public void Zremrangebyscore(string key, string arg2, string arg3) { }
//readonly
//@read, @sortedset, @slow
public void Zrevrange(string key, string arg2, string arg3) { }
//readonly
//@read, @sortedset, @slow
public void Zrevrangebylex(string key, string arg2, string arg3) { }
//readonly
//@read, @sortedset, @slow
public void Zrevrangebyscore(string key, string arg2, string arg3) { }
//readonly, fast
//@read, @sortedset, @fast
public void Zrevrank(string key, string arg2) { }
//readonly, random
//@read, @sortedset, @slow
public void Zscan(string key, string arg2) { }
//readonly, fast
//@read, @sortedset, @fast
public void Zscore(string key, string arg2) { }
//write, denyoom, movablekeys
//@write, @sortedset, @slow
public void Zunionstore(string arg1, string arg2, string arg3) { }
}
}
|
2881099/FreeRedis | 1,907 | test/Unit/FreeRedis.Tests/CommandPacketTests.cs | using System;
using System.Collections.Generic;
using System.Collections.Concurrent;
using System.Text;
using Xunit;
namespace FreeRedis.Tests
{
/// <summary>
/// Unit tests for <see cref="CommandPacket"/> key-prefix handling:
/// prefixes must be applied to every input key and must be replaced
/// (not stacked) when <c>Prefix</c> is called again.
/// </summary>
public class CommandPacketTests
{
    /// <summary>Prefix rewrites every key in the rendered command; a second call replaces the old prefix.</summary>
    [Fact]
    public void Prefix()
    {
        var singleKeyCommand = new CommandPacket("GET").InputKey("key1").Prefix("prefix_");
        Assert.Equal("GET prefix_key1", singleKeyCommand.ToString());
        // Calling Prefix again must replace the previous prefix, not append to it.
        Assert.Equal("GET prefix01_key1", singleKeyCommand.Prefix("prefix01_").ToString());

        var multiKeyCommand = new CommandPacket("MGET").InputKey(new[] { "key1", "key2" }).Prefix("prefix_");
        Assert.Equal("MGET prefix_key1 prefix_key2", multiKeyCommand.ToString());
        Assert.Equal("MGET prefix01_key1 prefix01_key2", multiKeyCommand.Prefix("prefix01_").ToString());
    }

    /// <summary>GetKey returns the prefixed key by default and the raw key when the second argument is true.</summary>
    [Fact]
    public void GetKey()
    {
        var singleKeyCommand = new CommandPacket("GET").InputKey("key1").Prefix("prefix_");
        Assert.Equal("prefix_key1", singleKeyCommand.GetKey(0));
        Assert.Equal("key1", singleKeyCommand.GetKey(0, true));
        // Re-prefixing replaces the previous prefix in GetKey results as well.
        Assert.Equal("prefix01_key1", singleKeyCommand.Prefix("prefix01_").GetKey(0));
        Assert.Equal("key1", singleKeyCommand.Prefix("prefix01_").GetKey(0, true));

        var multiKeyCommand = new CommandPacket("MGET").InputKey(new[] { "key1", "key2" }).Prefix("prefix_");
        Assert.Equal("prefix_key1", multiKeyCommand.GetKey(0));
        Assert.Equal("key1", multiKeyCommand.GetKey(0, true));
        Assert.Equal("prefix_key2", multiKeyCommand.GetKey(1));
        Assert.Equal("key2", multiKeyCommand.GetKey(1, true));
        Assert.Equal("prefix01_key1", multiKeyCommand.Prefix("prefix01_").GetKey(0));
        Assert.Equal("key1", multiKeyCommand.Prefix("prefix01_").GetKey(0, true));
        Assert.Equal("prefix01_key2", multiKeyCommand.Prefix("prefix01_").GetKey(1));
        Assert.Equal("key2", multiKeyCommand.Prefix("prefix01_").GetKey(1, true));
    }
}
}
|
2881099/FreeRedis | 1,913 | test/Unit/FreeRedis.Tests/TestBase.cs | using Newtonsoft.Json;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Text;
namespace FreeRedis.Tests
{
/// <summary>
/// Shared base class for the FreeRedis test suite: holds the connection
/// configuration, a lazily created shared <see cref="RedisClient"/>, and a
/// handful of fixture values (null, non-ASCII string, raw bytes, a POCO).
/// </summary>
public class TestBase
{
    // Connection settings for the single-node test Redis instance.
    // Commented alternatives document other environments used during development.
    protected static ConnectionStringBuilder Connection = new ConnectionStringBuilder()
    {
        Host = "127.0.0.1", // RedisEnvironmentHelper.GetHost("redis_single"),
        //Password = "123456",
        Database = 1,
        MaxPoolSize = 10,
        Protocol = RedisProtocol.RESP2,
        ClientName = "FreeRedis"
    };
    //static Lazy<RedisClient> _cliLazy = new Lazy<RedisClient>(() => new RedisClient("127.0.0.1:6379,database=1", "127.0.0.1:6379,database=1"));
    // Lazily constructed so every test class shares a single client/connection pool.
    static Lazy<RedisClient> _cliLazy = new Lazy<RedisClient>(() =>
    {
        //var r = new RedisClient(new ConnectionStringBuilder[] { "127.0.0.1:6379,database=1,password=123" }); //redis 3.2 cluster
        //var r = new RedisClient("127.0.0.1:6379,database=1"); //redis 3.2
        //var r = new RedisClient("127.0.0.1:6379,database=1", "127.0.0.1:6379,database=1");
        var r = new RedisClient(Connection); //redis 6.0
        // var r = new RedisClient(connectionString); //redis 6.0
        // Route object (de)serialization through Json.NET and mirror client log events to trace output.
        r.Serialize = obj => JsonConvert.SerializeObject(obj);
        r.Deserialize = (json, type) => JsonConvert.DeserializeObject(json, type);
        r.Notice += (s, e) => Trace.WriteLine(e.Log);
        return r;
    });
    /// <summary>Shared client instance used by all derived test classes.</summary>
    public static RedisClient cli => _cliLazy.Value;

    // Fixture values: a null reference, a non-ASCII string, UTF-8 bytes, and a sample POCO.
    protected readonly object Null = null;
    protected readonly string String = "我是中国人";
    protected readonly byte[] Bytes = Encoding.UTF8.GetBytes("这是一个byte字节");
    protected readonly TestClass Class = new TestClass { Id = 1, Name = "Class名称", CreateTime = DateTime.Now, TagId = new[] { 1, 3, 3, 3, 3 } };
    public TestBase() {
        //rds.NodesServerManager.FlushAll();
    }
}
/// <summary>
/// Plain data object used as a serialization payload by the test fixtures.
/// </summary>
public class TestClass
{
    /// <summary>Numeric identifier.</summary>
    public int Id { get; set; }

    /// <summary>Display name.</summary>
    public string Name { get; set; }

    /// <summary>Creation timestamp.</summary>
    public DateTime CreateTime { get; set; }

    /// <summary>Associated tag ids (duplicates allowed).</summary>
    public int[] TagId { get; set; }
}
}
|
2881099/FreeRedis | 5,408 | test/Unit/FreeRedis.Tests/RedisClientTests/ConnectionTests.cs | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Xunit;
namespace FreeRedis.Tests.RedisClientTests.Other
{
/// <summary>
/// Tests for connection-level commands (AUTH, CLIENT *, ECHO, HELLO, PING, SELECT).
/// Several tests rely on the exact ordering of stateful calls against the server,
/// so statement order inside each test is significant.
/// </summary>
public class ConnectionTests : TestBase
{
    // Authenticates with and without an explicit user name.
    [Fact]
    public void Auth()
    {
        using (var db = cli.GetDatabase())
        {
            db.Auth("123456");
            db.Auth("default", "123456");
        }
    }
    // CLIENT CACHING requires server-side tracking to be enabled first.
    [Fact(Skip = "Need special environment")]
    public void ClientCaching()
    {
        cli.ClientCaching(Confirm.yes);
        cli.ClientCaching(Confirm.no);
    }
    [Fact]
    public void ClientGetName()
    {
        using (var db = cli.GetDatabase())
        {
            db.ClientSetName("xxx-test001");
            Assert.Equal("xxx-test001", db.ClientGetName());
        }
    }
    // Smoke test only: just verifies the call does not throw.
    [Fact]
    public void ClientGetRedir()
    {
        var r1 = cli.ClientGetRedir();
    }
    // Smoke test only: just verifies the call does not throw.
    [Fact]
    public void ClientId()
    {
        var r1 = cli.ClientId();
    }
    // The addresses used here do not correspond to a live client, so the
    // server is expected to reply with "ERR No such client".
    [Fact]
    public void ClientKill()
    {
        try
        {
            cli.ClientKill("localhost");
            cli.ClientKill("localhost", 1, ClientType.master, "default", "127.0.0.1:50618", Confirm.yes);
            cli.ClientKill("localhost", 1, ClientType.normal, "default", "127.0.0.1:50618", Confirm.yes);
            cli.ClientKill("localhost", 1, ClientType.pubsub, "default", "127.0.0.1:50618", Confirm.yes);
            cli.ClientKill("localhost", 1, ClientType.slave, "default", "127.0.0.1:50618", Confirm.yes);
        }
        catch (RedisServerException ex)
        {
            Assert.Equal("ERR No such client", ex.Message);
        }
    }
    // Smoke test: exercises each ClientType filter without asserting content.
    [Fact]
    public void ClientList()
    {
        var r1 = cli.ClientList();
        var r2 = cli.ClientList(ClientType.master);
        var r3 = cli.ClientList(ClientType.normal);
        var r4 = cli.ClientList(ClientType.pubsub);
        var r5 = cli.ClientList(ClientType.slave);
    }
    [Fact]
    public void ClientPause()
    {
        cli.ClientPause(1000);
    }
    // With replies off the server returns nothing, so Get must yield null even
    // after a successful Set; turning replies back on restores normal behavior.
    [Fact]
    public void ClientReply()
    {
        using (var db = cli.GetDatabase())
        {
            //db.ClientReply(ClientReplyType.On);
            db.ClientReply(ClientReplyType.off);
            db.ClientReply(ClientReplyType.skip);
            db.ClientReply(ClientReplyType.on);
            db.SetGetTest();
            db.ClientReply(ClientReplyType.off);
            var key = Guid.NewGuid().ToString();
            db.Set(key, key);
            Assert.Null(db.Get(key));
            db.ClientReply(ClientReplyType.on);
            db.SetGetTest();
        }
    }
    [Fact]
    public void ClientSetName()
    {
        using (var db = cli.GetDatabase())
        {
            db.ClientSetName("xxx-test002");
            Assert.Equal("xxx-test002", db.ClientGetName());
        }
    }
    // Toggles client-side caching tracking on and off.
    [Fact]
    public void ClientTracking()
    {
        using (var db = cli.GetDatabase())
        {
            db.ClientTracking(true, null, null, false, false, false, false);
            db.ClientTracking(false, null, null, false, false, false, false);
        }
    }
    // Smoke test against arbitrary (likely non-blocked) client ids.
    [Fact]
    public void ClientUnBlock()
    {
        var r1 = cli.ClientUnBlock(1);
        var r2 = cli.ClientUnBlock(11);
        var r3 = cli.ClientUnBlock(11, ClientUnBlockType.error);
        var r4 = cli.ClientUnBlock(11, ClientUnBlockType.timeout);
    }
    [Fact]
    public void Echo()
    {
        var txt = Guid.NewGuid().ToString();
        Assert.Equal(txt, cli.Echo(txt));
    }
    // Switches the connection between RESP3 and RESP2 via HELLO; needs a
    // password-protected server, hence the dedicated scoped client.
    [Fact]
    public void Hello()
    {
        RedisScopeExecHelper.ExecScope(new ConnectionStringBuilder()
        {
            Host = RedisEnvironmentHelper.GetHost("redis_single"),
            Password = "123456",
            MaxPoolSize = 1
        }, (cli) =>
        {
            var r1 = cli.Hello("3");
            var r2 = cli.Hello("3", "default", "123456", "myname-client");
            Assert.Equal("myname-client", cli.ClientGetName());
            var r3 = cli.Hello("2");
        });
    }
    [Fact]
    public void Ping()
    {
        Assert.Equal("PONG", cli.Ping());
        var txt = Guid.NewGuid().ToString();
        Assert.Equal(txt, cli.Ping(txt));
    }
    // SELECT is incompatible with connection pooling (each pooled connection
    // would keep its own database index), so this is skipped.
    [Fact(Skip = "Connection pool skip")]
    public void Select()
    {
        using (var db = cli.GetDatabase())
        {
            db.Select(1);
            db.SetGetTest();
            Assert.Equal("PONG", db.Ping());
            var key = Guid.NewGuid().ToString();
            db.Set(key, key);
            Assert.Equal(key, db.Get(key));
            db.Select(1);
            db.SetGetTest();
            Assert.Equal("PONG", db.Ping());
            Assert.NotEqual(key, db.Get(key));
            db.Set(key, key);
            Assert.Equal(key, db.Get(key));
        }
    }
}
}
|
2881099/FreeRedis | 3,658 | test/Unit/FreeRedis.Tests/RedisClientTests/PubSubTests.cs | using Newtonsoft.Json;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading;
using Xunit;
namespace FreeRedis.Tests.RedisClientTests.Other
{
/// <summary>
/// Tests for the pub/sub commands (SUBSCRIBE, PSUBSCRIBE, PUBLISH, PUBSUB *).
/// These tests are timing-sensitive: they rely on Thread.Join delays to let
/// subscriptions register and messages flow.
/// </summary>
public class PubSubTests : TestBase
{
    [Fact]
    public void PSubscribe()
    {
        //see Subscribe
    }
    // No subscribers exist for this channel, so PUBLISH must report 0 receivers.
    [Fact]
    public void Publish()
    {
        var key1 = "Publish1";
        Assert.Equal(0, cli.Publish(key1, "test"));
    }
    // While a subscription is alive, PUBSUB CHANNELS with a matching pattern
    // must list exactly that channel.
    [Fact]
    public void PubSubChannels()
    {
        RedisScopeExecHelper.ExecScope(Connection, (cli) =>
        {
            var key1 = "PubSubChannels1";
            using (cli.Subscribe(key1, (chan, msg) =>
            {
            }))
            {
                var chans = cli.PubSubChannels("PubSubChannels1*");
                Assert.Single(chans);
                Assert.Equal(key1, chans[0]);
                Thread.CurrentThread.Join(500);
            }
        });
    }
    // PUBSUB NUMSUB must report one subscriber for the subscribed channel,
    // both via the single-channel and the array overload.
    [Fact]
    public void PubSubNumSub()
    {
        RedisScopeExecHelper.ExecScope(Connection, (cli) =>
        {
            var key1 = "PubSubNumSub1";
            using (cli.Subscribe(key1, (chan, msg) =>
            {
            }))
            {
                var r1 = cli.PubSubNumSub("PubSubNumSub1");
                Assert.Equal(1, r1);
                var r2 = cli.PubSubNumSub(new[] { "PubSubNumSub1" });
                Assert.Single(r2);
                Assert.Equal(1, r2[0]);
                Thread.CurrentThread.Join(500);
            }
        });
    }
    // Smoke test only: verifies the call does not throw.
    [Fact]
    public void PubSubNumPat()
    {
        cli.PubSubNumPat();
    }
    [Fact]
    public void PUnSubscribe()
    {
    }
    // Publishes from a background thread while nesting channel and pattern
    // subscriptions, then repeats the whole cycle to verify re-subscription works.
    // NOTE(review): isbreak is a captured non-volatile local read in a loop on
    // another thread; visibility of the final write relies on runtime behavior —
    // consider Volatile.Read/Write or a CancellationToken. Also, if an assertion
    // throws before the last line, the publisher thread is never stopped.
    [Fact]
    public void Subscribe()
    {
        var key1 = "Subscribe1";
        var key2 = "Subscribe2";
        bool isbreak = false;
        new Thread(() =>
        {
            while (isbreak == false)
            {
                cli.Publish(key1, Guid.NewGuid().ToString());
                cli.Publish(key2, Guid.NewGuid().ToString());
                cli.Publish("randomSubscribe1", Guid.NewGuid().ToString());
                Thread.CurrentThread.Join(100);
            }
        }).Start();
        using (cli.Subscribe(key1, ondata))
        {
            using (cli.Subscribe(key2, ondata))
            {
                using (cli.PSubscribe("*", ondata))
                {
                    Thread.CurrentThread.Join(2000);
                }
                Thread.CurrentThread.Join(2000);
            }
            Thread.CurrentThread.Join(2000);
        }
        Trace.WriteLine("one more time");
        using (cli.Subscribe(key1, ondata))
        {
            using (cli.Subscribe(key2, ondata))
            {
                using (cli.PSubscribe("*", ondata))
                {
                    Thread.CurrentThread.Join(2000);
                }
                Thread.CurrentThread.Join(2000);
            }
            Thread.CurrentThread.Join(2000);
        }
        // Local handler: just traces received messages; no assertions on content.
        void ondata(string channel, object data)
        {
            Trace.WriteLine($"{channel} -> {data}");
        }
        isbreak = true;
    }
    [Fact]
    public void UnSubscribe()
    {
    }
}
}
|
2881099/FreeRedis | 5,702 | test/Unit/FreeRedis.Tests/RedisClientTests/GeoTests.cs | using Newtonsoft.Json;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using Xunit;
namespace FreeRedis.Tests.RedisClientTests
{
/// <summary>
/// Tests for the GEO* commands (GEOADD, GEODIST, GEOHASH, GEOPOS, GEORADIUS,
/// GEORADIUSBYMEMBER). Each test deletes its key first so runs are repeatable.
/// </summary>
public class GeoTests : TestBase
{
    // NOTE(review): test name has a typo ("GetAdd" instead of "GeoAdd");
    // kept as-is so existing test filters keep matching.
    [Fact]
    public void GetAdd()
    {
        cli.Del("TestGeoAdd");
        // Adding three distinct members must report three new entries.
        Assert.Equal(3, cli.GeoAdd("TestGeoAdd",
            new GeoMember(10, 20, "m1"),
            new GeoMember(11, 21, "m2"),
            new GeoMember(12, 22, "m3")));
    }
    // GEODIST returns a distance for existing member pairs and null when
    // either member is missing.
    [Fact]
    public void GeoDist()
    {
        cli.Del("TestGeoDist");
        Assert.Equal(3, cli.GeoAdd("TestGeoDist",
            new GeoMember(10, 20, "m1"),
            new GeoMember(11, 21, "m2"),
            new GeoMember(12, 22, "m3")));
        Assert.NotNull(cli.GeoDist("TestGeoDist", "m1", "m2"));
        Assert.NotNull(cli.GeoDist("TestGeoDist", "m1", "m3"));
        Assert.NotNull(cli.GeoDist("TestGeoDist", "m2", "m3"));
        Assert.Null(cli.GeoDist("TestGeoDist", "m1", "m31"));
        Assert.Null(cli.GeoDist("TestGeoDist", "m11", "m31"));
    }
    // GEOHASH yields a non-empty hash per existing member and an empty entry
    // for missing members.
    [Fact]
    public void GeoHash()
    {
        cli.Del("TestGeoHash");
        Assert.Equal(3, cli.GeoAdd("TestGeoHash",
            new GeoMember(10, 20, "m1"),
            new GeoMember(11, 21, "m2"),
            new GeoMember(12, 22, "m3")));
        Assert.False(string.IsNullOrEmpty(cli.GeoHash("TestGeoHash", "m1")));
        // FIX: the original used .Select(pred).Count(), which counts every
        // element regardless of the predicate and made this assertion vacuous;
        // Count(predicate) actually counts the non-empty hashes.
        Assert.Equal(2, cli.GeoHash("TestGeoHash", new[] { "m1", "m2" }).Count(a => string.IsNullOrEmpty(a) == false));
        Assert.Equal(2, cli.GeoHash("TestGeoHash", new[] { "m1", "m2", "m22" }).Count(a => string.IsNullOrEmpty(a) == false));
    }
    // GEOPOS returns one slot per requested member, with null for the
    // member that was never added ("m22").
    [Fact]
    public void GeoPos()
    {
        cli.Del("TestGeoPos");
        Assert.Equal(3, cli.GeoAdd("TestGeoPos",
            new GeoMember(10, 20, "m1"),
            new GeoMember(11, 21, "m2"),
            new GeoMember(12, 22, "m3")));
        Assert.Equal(4, cli.GeoPos("TestGeoPos", new[] { "m1", "m2", "m22", "m3" }).Length);
        //Assert.Equal((10, 20), rds.GeoPos("TestGeoPos", new[] { "m1", "m2", "m22", "m3" })[0]);
        //Assert.Equal((11, 21), rds.GeoPos("TestGeoPos", new[] { "m1", "m2", "m22", "m3" })[1]);
        Assert.Null(cli.GeoPos("TestGeoPos", new[] { "m1", "m2", "m22", "m3" })[2]);
        //Assert.Equal((12, 22), rds.GeoPos("TestGeoPos", new[] { "m1", "m2", "m22", "m3" })[3]);
    }
    // Exercises every withCoord/withDist/withHash combination of GEORADIUS.
    [Fact]
    public void GeoRadius()
    {
        cli.Del("TestGeoRadius");
        Assert.Equal(2, cli.GeoAdd("TestGeoRadius",
            new GeoMember(13.361389m, 38.115556m, "Palermo"),
            new GeoMember(15.087269m, 37.502669m, "Catania")));
        var geopos = cli.GeoPos("TestGeoRadius", new[] { "m1", "Catania", "m2", "Palermo", "Catania2" });
        var georadius1 = cli.GeoRadius("TestGeoRadius", 15, 37, 200, GeoUnit.km);
        var georadius2 = cli.GeoRadius("TestGeoRadius", 15, 37, 200, GeoUnit.km, true);
        var georadius3 = cli.GeoRadius("TestGeoRadius", 15, 37, 200, GeoUnit.km, true, true);
        var georadius4 = cli.GeoRadius("TestGeoRadius", 15, 37, 200, GeoUnit.km, true, true, true);
        var georadius5 = cli.GeoRadius("TestGeoRadius", 15, 37, 200, GeoUnit.km, true, true, false);
        var georadius6 = cli.GeoRadius("TestGeoRadius", 15, 37, 200, GeoUnit.km, true, false);
        var georadius7 = cli.GeoRadius("TestGeoRadius", 15, 37, 200, GeoUnit.km, true, false, true);
        var georadius8 = cli.GeoRadius("TestGeoRadius", 15, 37, 200, GeoUnit.km, true, false, false);
        var georadius9 = cli.GeoRadius("TestGeoRadius", 15, 37, 200, GeoUnit.km, false, true, false);
        var georadius10 = cli.GeoRadius("TestGeoRadius", 15, 37, 200, GeoUnit.km, false, true, true);
        var georadius11 = cli.GeoRadius("TestGeoRadius", 15, 37, 200, GeoUnit.km, false, false, true);
    }
    // NOTE(review): this test is a copy-paste of GeoRadius — every call below
    // invokes cli.GeoRadius, so GeoRadiusByMember itself is never exercised.
    // Left unchanged because the GeoRadiusByMember overloads (member-based
    // center instead of lon/lat) are not visible here; rewrite once confirmed.
    [Fact]
    public void GeoRadiusByMember()
    {
        cli.Del("GeoRadiusByMember");
        Assert.Equal(2, cli.GeoAdd("GeoRadiusByMember",
            new GeoMember(13.361389m, 38.115556m, "Palermo"),
            new GeoMember(15.087269m, 37.502669m, "Catania")));
        var geopos = cli.GeoPos("GeoRadiusByMember", new[] { "m1", "Catania", "m2", "Palermo", "Catania2" });
        var georadius1 = cli.GeoRadius("GeoRadiusByMember", 15, 37, 200, GeoUnit.km);
        var georadius2 = cli.GeoRadius("GeoRadiusByMember", 15, 37, 200, GeoUnit.km, true);
        var georadius3 = cli.GeoRadius("GeoRadiusByMember", 15, 37, 200, GeoUnit.km, true, true);
        var georadius4 = cli.GeoRadius("GeoRadiusByMember", 15, 37, 200, GeoUnit.km, true, true, true);
        var georadius5 = cli.GeoRadius("GeoRadiusByMember", 15, 37, 200, GeoUnit.km, true, true, false);
        var georadius6 = cli.GeoRadius("GeoRadiusByMember", 15, 37, 200, GeoUnit.km, true, false);
        var georadius7 = cli.GeoRadius("GeoRadiusByMember", 15, 37, 200, GeoUnit.km, true, false, true);
        var georadius8 = cli.GeoRadius("GeoRadiusByMember", 15, 37, 200, GeoUnit.km, true, false, false);
        var georadius9 = cli.GeoRadius("GeoRadiusByMember", 15, 37, 200, GeoUnit.km, false, true, false);
        var georadius10 = cli.GeoRadius("GeoRadiusByMember", 15, 37, 200, GeoUnit.km, false, true, true);
        var georadius11 = cli.GeoRadius("GeoRadiusByMember", 15, 37, 200, GeoUnit.km, false, false, true);
    }
}
}
|
2881099/FreeRedis | 2,061 | test/Unit/FreeRedis.Tests/RedisClientTests/ClusterTests.cs | using Newtonsoft.Json;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading;
using Xunit;
namespace FreeRedis.Tests.RedisClientTests
{
/// <summary>
/// Placeholder suite for the CLUSTER * commands. Every test body is
/// intentionally empty: running them requires a live Redis Cluster
/// environment, so they currently only pin down the intended coverage.
/// </summary>
public class ClusterTests : TestBase
{
    [Fact] public void ClusterAddSlots() { }
    [Fact] public void ClusterBumpEpoch() { }
    [Fact] public void ClusterCountFailureReports() { }
    [Fact] public void ClusterCountKeysInSlot() { }
    [Fact] public void ClusterDelSlots() { }
    [Fact] public void ClusterFailOver() { }
    [Fact] public void ClusterFlushSlots() { }
    [Fact] public void ClusterForget() { }
    [Fact] public void ClusterGetKeysInSlot() { }
    [Fact] public void ClusterInfo() { }
    [Fact] public void ClusterKeySlot() { }
    [Fact] public void ClusterMeet() { }
    [Fact] public void ClusterMyId() { }
    [Fact] public void ClusterNodes() { }
    [Fact] public void ClusterReplicas() { }
    [Fact] public void ClusterReplicate() { }
    [Fact] public void ClusterReset() { }
    [Fact] public void ClusterSaveConfig() { }
    [Fact] public void ClusterSetConfigEpoch() { }
    [Fact] public void ClusterSetSlot() { }
    [Fact] public void ClusterSlaves() { }
    [Fact] public void ClusterSlots() { }
    [Fact] public void ReadOnly() { }
    [Fact] public void ReadWrite() { }
}
}
|
2881099/FreeRedis | 5,042 | test/Unit/FreeRedis.Tests/RedisClientTests/ScriptingTests.cs | using Newtonsoft.Json;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading;
using Xunit;
namespace FreeRedis.Tests.RedisClientTests
{
public class ScriptingTests : TestBase
{
[Fact]
public void Eval()
{
using (var sh = cli.GetDatabase())
{
var r1 = sh.Eval("return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", new[] { "key1", "key2" }, "first", "second") as object[];
Assert.NotNull(r1);
Assert.True(r1.Length == 4);
Assert.Equal("key1", r1[0]);
Assert.Equal("key2", r1[1]);
Assert.Equal("first", r1[2]);
Assert.Equal("second", r1[3]);
Assert.Equal("OK", sh.Eval($"return redis.call('set','{Guid.NewGuid()}','bar')"));
Assert.Equal("OK", sh.Eval("return redis.call('set',KEYS[1],'bar')", new[] { Guid.NewGuid().ToString() }));
//RESP3
Assert.Equal(10L, sh.Eval("return 10"));
var r2 = sh.Eval("return {1,2,{3,'Hello World!'}}") as object[];
Assert.NotNull(r2);
Assert.True(r2.Length == 3);
Assert.Equal(1L, r2[0]);
Assert.Equal(2L, r2[1]);
var r3 = r2[2] as object[];
Assert.Equal(3L, r3[0]);
Assert.Equal("Hello World!", r3[1]);
var r4 = sh.Eval("return {1,2,3.3333,somekey='somevalue','foo',nil,'bar'}") as object[];
//As you can see 3.333 is converted into 3, somekey is excluded, and the bar string is never returned as there is a nil before.
Assert.NotNull(r4);
Assert.True(r4.Length == 4);
Assert.Equal(1L, r4[0]);
Assert.Equal(2L, r4[1]);
Assert.Equal(3L, r4[2]);
Assert.Equal("foo", r4[3]);
Assert.Equal("My Error", Assert.Throws<RedisServerException>(() => sh.Eval("return {err=\"My Error\"}"))?.Message);
Assert.Equal("My Error222", Assert.Throws<RedisServerException>(() => sh.Eval("return redis.error_reply(\"My Error222\")"))?.Message);
var key1 = Guid.NewGuid().ToString();
Assert.Equal(1, sh.LPush(key1, "a"));
Assert.True(Assert.Throws<RedisServerException>(() => sh.Eval($"return redis.call('get','{key1}')"))?.Message.Contains("ERR Error running script (call to ") == true);
//(error) ERR Error running script (call to f_6b1bf486c81ceb7edf3c093f4c48582e38c0e791): ERR Operation against a key holding the wrong kind of value
}
}
[Fact]
public void EvalSha()
{
    // Cache the script once, then invoke it by its SHA1 digest.
    var sha1 = cli.ScriptLoad("return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}");
    Assert.False(string.IsNullOrWhiteSpace(sha1));

    var reply = cli.EvalSha(sha1, new[] { "key1", "key2" }, "first", "second") as object[];
    Assert.NotNull(reply);
    Assert.Equal(4, reply.Length);

    // The script echoes KEYS then ARGV, in order.
    var expected = new[] { "key1", "key2", "first", "second" };
    for (var i = 0; i < expected.Length; i++)
        Assert.Equal(expected[i], reply[i]);
}
[Fact]
// SCRIPT EXISTS must track the script cache: true after SCRIPT LOAD,
// false after SCRIPT FLUSH, and per-digest results for batch queries.
public void ScriptExists()
{
    // Start from an empty script cache.
    cli.ScriptFlush();
    var r1 = cli.ScriptLoad("return redis.call('get','foo')");
    Assert.True(!string.IsNullOrWhiteSpace(r1));
    Assert.True(cli.ScriptExists(r1));
    // A digest that was never loaded must not exist.
    Assert.False(cli.ScriptExists("6b1bf486c81ceb7edf3c193f4c48582e38c0e791"));
    var r2 = cli.ScriptLoad("return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}");
    Assert.True(!string.IsNullOrWhiteSpace(r2));
    Assert.True(cli.ScriptExists(r2));
    // Batch query: one flag per digest, preserving order.
    var r3 = cli.ScriptExists(new[] { r1, "6b1bf486c81ceb7edf3c193f4c48582e38c0e791", r2 });
    Assert.Equal(3, r3.Length);
    Assert.True(r3[0]);
    Assert.False(r3[1]);
    Assert.True(r3[2]);
    // After flushing, none of the digests should exist anymore.
    cli.ScriptFlush();
    Assert.False(cli.ScriptExists(r1));
    Assert.False(cli.ScriptExists("6b1bf486c81ceb7edf3c193f4c48582e38c0e791"));
    Assert.False(cli.ScriptExists(r2));
    r3 = cli.ScriptExists(new[] { r1, "6b1bf486c81ceb7edf3c193f4c48582e38c0e791", r2 });
    Assert.Equal(3, r3.Length);
    Assert.False(r3[0]);
    Assert.False(r3[1]);
    Assert.False(r3[2]);
}
[Fact]
public void ScriptFlush()
{
    // NOTE(review): left disabled — presumably to avoid clearing the shared
    // server's script cache while other scripting tests run; confirm before enabling.
    //cli.ScriptFlush();
}
[Fact]
public void ScriptKill()
{
    // With no script currently executing, SCRIPT KILL fails with NOTBUSY.
    Assert.Equal("NOTBUSY No scripts in execution right now.", Assert.Throws<RedisServerException>(() => cli.ScriptKill())?.Message);
}
[Fact]
public void ScriptLoad()
{
    // SCRIPT LOAD returns the SHA1 digest of the cached script body.
    var digest = cli.ScriptLoad("return redis.call('get','foo')");
    Assert.False(string.IsNullOrWhiteSpace(digest));
}
}
}
|
2881099/FreeRedis | 1,286 | test/Unit/FreeRedis.Tests/RedisClientTests/DelayQueueTest.cs | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using Xunit;
using Xunit.Abstractions;
namespace FreeRedis.Tests.RedisClientTests
{
// Exercises the Redis-backed delay queue: items enqueued with a delay
// become visible to the consumer only once that delay has elapsed.
public class DelayQueueTest(ITestOutputHelper output)
{
    // Shared client for the delay-queue tests.
    static readonly RedisClient _client = new RedisClient("127.0.0.1:6379,password=123");
    [Fact]
    public async Task Test()
    {
        var delayQueue = _client.DelayQueue("TestDelayQueue");
        // Enqueue items, each with its own delay — relative (TimeSpan) or absolute (DateTime).
        delayQueue.Enqueue($"Execute in 5 seconds.", TimeSpan.FromSeconds(5));
        delayQueue.Enqueue($"Execute in 10 seconds.", DateTime.Now.AddSeconds(10));
        delayQueue.Enqueue($"Execute in 15 seconds.", DateTime.Now.AddSeconds(15));
        delayQueue.Enqueue($"Execute in 20 seconds.", TimeSpan.FromSeconds(20));
        delayQueue.Enqueue($"Execute in 25 seconds.", DateTime.Now.AddSeconds(25));
        // NOTE(review): DateTime.Parse with a literal is culture-sensitive, and this
        // fixed timestamp is in the past, so the item is due immediately — confirm intent.
        delayQueue.Enqueue($"Execute in 2024-07-02 14:30:15", DateTime.Parse("2024-07-02 14:30:15"));
        // Consume the delay queue; the callback fires as each item becomes due.
        await delayQueue.DequeueAsync(s =>
        {
            output.WriteLine($"{DateTime.Now}:{s}");
            return Task.CompletedTask;
        });
    }
}
} |
2881099/FreeRedis | 22,535 | test/Unit/FreeRedis.Tests/RedisClientTests/StreamsTests.cs | using Newtonsoft.Json;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading;
using Xunit;
namespace FreeRedis.Tests.RedisClientTests
{
// Integration tests for the Redis Streams command family (XADD, XREAD,
// XREADGROUP, XPENDING, XCLAIM, XINFO, XTRIM, ...), run against a live server.
public class StreamsTests : TestBase
{
    [Fact]
    // Regression test for issue #457: XPENDING on a group before/after reads and acks.
    public void Issues457()
    {
        var redis = cli;
        var key = "key_Issues457";
        var group = "group_Issues457";
        var consumer = "consumer_Issues457";
        var maxLen = 9999;
        // Delete, recreate the group (MKSTREAM), and add data for the test.
        redis.Del(key);
        redis.XGroupCreate(key, group, "0", true);
        redis.XAdd(key, maxLen, "*", "__data", "my data1");
        redis.XAdd(key, maxLen, "*", "__data", "my data2");
        // Check the size of the pending list.
        // NOTE: reading used to throw when the pending list did not exist.
        var pending0 = redis.XPending(key, group);
        // Before any delivery, pending should equal 0.
        Assert.True(pending0.count == 0);
        // Read the unread messages, twice (one entry per call).
        var new1 = redis.XReadGroup(group, consumer, 1, 1, false, key, ">");
        var new2 = redis.XReadGroup(group, consumer, 1, 1, false, key, ">");
        Assert.NotNull(new1[0].entries);
        Assert.NotEmpty(new1[0].entries);
        Assert.NotNull(new2[0].entries);
        Assert.NotEmpty(new2[0].entries);
        // Check the size of the pending list.
        var pending = redis.XPending(key, group);
        // Before acknowledgement, pending should equal 2.
        Assert.True(pending.count == 2);
        // Acknowledge both deliveries.
        var id1 = new1[0].entries[0].id;
        var id2 = new2[0].entries[0].id;
        redis.XAck(key, group, id1);
        redis.XAck(key, group, id2);
        // Check the size of the pending list again.
        // NOTE: reading used to throw when the pending list did not exist.
        var pending2 = redis.XPending(key, group);
        // After acknowledgement, pending should equal 0.
        //Assert.True(pending2.count == 0);
    }
    [Fact]
    // XACK removes delivered entries from the group's pending list.
    public void XAck()
    {
        var key1 = "XAck1";
        cli.Del(key1);
        var id1 = cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        var id2 = cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1", ["f2"] = "v2" });
        cli.XGroupCreate(key1, "xack-group1", "0");
        var r2 = cli.XReadGroup("xack-group1", "xack-consumer", 2, 1, false, key1, ">");
        Assert.NotNull(r2);
        Assert.Single(r2);
        Assert.Equal(2, r2[0].entries.Length);
        Assert.Equal(id1, r2[0].entries[0].id);
        Assert.Equal(id2, r2[0].entries[1].id);
        // Ack every delivered id in one call.
        var r2ids = r2.Select(a => a.entries.Select(b => b.id)).SelectMany(a => a).ToArray();
        var r3 = cli.XAck(key1, "xack-group1", r2ids);
        Assert.Equal(2, r3);
        // Re-reading the history ("0-0") after ack yields nothing.
        var r4 = cli.XReadGroup("xack-group1", "xack-consumer", 1, key1, "0-0");
        Assert.Null(r4);
    }
    [Fact]
    // XADD overloads: dictionary vs. varargs fields, with/without MAXLEN and explicit ids.
    public void XAdd()
    {
        var key1 = "XAdd1";
        var key2 = "XAdd2";
        var key3 = "XAdd3";
        cli.Del(key1, key2, key3);
        // An entry must carry at least one field/value pair.
        Assert.Equal("ERR wrong number of arguments for 'xadd' command", Assert.Throws<RedisServerException>(() => cli.XAdd(key1, new Dictionary<string, object>()))?.Message);
        Assert.NotNull(cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" }));
        Assert.NotNull(cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1", ["f2"] = "v2" }));
        Assert.NotNull(cli.XAdd(key2, 1000, "123321", new Dictionary<string, object> { ["f11"] = "v11" }));
        Assert.NotNull(cli.XAdd(key2, 1000, "123322", new Dictionary<string, object> { ["f11"] = "v11", ["f22"] = "v22" }));
        // Negative maxLen is the approximate (~) trimming form.
        Assert.NotNull(cli.XAdd(key3, -1000, "1233211", new Dictionary<string, object> { ["f111"] = "v111" }));
        Assert.NotNull(cli.XAdd(key3, -1000, "1233222", new Dictionary<string, object> { ["f111"] = "v111", ["f222"] = "v222" }));
        cli.Del(key1, key2, key3);
        // Same matrix via the params-style field/value overloads.
        Assert.NotNull(cli.XAdd(key1, "f1", "v1"));
        Assert.NotNull(cli.XAdd(key1, "f1", "v1", "f2", "v2"));
        Assert.NotNull(cli.XAdd(key2, 1000, "123321", "f11", "v11"));
        Assert.NotNull(cli.XAdd(key2, 1000, "123322", "f11", "v11", "f22", "v22"));
        Assert.NotNull(cli.XAdd(key3, -1000, "1233211", "f111", "v111"));
        Assert.NotNull(cli.XAdd(key3, -1000, "1233222", "f111", "v111", "f222", "v222"));
    }
    [Fact]
    // XCLAIM transfers a pending entry to another consumer once it has idled long enough.
    public void XClaim()
    {
        var key1 = "XClaim1";
        var key2 = "XClaim2";
        var key3 = "XClaim3";
        cli.Del(key1, key2, key3);
        var id1 = cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        cli.XGroupCreate(key1, "XClaim-group1", "0");
        var r2 = cli.XReadGroup("XClaim-group1", "XClaim-consumer1", 1, 1, false, key1, ">");
        Assert.NotNull(r2);
        Assert.Single(r2);
        Assert.Single(r2[0].entries);
        Assert.Equal(id1, r2[0].entries[0].id);
        // Get the pending event
        var r3 = cli.XPending(key1, "XClaim-group1");
        Assert.NotNull(r3);
        Assert.Equal(id1, r3.maxId);
        Assert.Equal(id1, r3.minId);
        Assert.Equal(1, r3.count);
        Assert.Single(r3.consumers);
        Assert.Equal(1, r3.consumers[0].count);
        Assert.Equal("XClaim-consumer1", r3.consumers[0].consumer);
        var r4 = cli.XPending(key1, "XClaim-group1", "-", "+", 3, "XClaim-consumer1");
        Assert.Single(r4);
        Assert.Equal(id1, r4[0].id);
        Assert.Equal("XClaim-consumer1", r4[0].consumer);
        Assert.Equal(1, r4[0].deliveredTimes);
        // Sleep for 1000ms so we can claim events pending for more than 500ms
        Thread.Sleep(1000);
        var r5 = cli.XClaim(key1, "XClaim-group1", "XClaim-consumer2", 500, id1);
        Assert.Single(r5);
        Assert.Equal(id1, r5[0].id);
        // Deleted events should return as null on XClaim
        Assert.Equal(1, cli.XDel(key1, id1));
        var r6 = cli.XClaim(key1, "XClaim-group1", "XClaim-consumer2", 500, id1);
        Assert.Empty(r6);
        var r7 = cli.XGroupDelConsumer(key1, "XClaim-group1", "XClaim-consumer2");
        Assert.Equal(1, r7);
    }
    [Fact]
    // Same scenario as XClaim, but using the JUSTID variant (ids only, no payload).
    public void XClaimJustId()
    {
        var key1 = "XClaimJustId1";
        var key2 = "XClaimJustId2";
        var key3 = "XClaimJustId3";
        cli.Del(key1, key2, key3);
        var id1 = cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        cli.XGroupCreate(key1, "XClaimJustId-group1", "0");
        var r2 = cli.XReadGroup("XClaimJustId-group1", "XClaimJustId-consumer1", 1, 1, false, key1, ">");
        Assert.NotNull(r2);
        Assert.Single(r2);
        Assert.Single(r2[0].entries);
        Assert.Equal(id1, r2[0].entries[0].id);
        // Get the pending event
        var r3 = cli.XPending(key1, "XClaimJustId-group1");
        Assert.NotNull(r3);
        Assert.Equal(id1, r3.maxId);
        Assert.Equal(id1, r3.minId);
        Assert.Equal(1, r3.count);
        Assert.Single(r3.consumers);
        Assert.Equal(1, r3.consumers[0].count);
        Assert.Equal("XClaimJustId-consumer1", r3.consumers[0].consumer);
        var r4 = cli.XPending(key1, "XClaimJustId-group1", "-", "+", 3, "XClaimJustId-consumer1");
        Assert.Single(r4);
        Assert.Equal(id1, r4[0].id);
        Assert.Equal("XClaimJustId-consumer1", r4[0].consumer);
        Assert.Equal(1, r4[0].deliveredTimes);
        // Sleep for 1000ms so we can claim events pending for more than 500ms
        Thread.Sleep(1000);
        var r5 = cli.XClaimJustId(key1, "XClaimJustId-group1", "XClaimJustId-consumer2", 500, id1);
        Assert.Single(r5);
        Assert.Equal(id1, r5[0]);
        // Deleted events should return as null on XClaim
        Assert.Equal(1, cli.XDel(key1, id1));
        var r6 = cli.XClaimJustId(key1, "XClaimJustId-group1", "XClaimJustId-consumer2", 500, id1);
        Assert.Empty(r6);
        var r66 = cli.XClaim(key1, "XClaimJustId-group1", "XClaimJustId-consumer2", 500, id1);
        var r7 = cli.XGroupDelConsumer(key1, "XClaimJustId-group1", "XClaimJustId-consumer2");
        Assert.Equal(1, r7);
    }
    [Fact]
    // XDEL on a missing stream/ids reports zero deletions.
    public void XDel()
    {
        var key1 = "XDel1";
        cli.Del(key1);
        Assert.Equal(0, cli.XDel(key1, "1603636512916-0"));
        Assert.Equal(0, cli.XDel(key1, "1603636512916-0", "1603636512911-0"));
    }
    [Fact]
    // XGROUP CREATE with MKSTREAM creates both the stream and the group.
    public void XGroupCreate()
    {
        var key1 = "XGroupCreate1";
        cli.Del(key1);
        cli.XGroupCreate(key1, "XGroupCreate-group1", "0", true);
        Assert.True(cli.XGroupDestroy(key1, "XGroupCreate-group1"));
    }
    [Fact]
    // XGROUP SETID repositions the group's last-delivered id ("$" = end of stream).
    public void XGroupSetId()
    {
        var key1 = "XGroupSetId1";
        cli.Del(key1);
        cli.XGroupCreate(key1, "XGroupSetId-group1", "0", true);
        cli.XGroupSetId(key1, "XGroupSetId-group1", "$");
        Assert.True(cli.XGroupDestroy(key1, "XGroupSetId-group1"));
    }
    [Fact]
    // XGROUP DESTROY returns true when the group existed.
    public void XGroupDestroy()
    {
        var key1 = "XGroupDestroy1";
        cli.Del(key1);
        cli.XGroupCreate(key1, "XGroupDestroy-group1", "0", true);
        Assert.True(cli.XGroupDestroy(key1, "XGroupDestroy-group1"));
    }
    //[Fact]
    //public void XGroupCreateConsumer()
    //{
    //    var key1 = "XGroupCreateConsumer1";
    //    cli.Del(key1);
    //    cli.XGroupCreate(key1, "XGroupCreateConsumer-group1", "0", true);
    //    cli.XGroupCreateConsumer(key1, "XGroupCreateConsumer-group1", "XGroupCreateConsumer-consumer1");
    //    Assert.True(cli.XGroupDestroy(key1, "XGroupCreateConsumer-group1"));
    //}
    [Fact]
    // XGROUP DELCONSUMER returns the number of pending entries the consumer owned.
    public void XGroupDelConsumer()
    {
        var key1 = "XGroupDelConsumer1";
        cli.Del(key1);
        var id1 = cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        cli.XGroupCreate(key1, "XGroupDelConsumer-group1", "0");
        var r2 = cli.XReadGroup("XGroupDelConsumer-group1", "XGroupDelConsumer-consumer1", 1, 1, false, key1, ">");
        Assert.NotNull(r2);
        Assert.Single(r2);
        Assert.Single(r2[0].entries);
        Assert.Equal(id1, r2[0].entries[0].id);
        var r7 = cli.XGroupDelConsumer(key1, "XGroupDelConsumer-group1", "XGroupDelConsumer-consumer1");
        Assert.Equal(1, r7);
    }
    [Fact]
    // XINFO STREAM for both a populated stream and an empty MKSTREAM-created one.
    public void XInfoStream()
    {
        var key1 = "XInfoStream1";
        cli.Del(key1);
        var id1 = cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        cli.XGroupCreate(key1, "XInfoStream1-group1", "0");
        var r1 = cli.XInfoStream(key1);
        Assert.NotNull(r1);
        Assert.Equal(1, r1.length);
        Assert.Equal(1, r1.radix_tree_keys);
        Assert.Equal(2, r1.radix_tree_nodes);
        Assert.Equal(id1, r1.last_generated_id);
        Assert.Equal(1, r1.groups);
        Assert.NotNull(r1.first_entry);
        Assert.Equal(id1, r1.first_entry.id);
        Assert.Equal("f1", r1.first_entry.fieldValues[0]?.ToString());
        Assert.Equal("v1", r1.first_entry.fieldValues[1]?.ToString());
        Assert.NotNull(r1.last_entry);
        Assert.Equal(id1, r1.last_entry.id);
        Assert.Equal("f1", r1.last_entry.fieldValues[0]?.ToString());
        Assert.Equal("v1", r1.last_entry.fieldValues[1]?.ToString());
        Assert.True(cli.XGroupDestroy(key1, "XInfoStream1-group1"));
        // Empty stream: first/last entry are null and the generated id is "0-0".
        cli.Del(key1);
        cli.XGroupCreate(key1, "XInfoStream1-group1", "0", true);
        var r2 = cli.XInfoStream(key1);
        Assert.NotNull(r2);
        Assert.Equal(0, r2.length);
        Assert.Equal(0, r2.radix_tree_keys);
        Assert.Equal(1, r2.radix_tree_nodes);
        Assert.Equal("0-0", r2.last_generated_id);
        Assert.Equal(1, r2.groups);
        Assert.Null(r2.first_entry);
        Assert.Null(r2.last_entry);
        Assert.True(cli.XGroupDestroy(key1, "XInfoStream1-group1"));
    }
    [Fact]
    // XINFO GROUPS reflects consumer/pending counts and last-delivered id per group.
    public void XInfoGroups()
    {
        var key1 = "XInfoGroups1";
        cli.Del(key1);
        var id1 = cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        cli.XGroupCreate(key1, "XInfoGroups1-group1", "0");
        var r1 = cli.XReadGroup("XInfoGroups1-group1", "XInfoGroups1-consumer1", 1, 1, false, key1, ">");
        Assert.NotNull(r1);
        Assert.Single(r1);
        Assert.Single(r1[0].entries);
        Assert.Equal(id1, r1[0].entries[0].id);
        var r2 = cli.XInfoGroups(key1);
        Assert.Single(r2);
        Assert.Equal("XInfoGroups1-group1", r2[0].name);
        Assert.Equal(1, r2[0].consumers);
        Assert.Equal(1, r2[0].pending);
        Assert.Equal(id1, r2[0].last_delivered_id);
        Assert.True(cli.XGroupDestroy(key1, "XInfoGroups1-group1"));
        // Fresh group on an empty stream: no consumers, nothing pending.
        cli.Del(key1);
        cli.XGroupCreate(key1, "XInfoGroups1-group1", "0", true);
        var r3 = cli.XInfoGroups(key1);
        Assert.Single(r3);
        Assert.Equal("XInfoGroups1-group1", r3[0].name);
        Assert.Equal(0, r3[0].consumers);
        Assert.Equal(0, r3[0].pending);
        Assert.Equal("0-0", r3[0].last_delivered_id);
        Assert.True(cli.XGroupDestroy(key1, "XInfoGroups1-group1"));
    }
    [Fact]
    // XINFO CONSUMERS lists each consumer's name, pending count and idle time.
    public void XInfoConsumers()
    {
        var key1 = "XInfoConsumers1";
        cli.Del(key1);
        var id1 = cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        cli.XGroupCreate(key1, "XInfoConsumers1-group1", "0");
        var r1 = cli.XReadGroup("XInfoConsumers1-group1", "XInfoConsumers1-consumer1", 1, 1, false, key1, ">");
        Assert.NotNull(r1);
        Assert.Single(r1);
        Assert.Single(r1[0].entries);
        Assert.Equal(id1, r1[0].entries[0].id);
        var r2 = cli.XInfoConsumers(key1, "XInfoConsumers1-group1");
        Assert.Single(r2);
        Assert.Equal("XInfoConsumers1-consumer1", r2[0].name);
        Assert.Equal(1, r2[0].pending);
        Assert.True(r2[0].idle > 0);
        Assert.True(cli.XGroupDestroy(key1, "XInfoConsumers1-group1"));
        // A group that never served a consumer reports an empty consumer list.
        cli.Del(key1);
        cli.XGroupCreate(key1, "XInfoConsumers1-group1", "0", true);
        var r3 = cli.XInfoConsumers(key1, "XInfoConsumers1-group1");
        Assert.Empty(r3);
        Assert.True(cli.XGroupDestroy(key1, "XInfoConsumers1-group1"));
    }
    [Fact]
    // Smoke test for XINFO STREAM FULL (no assertions on the rich payload).
    public void XInfoStreamFull()
    {
        var key1 = "XInfoStreamFull1";
        cli.Del(key1);
        var id1 = cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        cli.XGroupCreate(key1, "XInfoStreamFull1-group1", "0");
        var r1 = cli.XReadGroup("XInfoStreamFull1-group1", "XInfoStreamFull1-consumer1", 1, 1, false, key1, ">");
        Assert.NotNull(r1);
        Assert.Single(r1);
        Assert.Single(r1[0].entries);
        Assert.Equal(id1, r1[0].entries[0].id);
        var r2 = cli.XInfoStreamFull(key1, 11);
        Assert.True(cli.XGroupDestroy(key1, "XInfoStreamFull1-group1"));
    }
    [Fact]
    // XLEN counts the entries in the stream.
    public void XLen()
    {
        var key1 = "XLen1";
        cli.Del(key1);
        cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        Assert.Equal(3, cli.XLen(key1));
    }
    [Fact]
    // XPENDING summary and detailed forms for a single delivered entry.
    public void XPending()
    {
        var key1 = "XPending1";
        var key2 = "XPending2";
        var key3 = "XPending3";
        cli.Del(key1, key2, key3);
        var id1 = cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        cli.XGroupCreate(key1, "XPending-group1", "0");
        var r2 = cli.XReadGroup("XPending-group1", "XPending-consumer1", 1, 1, false, key1, ">");
        Assert.NotNull(r2);
        Assert.Single(r2);
        Assert.Single(r2[0].entries);
        Assert.Equal(id1, r2[0].entries[0].id);
        // Get the pending event
        var r3 = cli.XPending(key1, "XPending-group1");
        Assert.NotNull(r3);
        Assert.Equal(id1, r3.maxId);
        Assert.Equal(id1, r3.minId);
        Assert.Equal(1, r3.count);
        Assert.Single(r3.consumers);
        Assert.Equal(1, r3.consumers[0].count);
        Assert.Equal("XPending-consumer1", r3.consumers[0].consumer);
        var r4 = cli.XPending(key1, "XPending-group1", "-", "+", 3, "XPending-consumer1");
        Assert.Single(r4);
        Assert.Equal(id1, r4[0].id);
        Assert.Equal("XPending-consumer1", r4[0].consumer);
        Assert.Equal(1, r4[0].deliveredTimes);
        // Sleep for 1000ms so we can claim events pending for more than 500ms
        Thread.Sleep(1000);
    }
    [Fact]
    // XRANGE over open/closed id ranges with COUNT limits (ascending order).
    public void XRange()
    {
        var key1 = "XRange1";
        cli.Del(key1);
        var r1 = cli.XRange(key1, null, null);
        Assert.Empty(r1);
        var id1 = cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        var id2 = cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        var r2 = cli.XRange(key1, null, null, 3);
        Assert.Equal(2, r2.Length);
        Assert.Equal(id1, r2[0].id);
        var r3 = cli.XRange(key1, id1, null, 3);
        Assert.Equal(2, r3.Length);
        Assert.Equal(id1, r3[0].id);
        var r4 = cli.XRange(key1, id1, id2, 1);
        Assert.Single(r4);
        Assert.Equal(id1, r4[0].id);
        var r5 = cli.XRange(key1, id1, id2, 2);
        Assert.Equal(2, r5.Length);
        Assert.Equal(id1, r5[0].id);
        var r6 = cli.XRange(key1, id2, null, 4);
        Assert.Single(r6);
        Assert.Equal(id2, r6[0].id);
        var id3 = cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        var r7 = cli.XRange(key1, id2, id2, 4);
        Assert.Single(r7);
        Assert.Equal(id2, r7[0].id);
    }
    [Fact]
    // XREVRANGE mirrors XRange but returns entries newest-first.
    public void XRevRange()
    {
        var key1 = "XRevRange1";
        cli.Del(key1);
        var r1 = cli.XRevRange(key1, null, null);
        Assert.Empty(r1);
        var id1 = cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        var id2 = cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        var r2 = cli.XRevRange(key1, null, null, 3);
        Assert.Equal(2, r2.Length);
        Assert.Equal(id2, r2[0].id);
        var r3 = cli.XRevRange(key1, id2, null, 3);
        Assert.Equal(2, r3.Length);
        Assert.Equal(id2, r3[0].id);
        var r4 = cli.XRevRange(key1, id2, id1, 1);
        Assert.Single(r4);
        Assert.Equal(id2, r4[0].id);
        var r5 = cli.XRevRange(key1, id2, id1, 2);
        Assert.Equal(2, r5.Length);
        Assert.Equal(id2, r5[0].id);
        var r6 = cli.XRevRange(key1, id1, null, 4);
        Assert.Single(r6);
        Assert.Equal(id1, r6[0].id);
        var id3 = cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        var r7 = cli.XRevRange(key1, id1, id1, 4);
        Assert.Single(r7);
        Assert.Equal(id1, r7[0].id);
    }
    [Fact]
    // XREAD on empty and populated streams, with and without a COUNT.
    public void XRead()
    {
        var key1 = "XRead1";
        cli.Del(key1);
        // Empty Stream
        Assert.Null(cli.XRead(0, key1, "0"));
        Assert.Empty(cli.XRead(1, 0, key1, "0"));
        var id1 = cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        var id2 = cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        // Read only a single Stream
        var r1 = cli.XRead(0, key1, "0");
        Assert.NotNull(r1);
        Assert.Equal(id1, r1.id);
        var r2 = cli.XRead(1, 0, key1, "0");
        Assert.Single(r2);
        Assert.Single(r2[0].entries);
        Assert.Equal(id1, r2[0].entries[0].id);
        // Read up to two entries (COUNT 2) from the stream.
        var r3 = cli.XRead(2, 0, key1, "0");
        Assert.Single(r3);
        Assert.Equal(2, r3[0].entries.Length);
        Assert.Equal(id1, r3[0].entries[0].id);
        Assert.Equal(id2, r3[0].entries[1].id);
    }
    [Fact]
    // XREADGROUP with ">" delivers the entry added before the group read.
    public void XReadGroup()
    {
        var key1 = "XReadGroup1";
        cli.Del(key1);
        var id1 = cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        cli.XGroupCreate(key1, "XReadGroup-group1", "0");
        var r2 = cli.XReadGroup("XReadGroup-group1", "XReadGroup-consumer1", 1, 1, false, key1, ">");
        Assert.NotNull(r2);
        Assert.Single(r2);
        Assert.Single(r2[0].entries);
        Assert.Equal(id1, r2[0].entries[0].id);
    }
    [Fact]
    // XTRIM to MAXLEN 3 evicts the two oldest of five entries.
    public void XTrim()
    {
        var key1 = "XTrim1";
        cli.Del(key1);
        cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        cli.XAdd(key1, new Dictionary<string, object> { ["f1"] = "v1" });
        Assert.Equal(5, cli.XLen(key1));
        Assert.Equal(2, cli.XTrim(key1, 3));
        Assert.Equal(3, cli.XLen(key1));
    }
}
}
|
2881099/FreeRedis | 2,426 | test/Unit/FreeRedis.Tests/RedisClientTests/HyperLogLogTests.cs | using Newtonsoft.Json;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading;
using Xunit;
namespace FreeRedis.Tests.RedisClientTests
{
// Integration tests for the HyperLogLog commands (PFADD / PFCOUNT / PFMERGE).
public class HyperLogLogTests : TestBase
{
    [Fact]
    // PFADD returns true only when the approximated cardinality changed.
    public void PfAdd()
    {
        var key1 = "PfAdd1";
        cli.Del(key1);
        Assert.True(cli.PfAdd(key1, "a", "b", "c", "d", "e", "f", "g"));
        // Re-adding the same elements does not change the estimate.
        Assert.False(cli.PfAdd(key1, "a", "b", "c", "d", "e", "f", "g"));
        Assert.True(cli.PfAdd(key1, "a", "b", "c", "d", "e", "f", "g", "h"));
    }
    [Fact]
    // PFCOUNT over one key, the union of several keys, and missing keys.
    public void PfCount()
    {
        var key1 = "PfCount1";
        cli.Del(key1);
        Assert.True(cli.PfAdd(key1, "a", "b", "c", "d", "e", "f", "g"));
        Assert.Equal(7, cli.PfCount(key1));
        var key2 = "PfCount2";
        cli.Del(key2);
        Assert.True(cli.PfAdd(key2, "foo", "bar", "zap"));
        Assert.False(cli.PfAdd(key2, "zap", "zap", "zap"));
        Assert.False(cli.PfAdd(key2, "foo", "bar"));
        Assert.Equal(3, cli.PfCount(key2));
        // Multi-key PFCOUNT is the cardinality of the union; missing keys contribute nothing.
        Assert.Equal(10, cli.PfCount(key1, key2));
        Assert.Equal(10, cli.PfCount(key1, key2, Guid.NewGuid().ToString()));
        Assert.Equal(10, cli.PfCount(Guid.NewGuid().ToString(), key1, key2, Guid.NewGuid().ToString()));
    }
    [Fact]
    // PFMERGE unions source HLLs into a destination, idempotently.
    public void PfMerge()
    {
        var key1 = "PfMerge1";
        cli.Del(key1);
        Assert.True(cli.PfAdd(key1, "foo", "bar", "zap", "a"));
        Assert.Equal(4, cli.PfCount(key1));
        var key2 = "PfMerge2";
        cli.Del(key2);
        Assert.True(cli.PfAdd(key2, "a", "b", "c", "foo"));
        Assert.Equal(4, cli.PfCount(key2));
        var key3 = "PfMerge3";
        cli.Del(key3);
        cli.PfMerge(key3, key1, key2);
        Assert.Equal(6, cli.PfCount(key3));
        // Including the destination itself among the sources is a no-op.
        cli.PfMerge(key3, key1, key2, key3);
        Assert.Equal(6, cli.PfCount(key3));
        // Merging only a missing key yields an empty HLL.
        cli.Del(key3);
        cli.PfMerge(key3, Guid.NewGuid().ToString());
        Assert.Equal(0, cli.PfCount(key3));
        // Incremental merges accumulate.
        cli.Del(key3);
        cli.PfMerge(key3, key1);
        Assert.Equal(4, cli.PfCount(key3));
        cli.PfMerge(key3, key2);
        Assert.Equal(6, cli.PfCount(key3));
    }
}
}
|
2881099/FreeRedis | 9,622 | test/Unit/FreeRedis.Tests/RedisClientTests/HashesTests.cs | using Newtonsoft.Json;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Xunit;
namespace FreeRedis.Tests.RedisClientTests
{
// Integration tests for the hash commands (HSET / HGET / HDEL / HINCRBY / ...).
// Each test resets its own key(s) with Del before writing, so stale data from
// previous runs cannot skew the assertions.
public class HashesTests : TestBase
{
    [Fact]
    public void HDel()
    {
        cli.Del("TestHDel");
        cli.HMSet("TestHDel", "string1", base.String, "bytes1", base.Bytes, "class1", base.Class);
        Assert.Equal(3, cli.HDel("TestHDel", "string1", "bytes1", "class1"));
    }
    [Fact]
    public void HExists()
    {
        cli.Del("TestHExists");
        Assert.False(cli.HExists("TestHExists", "null1"));
        Assert.Equal(1, cli.HSet("TestHExists", "null1", 1));
        Assert.True(cli.HExists("TestHExists", "null1"));
        Assert.Equal(1, cli.HDel("TestHExists", "null1"));
        Assert.False(cli.HExists("TestHExists", "null1"));
    }
    [Fact]
    // HGET round-trips strings, byte arrays, serialized objects and object arrays.
    public void HGet()
    {
        cli.Del("TestHGet");
        cli.HMSet("TestHGet", "null1", base.Null, "string1", base.String, "bytes1", base.Bytes, "class1", base.Class, "class1array", new[] { base.Class, base.Class });
        Assert.Equal(cli.HGet("TestHGet", "null1")?.ToString() ?? "", base.Null?.ToString() ?? "");
        Assert.Equal(cli.HGet("TestHGet", "string1"), base.String);
        Assert.Equal(cli.HGet<byte[]>("TestHGet", "bytes1"), base.Bytes);
        Assert.Equal(cli.HGet<TestClass>("TestHGet", "class1")?.ToString(), base.Class.ToString());
        Assert.Equal(2, cli.HGet<TestClass[]>("TestHGet", "class1array")?.Length);
        Assert.Equal(cli.HGet<TestClass[]>("TestHGet", "class1array")?.First().ToString(), base.Class.ToString());
        Assert.Equal(cli.HGet<TestClass[]>("TestHGet", "class1array")?.Last().ToString(), base.Class.ToString());
    }
    [Fact]
    public void HGetAll()
    {
        cli.Del("TestHGetAll");
        cli.HMSet("TestHGetAll", "string1", base.String, "bytes1", base.Bytes, "class1", base.Class, "class1array", new[] { base.Class, base.Class });
        Assert.Equal(4, cli.HGetAll("TestHGetAll").Count);
        Assert.Equal(base.String, cli.HGetAll("TestHGetAll")["string1"]);
        Assert.Equal(Encoding.UTF8.GetString(base.Bytes), cli.HGetAll("TestHGetAll")["bytes1"]);
        Assert.Equal(JsonConvert.SerializeObject(base.Class), cli.HGetAll("TestHGetAll")["class1"]);
    }
    [Fact]
    // HINCRBY creates missing fields at 0 and rejects non-integer field values.
    public void HIncrBy()
    {
        cli.Del("TestHIncrBy");
        cli.HMSet("TestHIncrBy", "null1", base.Null, "string1", base.String, "bytes1", base.Bytes, "class1", base.Class, "class1array", new[] { base.Class, base.Class });
        Assert.Equal(1, cli.HIncrBy("TestHIncrBy", "null112", 1));
        Assert.Throws<RedisServerException>(() => cli.HIncrBy("TestHIncrBy", "string1", 1));
        Assert.Throws<RedisServerException>(() => cli.HIncrBy("TestHIncrBy", "bytes1", 1));
        Assert.Equal(2, cli.HIncrBy("TestHIncrBy", "null112", 1));
        Assert.Equal(12, cli.HIncrBy("TestHIncrBy", "null112", 10));
    }
    [Fact]
    // HINCRBYFLOAT: 0.5 + 3.3 = 3.8, + 10.2 = 14.0.
    public void HIncrByFloat()
    {
        cli.Del("TestHIncrByFloat");
        cli.HMSet("TestHIncrByFloat", "null1", base.Null, "string1", base.String, "bytes1", base.Bytes, "class1", base.Class, "class1array", new[] { base.Class, base.Class });
        Assert.Equal(0.5m, cli.HIncrByFloat("TestHIncrByFloat", "null112", 0.5m));
        Assert.Throws<RedisServerException>(() => cli.HIncrByFloat("TestHIncrByFloat", "string1", 1.5m));
        Assert.Throws<RedisServerException>(() => cli.HIncrByFloat("TestHIncrByFloat", "bytes1", 5));
        Assert.Equal(3.8m, cli.HIncrByFloat("TestHIncrByFloat", "null112", 3.3m));
        Assert.Equal(14.0m, cli.HIncrByFloat("TestHIncrByFloat", "null112", 10.2m));
    }
    [Fact]
    public void HKeys()
    {
        // Fixed: previously deleted "HKeys" instead of "TestHKeys", so stale
        // fields on "TestHKeys" from earlier runs could skew the assertions.
        cli.Del("TestHKeys");
        cli.HMSet("TestHKeys", "string1", base.String, "bytes1", base.Bytes, "class1", base.Class, "class1array", new[] { base.Class, base.Class });
        Assert.Equal(4, cli.HKeys("TestHKeys").Length);
        Assert.Contains("string1", cli.HKeys("TestHKeys"));
        Assert.Contains("bytes1", cli.HKeys("TestHKeys"));
        Assert.Contains("class1", cli.HKeys("TestHKeys"));
        Assert.Contains("class1array", cli.HKeys("TestHKeys"));
    }
    [Fact]
    public void HLen()
    {
        // Fixed: previously deleted "HLen" instead of "TestHLen" (same typo as HKeys).
        cli.Del("TestHLen");
        cli.HMSet("TestHLen", "string1", base.String, "bytes1", base.Bytes, "class1", base.Class, "class1array", new[] { base.Class, base.Class });
        Assert.Equal(4, cli.HLen("TestHLen"));
    }
    [Fact]
    public void HMGet()
    {
        cli.Del("TestHMGet");
        cli.HMSet("TestHMGet", "string1", base.String, "bytes1", base.Bytes, "class1", base.Class, "class1array", new[] { base.Class, base.Class });
        cli.HMSet("TestHMGet", "string2", base.String, "bytes2", base.Bytes, "class2", base.Class, "class2array", new[] { base.Class, base.Class });
        Assert.Equal(2, cli.HMGet("TestHMGet", "string1", "string2").Length);
        Assert.Contains(base.String, cli.HMGet("TestHMGet", "string1", "string2"));
        Assert.Equal(2, cli.HMGet<TestClass>("TestHMGet", "class1", "class2").Length);
        Assert.Contains(base.Class.ToString(), cli.HMGet<TestClass>("TestHMGet", "class1", "class2")?.Select(a => a.ToString()));
    }
    [Fact]
    public void HMSet()
    {
        cli.Del("TestHMSet");
        cli.HMSet("TestHMSet", "string1", base.String, "bytes1", base.Bytes, "class1", base.Class, "class1array", new[] { base.Class, base.Class });
        Assert.Equal(4, cli.HMGet("TestHMSet", "string1", "bytes1", "class1", "class1array").Length);
        Assert.Contains(base.String, cli.HMGet("TestHMSet", "string1", "bytes1", "class1", "class1array"));
        Assert.Contains(Encoding.UTF8.GetString(base.Bytes), cli.HMGet("TestHMSet", "string1", "bytes1", "class1", "class1array"));
        Assert.Contains(JsonConvert.SerializeObject(base.Class), cli.HMGet("TestHMSet", "string1", "bytes1", "class1", "class1array"));
    }
    [Fact]
    public void HScan()
    {
        // TODO: HSCAN is not covered yet.
    }
    [Fact]
    public void HSet()
    {
        cli.Del("TestHSet");
        Assert.Equal(1, cli.HSet("TestHSet", "string1", base.String));
        Assert.Equal(base.String, cli.HGet("TestHSet", "string1"));
        Assert.Equal(1, cli.HSet("TestHSet", "bytes1", base.Bytes));
        Assert.Equal(base.Bytes, cli.HGet<byte[]>("TestHSet", "bytes1"));
        Assert.Equal(1, cli.HSet("TestHSet", "class1", base.Class));
        Assert.Equal(base.Class.ToString(), cli.HGet<TestClass>("TestHSet", "class1").ToString());
    }
    [Fact]
    public void HSetNx()
    {
        // NOTE(review): this test calls HSet, not HSetNx — the 0 returns rely on
        // HSET reporting no newly created fields. Confirm whether HSetNx was intended.
        cli.Del("TestHSetNx");
        Assert.Equal(1, cli.HSet("TestHSetNx", "string1", base.String));
        Assert.Equal(base.String, cli.HGet("TestHSetNx", "string1"));
        Assert.Equal(0, cli.HSet("TestHSetNx", "string1", base.String));
        Assert.Equal(1, cli.HSet("TestHSetNx", "bytes1", base.Bytes));
        Assert.Equal(base.Bytes, cli.HGet<byte[]>("TestHSetNx", "bytes1"));
        Assert.Equal(0, cli.HSet("TestHSetNx", "bytes1", base.Bytes));
        Assert.Equal(1, cli.HSet("TestHSetNx", "class1", base.Class));
        Assert.Equal(base.Class.ToString(), cli.HGet<TestClass>("TestHSetNx", "class1").ToString());
        Assert.Equal(0, cli.HSet("TestHSetNx", "class1", base.Class));
    }
    [Fact]
    // HSTRLEN returns the stored field value's length, 0 for a missing field.
    public void HStrLen()
    {
        cli.Del("HStrLen1");
        cli.HMSet("HStrLen1", "f1", 123, "f2", 2222);
        Assert.Equal(3, cli.HStrLen("HStrLen1", "f1"));
        Assert.Equal(4, cli.HStrLen("HStrLen1", "f2"));
        Assert.Equal(0, cli.HStrLen("HStrLen1", "f3"));
    }
    [Fact]
    public void HVals()
    {
        cli.Del("TestHVals1", "TestHVals2", "TestHVals3", "TestHVals4", "TestHVals5");
        cli.HMSet("TestHVals1", "string1", base.String, "bytes1", base.Bytes, "class1", base.Class, "class1array1", new[] { base.Class, base.Class });
        cli.HMSet("TestHVals1", "string2", base.String, "bytes2", base.Bytes, "class2", base.Class, "class2array2", new[] { base.Class, base.Class });
        Assert.Equal(8, cli.HVals("TestHVals1").Length);
        cli.HMSet("TestHVals2", "string1", base.String, "string2", base.String);
        Assert.Equal(2, cli.HVals("TestHVals2").Length);
        Assert.Contains(base.String, cli.HVals("TestHVals2"));
        cli.HMSet("TestHVals3", "bytes1", base.Bytes, "bytes2", base.Bytes);
        Assert.Equal(2, cli.HVals<byte[]>("TestHVals3").Length);
        Assert.Contains(base.Bytes, cli.HVals<byte[]>("TestHVals3"));
        cli.HMSet("TestHVals4", "class1", base.Class, "class2", base.Class);
        Assert.Equal(2, cli.HVals<TestClass>("TestHVals4").Length);
        Assert.Contains(base.Class.ToString(), cli.HVals<TestClass>("TestHVals4").Select(a => a.ToString()));
        cli.HMSet("TestHVals5", "class2array1", new[] { base.Class, base.Class }, "class2array2", new[] { base.Class, base.Class });
        Assert.Equal(2, cli.HVals<TestClass[]>("TestHVals5").Length);
    }
}
}
|
2881099/FreeRedis | 7,599 | test/Unit/FreeRedis.Tests/RedisClientTests/ServerTests.cs | using Newtonsoft.Json;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using Xunit;
namespace FreeRedis.Tests.RedisClientTests.Other
{
public class ServerTests : TestBase
{
[Fact]
// ACL CAT lists categories; a specific category lists its commands;
// an unknown category is a server error.
public void AclCat()
{
    var r1 = cli.AclCat();
    Assert.NotEmpty(r1);
    var r2 = cli.AclCat("scripting");
    Assert.NotEmpty(r2);
    Assert.Equal("ERR Unknown category 'testcategory'", Assert.Throws<RedisServerException>(() => cli.AclCat("testcategory"))?.Message);
}
[Fact]
// ACL DELUSER removes both users in one call and shrinks ACL LIST accordingly.
public void AclDelUser()
{
    var key1 = "AclDelUser1";
    var key2 = "AclDelUser2";
    cli.AclSetUser(key1);
    cli.AclSetUser(key2);
    var r1 = cli.AclList();
    var r2 = cli.AclDelUser(key1, key2);
    Assert.Equal(2, r2);
    var r3 = cli.AclList();
    Assert.Equal(r1.Length - 2, r3.Length);
}
[Fact]
// ACL GENPASS output length scales with the requested bit count:
// default 256 bits -> 64 hex chars, 32 bits -> 8 chars, 5 bits -> 2 chars.
// The sample literals serve only as length templates.
public void AclGenPass()
{
    Assert.Equal("dd721260bfe1b3d9601e7fbab36de6d04e2e67b0ef1c53de59d45950db0dd3cc".Length, cli.AclGenPass().Length);
    Assert.Equal("355ef3dd".Length, cli.AclGenPass(32).Length);
    Assert.Equal("90".Length, cli.AclGenPass(5).Length);
}
[Fact]
// ACL GETUSER for the current (default) user, a freshly created user,
// and a user configured with fine-grained command rules.
public void AclGetUser()
{
    var key1 = "AclGetUser1";
    var r1 = cli.AclGetUser();
    Assert.NotNull(r1);
    Assert.NotEmpty(r1.flags);
    Assert.True(!string.IsNullOrWhiteSpace(r1.commands));
    Assert.NotEmpty(r1.keys);
    cli.AclDelUser(key1);
    cli.AclSetUser(key1);
    var r2 = cli.AclGetUser(key1);
    // Fixed: asserted r1 here before (copy-paste); the fresh lookup is r2.
    Assert.NotNull(r2);
    // A newly created user is off, with no passwords and no key patterns.
    Assert.Single(r2.flags);
    Assert.Equal("off", r2.flags[0]);
    Assert.Empty(r2.passwords);
    Assert.Empty(r2.keys);
    cli.AclSetUser(key1, "reset", "+@all", "~*", "-@string", "+incr", "-debug", "+debug|digest");
    var r3 = cli.AclGetUser(key1);
    Assert.NotNull(r3);
    Assert.Contains("+@all", r3.commands);
    Assert.Contains("-@string", r3.commands);
    Assert.Contains("+debug|digest", r3.commands);
    cli.AclDelUser(key1);
}
[Fact]
// ACL SETUSER lifecycle: a user with only a password cannot authenticate
// until enabled ("on") with at least one permitted command.
public void AclSetUser()
{
    var key1 = "AclSetUser1";
    cli.AclDelUser(key1);
    cli.AclSetUser(key1, ">123456");
    using (var sh = cli.GetDatabase())
    {
        // User exists but is still "off", so AUTH is rejected.
        Assert.Equal("WRONGPASS invalid username-password pair", Assert.Throws<RedisServerException>(() => sh.Auth(key1, "123456"))?.Message);
    }
    cli.AclSetUser(key1, "on", "+acl");
    using (var sh = cli.GetDatabase())
    {
        sh.Auth(key1, "123456");
        var r1 = sh.AclWhoami();
        Assert.Equal(key1, r1);
        sh.Quit();
    }
    // "<password" removes the password; then drop the user entirely.
    cli.AclSetUser(key1, "<123456");
    cli.AclDelUser(key1);
}
[Fact]
public void AclUsers()
{
var key1 = "AclUsers1";
var r1 = cli.AclUsers();
Assert.True(r1.Length > 0);
cli.AclSetUser(key1);
Assert.Equal(2, cli.AclUsers().Length);
var r2 = cli.AclUsers();
Assert.Contains(r2, a => a == key1);
Assert.Equal(1, cli.AclDelUser(key1));
}
[Fact]
public void AclWhoami()
{
Assert.Equal("default", cli.AclWhoami());
}
[Fact]
public void BgRewriteAof()
{
var r1 = cli.BgRewriteAof();
}
[Fact]
public void BgSave()
{
//var r1 = cli.BgSave();
}
[Fact]
public void Command()
{
var r1 = cli.Command();
}
[Fact]
public void CommandCount()
{
var r1 = cli.CommandCount();
Assert.True(r1 > 0);
}
[Fact]
public void CommandGetKeys()
{
var r1 = cli.CommandGetKeys("set", "key1", "val1");
Assert.Single(r1);
Assert.Equal("key1", r1[0]);
}
[Fact]
public void CommandInfo()
{
var r1 = cli.CommandInfo("get", "set", "hset");
Assert.Equal(3, r1.Length);
}
[Fact]
public void ConfigGet()
{
var r1 = cli.ConfigGet("*max-*-entries*");
Assert.NotEmpty(r1);
}
[Fact]
public void ConfigResetStat()
{
cli.ConfigResetStat();
}
[Fact]
public void ConfigRewrite()
{
//cli.ConfigRewrite();
}
[Fact]
public void ConfigSet()
{
//cli.ConfigSet("hash-max-zipmap-entries", 512);
}
[Fact]
public void DbSize()
{
var key1 = "DbSize1";
cli.Set(key1, Guid.NewGuid());
Assert.True(cli.DbSize() > 0);
}
[Fact]
public void DebugObject()
{
var key1 = "DebugObject1";
cli.Set(key1, Guid.NewGuid());
Assert.NotNull(cli.DebugObject(key1));
}
[Fact]
public void DebugSegfault()
{
//using (var sh = cli.GetShareClient())
//{
// cli.DebugSegfault();
// var r1 = sh.AclWhoami();
//}
}
[Fact]
public void FlushAll()
{
RedisScopeExecHelper.ExecScope("redis_flush", (cli) =>
{
using (var sh = cli.GetDatabase(7))
{
cli.FlushAll(true);
cli.FlushAll(false);
}
});
}
[Fact]
public void FlushDb()
{
//using (var sh = cli.GetDatabase(7))
//{
// cli.FlushDb(true);
// cli.FlushDb(false);
//}
}
[Fact]
public void Info()
{
var r1 = cli.Info();
var r2 = cli.Info("server");
Assert.NotNull(r1);
Assert.NotNull(r2);
}
[Fact]
public void LastSave()
{
cli.Save();
var r1 = cli.LastSave();
}
[Fact]
public void LatencyDoctor()
{
var r1 = cli.LatencyDoctor();
Assert.NotNull(r1);
}
[Fact]
public void MemoryDoctor()
{
var r1 = cli.LatencyDoctor();
Assert.NotNull(r1);
}
[Fact]
public void MemoryMallocStats()
{
var r1 = cli.LatencyDoctor();
Assert.NotNull(r1);
}
[Fact]
public void MemoryPurge()
{
cli.MemoryPurge();
}
[Fact]
public void MemoryStats()
{
var r1 = cli.MemoryStats();
Assert.NotEmpty(r1);
}
[Fact]
public void MemoryUsage()
{
var key1 = "MemoryUsage1";
cli.Set(key1, "123");
var r1 = cli.MemoryUsage(key1);
Assert.True(r1 > 0);
}
[Fact]
public void Role()
{
var r1 = cli.Role();
Assert.NotNull(r1);
//Assert.Equal(RoleType.Master, r1.role);
}
}
}
|
2881099/FreeRedis | 6,712 | test/Unit/FreeRedis.Tests/RedisClientTests/SetsTests.cs | using Newtonsoft.Json;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading;
using Xunit;
namespace FreeRedis.Tests.RedisClientTests
{
// Integration tests for Redis Set commands (SADD, SPOP, SDIFF, SINTER, SUNION, ...)
// against a live server via `cli` from TestBase. The fixtures Null/Class/String/Bytes
// are distinct sample values provided by TestBase; duplicate fixtures in one SADD are
// deduplicated by the set, which is why the expected added-count is often smaller
// than the argument count.
public class SetsTests : TestBase
{
[Fact]
public void SAdd()
{
cli.Del("TestSAdd1");
// Single member added; SPOP round-trips the same serialized value.
Assert.Equal(1, cli.SAdd("TestSAdd1", String));
Assert.Equal(String, cli.SPop("TestSAdd1"));
// Four distinct members -> 4 added; one popped leaves 3.
Assert.Equal(4, cli.SAdd("TestSAdd1", Null, Class, String, Bytes));
cli.SPop("TestSAdd1");
Assert.Equal(3, cli.SCard("TestSAdd1"));
cli.Del("TestSAdd2");
// Object members are stored as JSON: typed SPop deserializes, untyped SPop
// returns the raw JSON string.
Assert.Equal(1, cli.SAdd("TestSAdd2", Class));
Assert.Equal(JsonConvert.SerializeObject(Class), JsonConvert.SerializeObject(cli.SPop<TestClass>("TestSAdd2")));
Assert.Equal(1, cli.SAdd("TestSAdd2", Class));
Assert.Equal(JsonConvert.SerializeObject(Class), cli.SPop("TestSAdd2"));
}
[Fact]
public void SCard()
{
cli.Del("TestSCard1");
Assert.Equal(4, cli.SAdd("TestSCard1", Null, Class, String, Bytes));
Assert.Equal(4, cli.SCard("TestSCard1"));
}
[Fact]
public void SDiff()
{
// Disjoint sets: difference is all of the first set.
cli.Del("TestSDiff1", "TestSDiff2");
Assert.Equal(2, cli.SAdd("TestSDiff1", String, Class));
Assert.Equal(2, cli.SAdd("TestSDiff2", Null, Bytes));
Assert.Equal(2, cli.SDiff("TestSDiff1", "TestSDiff2").Length);
}
[Fact]
public void SDiffStore()
{
// Same as SDiff but materialized into a destination key.
cli.Del("TestSDiffStore1", "TestSDiffStore2", "TestSDiffStore3");
Assert.Equal(2, cli.SAdd("TestSDiffStore1", String, Class));
Assert.Equal(2, cli.SAdd("TestSDiffStore2", Null, Bytes));
Assert.Equal(2, cli.SDiffStore("TestSDiffStore3", "TestSDiffStore1", "TestSDiffStore2"));
Assert.Equal(2, cli.SCard("TestSDiffStore3"));
}
[Fact]
public void SInter()
{
cli.Del("TestSInter1", "TestSInter2");
Assert.Equal(4, cli.SAdd("TestSInter1", Null, Class, String, Bytes));
// Duplicates collapse: only Null and String are added (2), both also in set 1.
Assert.Equal(2, cli.SAdd("TestSInter2", Null, Null, String, String));
Assert.Equal(2, cli.SInter("TestSInter1", "TestSInter2").Length);
}
[Fact]
public void SInterStore()
{
cli.Del("TestSInterStore1", "TestSInterStore2", "TestSInterStore3");
Assert.Equal(4, cli.SAdd("TestSInterStore1", Null, Class, String, Bytes));
Assert.Equal(2, cli.SAdd("TestSInterStore2", Null, Null, String, String));
Assert.Equal(2, cli.SInterStore("TestSInterStore3", "TestSInterStore1", "TestSInterStore2"));
Assert.Equal(2, cli.SCard("TestSInterStore3"));
}
[Fact]
public void SIsMember()
{
cli.Del("TestSIsMember1");
Assert.Equal(4, cli.SAdd("TestSIsMember1", Null, Class, String, Bytes));
// Membership checks must not remove members; cardinality stays 4.
Assert.True(cli.SIsMember("TestSIsMember1", Null));
Assert.True(cli.SIsMember("TestSIsMember1", String));
Assert.True(cli.SIsMember("TestSIsMember1", Bytes));
Assert.True(cli.SIsMember("TestSIsMember1", Class));
Assert.Equal(4, cli.SCard("TestSIsMember1"));
}
[Fact]
public void SMembers()
{
cli.Del("TestSMeMembers1");
Assert.Equal(4, cli.SAdd("TestSMeMembers1", Null, Class, String, Bytes));
Assert.Equal(4, cli.SMembers("TestSMeMembers1").Length);
}
[Fact]
public void SMove()
{
cli.Del("TestSMove1", "TestSMove2");
Assert.Equal(4, cli.SAdd("TestSMove1", Null, Class, String, Bytes));
// Moving an existing member succeeds; moving a non-member returns false.
Assert.True(cli.SMove("TestSMove1", "TestSMove2", Class));
Assert.False(cli.SMove("TestSMove1", "TestSMove2", "123123123xxxdx123"));
Assert.Equal(1, cli.SCard("TestSMove2"));
Assert.Equal(Class.ToString(), cli.SPop<TestClass>("TestSMove2").ToString());
}
[Fact]
public void SPop()
{
cli.Del("TestSPop1");
// Popping a missing key yields null.
Assert.Null(cli.SPop("TestSPop1"));
// Eight arguments with duplicates -> only 4 distinct members added.
Assert.Equal(4, cli.SAdd("TestSPop1", Null, Null, String, String, Class, Class, Bytes, Bytes));
// Pop more times than there are members; extra pops are harmless no-ops.
cli.SPop("TestSPop1");
cli.SPop<byte[]>("TestSPop1");
cli.SPop<byte[]>("TestSPop1");
cli.SPop<byte[]>("TestSPop1");
cli.SPop<byte[]>("TestSPop1");
cli.SPop<byte[]>("TestSPop1");
cli.SPop<byte[]>("TestSPop1");
cli.SPop<byte[]>("TestSPop1");
cli.SPop<byte[]>("TestSPop1");
cli.SPop<byte[]>("TestSPop1");
// Set is now empty again.
Assert.Null(cli.SPop("TestSPop1"));
}
[Fact]
public void SRandMember()
{
cli.Del("TestSRandMember1");
// Duplicates collapse: 3 distinct members; three pops drain the set.
Assert.Equal(3, cli.SAdd("TestSRandMember1", String, String, Bytes, Bytes, Class, Class));
Assert.NotNull(cli.SPop("TestSRandMember1"));
Assert.NotNull(cli.SPop("TestSRandMember1"));
Assert.NotNull(cli.SPop("TestSRandMember1"));
Assert.Null(cli.SPop("TestSRandMember1"));
}
[Fact]
public void SRem()
{
cli.Del("TestSRem1");
Assert.Equal(4, cli.SAdd("TestSRem1", Null, Class, String, Bytes));
// Removing all four members reports 4 removals and empties the set.
Assert.Equal(4, cli.SRem("TestSRem1", Null, String, Bytes, Class));
Assert.Null(cli.SPop("TestSRem1"));
}
[Fact]
public void SScan()
{
cli.Del("TestSScan1");
Assert.Equal(4, cli.SAdd("TestSScan1", Null, Class, String, Bytes));
// Single SSCAN page (count 10) should return all 4 members.
Assert.Equal(4, cli.SScan("TestSScan1", 0, "*", 10).items.Length);
}
[Fact]
public void SUnion()
{
cli.Del("TestSUnion1", "TestSUnion2");
Assert.Equal(2, cli.SAdd("TestSUnion1", Bytes, Bytes, Class, Class));
Assert.Equal(2, cli.SAdd("TestSUnion2", Null, Null, String, String));
// Disjoint 2-member sets union to 4 members.
Assert.Equal(4, cli.SUnion("TestSUnion1", "TestSUnion2").Length);
}
[Fact]
public void SUnionStore()
{
cli.Del("TestSUnionStore1", "TestSUnionStore2", "TestSUnionStore3");
Assert.Equal(2, cli.SAdd("TestSUnionStore1", Bytes, Bytes, Class, Class));
Assert.Equal(2, cli.SAdd("TestSUnionStore2", Null, Null, String, String));
Assert.Equal(4, cli.SUnionStore("TestSUnionStore3", "TestSUnionStore1", "TestSUnionStore2"));
Assert.Equal(4, cli.SCard("TestSUnionStore3"));
}
}
}
|
Subsets and Splits
PyTorch Neural Network Imports
This query filters for code examples containing a specific PyTorch import pattern. It is useful for finding code snippets that use PyTorch's neural-network module, but it does not provide deeper analytical insight into the dataset.
HTML Files in Train Set
Retrieves all records from the dataset where the file path ends with .html or .htm, providing a basic filter for HTML files.
SQL Console for nick007x/github-code-2025
Retrieves 200 file paths that end with '.html' or '.htm', providing a basic overview of HTML files in the dataset.
Top HTML Files
The query retrieves a sample of HTML file paths, providing basic filtering but limited analytical value.
CSharp Repositories Excluding Unity
Retrieves all records for repositories that contain C# files but are not related to Unity, providing a basic filter of the dataset.
C# File Count per Repository
Counts the total number of C# files across distinct repositories, providing a basic measure of C# file presence.
SQL Console for nick007x/github-code-2025
Lists unique repository IDs containing C# files, providing basic filtering to understand which repositories have C# code.
Select Groovy Files: Train Set
Retrieves the first 1000 entries from the 'train' dataset where the file path ends with '.groovy', providing a basic sample of Groovy files.
GitHub Repos with WiFiClientSecure
Finds specific file paths in repositories that contain particular code snippets related to WiFiClientSecure and ChatGPT, providing basic filtering of relevant files.