Commit 67d7823
Parent(s): 0cb6726
flash-linear-attention
PyTorchConference2025_GithubRepos.json
CHANGED
@@ -1344,5 +1344,10 @@
         "repo_link": "https://github.com/triton-lang/triton-windows",
         "github_about_section": "Triton with Windows support",
         "homepage_link": "https://triton-lang.org"
+    },
+    {
+        "repo_name": "flash-linear-attention",
+        "repo_link": "https://github.com/fla-org/flash-linear-attention",
+        "github_about_section": "Efficient implementations of state-of-the-art linear attention models"
     }
 ]
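For context, a minimal sketch of how the edited file might be read back after this commit, assuming it is a plain JSON array of objects with the keys shown in the diff above (the lookup logic here is illustrative, not part of the repository):

import json

# Load the repo list this commit edits; assumed to be a plain JSON array of
# objects with keys such as "repo_name", "repo_link", and
# "github_about_section" (schema inferred from the diff above).
with open("PyTorchConference2025_GithubRepos.json") as f:
    repos = json.load(f)

# Look up the entry added in this commit by its repo_link.
entry = next(
    (r for r in repos
     if r.get("repo_link") == "https://github.com/fla-org/flash-linear-attention"),
    None,
)
if entry is not None:
    print(entry["github_about_section"])
    # -> Efficient implementations of state-of-the-art linear attention models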