Commit c4bdc09 (1 parent: 0bfbd08)
aiter, LMCache, Mooncake, torchtitan and ao
PyTorchConference2025_GithubRepos.json CHANGED
@@ -302,6 +302,7 @@
     {
       "repo_name": "aiter",
       "repo_link": "https://github.com/ROCm/aiter",
+      "category": "gpu kernels",
       "github_about_section": "AI Tensor Engine for ROCm",
       "homepage_link": "https://rocm.blogs.amd.com/software-tools-optimization/aiter-ai-tensor-engine/README.html",
       "contributors_all": 151,
@@ -312,6 +313,7 @@
     {
       "repo_name": "LMCache",
       "repo_link": "https://github.com/LMCache/LMCache",
+      "category": "inference",
       "github_about_section": "Supercharge Your LLM with the Fastest KV Cache Layer",
       "homepage_link": "https://lmcache.ai",
       "contributors_all": 152,
@@ -333,6 +335,7 @@
     {
       "repo_name": "Mooncake",
       "repo_link": "https://github.com/kvcache-ai/Mooncake",
+      "category": "inference",
       "github_about_section": "Mooncake is the serving platform for Kimi, a leading LLM service provided by Moonshot AI.",
       "homepage_link": "https://kvcache-ai.github.io/Mooncake",
       "github_topic_closest_fit": "inference",
@@ -344,7 +347,9 @@
     {
       "repo_name": "torchtitan",
       "repo_link": "https://github.com/pytorch/torchtitan",
+      "category": "training framework",
       "github_about_section": "A PyTorch native platform for training generative AI models",
+      "homepage_link": "https://arxiv.org/abs/2410.06511",
       "contributors_all": 145,
       "contributors_2025": 119,
       "contributors_2024": 43,
@@ -353,6 +358,7 @@
     {
       "repo_name": "ao",
       "repo_link": "https://github.com/pytorch/ao",
+      "category": "quantization",
       "github_about_section": "PyTorch native quantization and sparsity for training and inference",
       "homepage_link": "https://pytorch.org/ao",
       "github_topic_closest_fit": "quantization",
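The commit adds a "category" field to each of these entries. As a minimal sketch (not part of the commit), the snippet below shows one way the dataset could be grouped by that new field; it assumes PyTorchConference2025_GithubRepos.json is a top-level JSON array of objects with the keys visible in the diff.

# Sketch: group conference repos by the newly added "category" field.
# Assumption: the file is a top-level JSON array of repo objects.
import json
from collections import defaultdict

with open("PyTorchConference2025_GithubRepos.json") as f:
    repos = json.load(f)

by_category = defaultdict(list)
for repo in repos:
    # Fall back to the GitHub topic when an entry has no explicit category yet.
    category = repo.get("category") or repo.get("github_topic_closest_fit", "uncategorized")
    by_category[category].append(repo["repo_name"])

for category, names in sorted(by_category.items()):
    print(f"{category}: {', '.join(names)}")

On the entries touched here, this would place aiter under "gpu kernels", LMCache and Mooncake under "inference", torchtitan under "training framework", and ao under "quantization".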