import os
import json
import requests
from dotenv import load_dotenv

# Load environment variables from a local .env file.
load_dotenv()

# GitHub personal access token used to authenticate API requests.
GITHUB_TOKEN = os.getenv("GITHUB_TOKEN")
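
# Note: unauthenticated requests to the GitHub REST API are limited to 60 per
# hour, while token-authenticated requests get a much larger quota (5,000 per
# hour at the time of writing), so a token is effectively required when
# scraping whole repositories.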

# Repositories to scrape for source files.
repo_urls = [
    "https://github.com/gizatechxyz/orion",
    "https://github.com/gizatechxyz/Giza-Hub",
    "https://github.com/zkonduit/ezkl",
    "https://github.com/socathie/keras2circom",
    "https://github.com/socathie/circomlib-ml",
    "https://github.com/worldcoin/proto-neural-zkp",
    "https://github.com/Modulus-Labs/RockyBot",
    "https://github.com/ora-io/keras2circom",
    "https://github.com/zk-ml/tachikoma",
    "https://github.com/only4sim/ZK-DTP",
    "https://github.com/ddkang/zkml",
    "https://github.com/socathie/ZKaggleV2"
]

# Path of the JSON file the collected dataset is written to.
output_file = "dataset.json"

# Accumulates one entry per scraped file.
dataset = []


def retrieve_files(repo_url, path=""):
    """Recursively walk a repository's contents and collect matching source files."""
    repo_owner, repo_name = repo_url.split("/")[-2:]
    api_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/contents/{path}"
    headers = {
        "Authorization": f"Bearer {GITHUB_TOKEN}",
        "Accept": "application/vnd.github.v3+json"
    }
    response = requests.get(api_url, headers=headers)

    if response.status_code == 200:
        contents = response.json()
        for item in contents:
            # Skip links to issues, commits, and pull requests.
            if "/issues/" in item["url"] or "/commits/" in item["url"] or "/pulls/" in item["url"]:
                continue
            # Skip hidden files and directories.
            if item["name"].startswith("."):
                continue
            if item["type"] == "file":
                # Only keep files with the extensions of interest.
                if item["name"].endswith((".py", ".js", ".java", ".c", ".cpp", ".h", ".hpp", ".rs", ".cairo", ".zkey", ".sol", ".circom", ".ejs", ".ipynb")):
                    file_url = item["download_url"]
                    file_response = requests.get(file_url)
                    file_content = file_response.text
                    data_entry = {
                        "repo": repo_url,
                        "file_path": item["path"],
                        "content": file_content
                    }
                    dataset.append(data_entry)
                    print("Appended ", item["path"])
            elif item["type"] == "dir":
                # Recurse into subdirectories.
                retrieve_files(repo_url, item["path"])
    else:
        print(f"Failed to retrieve contents for path: {path} in repository: {repo_url}")


# Merge in any previously saved entries so repeated runs accumulate data.
if os.path.exists(output_file):
    with open(output_file, "r") as file:
        existing_dataset = json.load(file)
    dataset.extend(existing_dataset)
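# Note: previously saved entries are merged back without de-duplication, so
# re-running the script over the same repositories appends duplicate records.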

# Walk every repository and collect its files into the in-memory dataset.
for repo_url in repo_urls:
    print("Scraping ", repo_url)
    retrieve_files(repo_url)

# Persist the combined dataset as pretty-printed JSON.
with open(output_file, "w") as file:
    json.dump(dataset, file, indent=4)

print(f"Dataset created successfully. Saved to {output_file}.")