|
| 1 | +"""Export competition submissions to Hugging Face datasets as parquet files.""" |
| 2 | + |
import io
import tempfile
from datetime import datetime, timedelta, timezone
from importlib.resources import files

import pyarrow as pa
import pyarrow.parquet as pq
from huggingface_hub import HfApi

from libkernelbot.leaderboard_db import LeaderboardDB
from libkernelbot.utils import setup_logging
| 14 | + |
logger = setup_logging(__name__)

# Deduplication query shipped as a package resource so it resolves both from a
# source checkout and from an installed wheel.
HF_EXPORT_ROWS_SQL = files("libkernelbot").joinpath("sql/get_hf_export_rows.sql").read_text(
    encoding="utf-8"
)
| 19 | + |
# Explicit schema matching GPUMODE/kernelbot-data nvidia_nvfp4_submissions.parquet
SUBMISSIONS_SCHEMA = pa.schema(
    [
        pa.field("submission_id", pa.int64()),
        pa.field("leaderboard_id", pa.int64()),
        pa.field("problem_name", pa.large_string()),
        pa.field("user_id", pa.large_string()),
        pa.field("user_name", pa.large_string()),
        pa.field("code_id", pa.int64()),
        pa.field("file_name", pa.large_string()),
        pa.field("submission_time", pa.timestamp("us", tz="UTC")),
        pa.field("status", pa.large_string()),
        pa.field("score", pa.float64()),
        pa.field("passed", pa.bool_()),
        pa.field("mode", pa.large_string()),
        pa.field("runner", pa.large_string()),
        pa.field("code", pa.large_string()),
    ]
)
| 37 | + |
| 38 | + |
| 39 | +def _normalize_deadline(deadline: datetime) -> datetime: |
| 40 | + """Ensure deadlines are timezone-aware before comparing them.""" |
| 41 | + if deadline.tzinfo is None: |
| 42 | + return deadline.replace(tzinfo=timezone.utc) |
| 43 | + return deadline |
| 44 | + |
| 45 | + |
# Deadlines further out than this are treated as permanent/practice boards
# (e.g. year-2100 sentinels), not real competitions.
MAX_COMPETITION_HORIZON_DAYS = 365
| 48 | + |
def get_active_competition_leaderboards(
    leaderboards: list[dict],
    *,
    now: datetime | None = None,
) -> list[dict]:
    """Return leaderboards that belong to real, active competitions.

    Filters out:
    - Expired leaderboards (deadline <= now)
    - Dev leaderboards (name ending with "-dev")
    - Permanent/practice leaderboards (deadline > 1 year from now, e.g. year 2100)

    Args:
        leaderboards: Leaderboard dicts with at least "name" and "deadline" keys.
        now: Reference time; defaults to the current UTC time.

    Returns:
        The subset of ``leaderboards`` that are active competitions.
    """
    if now is None:
        now = datetime.now(timezone.utc)

    # Anything beyond this horizon is a permanent/practice board, not a competition.
    horizon = now + timedelta(days=MAX_COMPETITION_HORIZON_DAYS)

    return [
        leaderboard
        for leaderboard in leaderboards
        if now < _normalize_deadline(leaderboard["deadline"]) < horizon
        and not leaderboard["name"].endswith("-dev")
    ]
| 74 | + |
| 75 | + |
def ensure_public_export_allowed(
    db: LeaderboardDB,
    leaderboard_ids: list[int],
    *,
    now: datetime | None = None,
) -> None:
    """Block public exports while any selected leaderboard is still active.

    Args:
        db: Database handle used to enumerate leaderboards.
        leaderboard_ids: IDs selected for export.
        now: Reference time; defaults to the current UTC time.

    Raises:
        ValueError: If any selected leaderboard's deadline has not passed yet.
    """
    reference = datetime.now(timezone.utc) if now is None else now
    selected = set(leaderboard_ids)

    still_active = sorted(
        board["name"]
        for board in db.get_leaderboards()
        if board["id"] in selected and _normalize_deadline(board["deadline"]) > reference
    )
    if still_active:
        raise ValueError(
            "Cannot export active leaderboards to the public dataset: "
            + ", ".join(still_active)
        )
| 101 | + |
| 102 | + |
| 103 | +def get_hf_export_rows(db: LeaderboardDB, leaderboard_ids: list[int]) -> list[dict]: |
| 104 | + """Fetch deduplicated submissions for export.""" |
| 105 | + if not leaderboard_ids: |
| 106 | + return [] |
| 107 | + |
| 108 | + db.cursor.execute(HF_EXPORT_ROWS_SQL, (leaderboard_ids,)) |
| 109 | + |
| 110 | + columns = [ |
| 111 | + "submission_id", "leaderboard_id", "problem_name", "user_id", "user_name", |
| 112 | + "code_id", "file_name", "submission_time", "status", "score", "passed", |
| 113 | + "mode", "runner", "code", |
| 114 | + ] |
| 115 | + return [dict(zip(columns, row, strict=True)) for row in db.cursor.fetchall()] |
| 116 | + |
| 117 | + |
def rows_to_parquet_bytes(rows: list[dict]) -> bytes:
    """Convert a list of row dicts to parquet bytes using the canonical schema.

    Normalizes a few fields (stringified user ids, empty-string user names,
    float scores) on copies, so the caller's dicts are never mutated. An empty
    input produces a valid zero-row parquet file carrying the schema.

    Args:
        rows: Row dicts whose keys match SUBMISSIONS_SCHEMA field names.

    Returns:
        Snappy-compressed parquet file contents.
    """
    if not rows:
        # empty_table() yields a zero-row table with the canonical schema.
        table = SUBMISSIONS_SCHEMA.empty_table()
    else:
        normalized = []
        for row in rows:
            # Copy before normalizing: the original mutated callers' dicts in place.
            clean = dict(row)
            if clean.get("user_id") is not None:
                clean["user_id"] = str(clean["user_id"])
            if clean.get("user_name") is None:
                clean["user_name"] = ""
            if clean.get("score") is not None:
                clean["score"] = float(clean["score"])
            normalized.append(clean)
        table = pa.Table.from_pylist(normalized, schema=SUBMISSIONS_SCHEMA)

    buf = io.BytesIO()
    pq.write_table(table, buf, compression="snappy")
    return buf.getvalue()
| 135 | + |
| 136 | + |
def export_to_hf(
    db: LeaderboardDB,
    leaderboard_ids: list[int],
    repo_id: str,
    filename: str,
    token: str,
    private: bool = True,
) -> dict:
    """Export deduplicated submissions to a HF dataset repo as a parquet file.

    Public (non-private) exports are refused while any selected leaderboard is
    still active. Returns a summary dict with row count and repo info.
    """
    if not private:
        ensure_public_export_allowed(db, leaderboard_ids)

    api = HfApi(token=token)
    api.create_repo(repo_id, repo_type="dataset", private=private, exist_ok=True)

    exported_rows = get_hf_export_rows(db, leaderboard_ids)
    payload = rows_to_parquet_bytes(exported_rows)

    # Stage the parquet payload on disk; upload_file reads from a path.
    with tempfile.NamedTemporaryFile(suffix=".parquet") as staging:
        staging.write(payload)
        staging.flush()
        api.upload_file(
            path_or_fileobj=staging.name,
            path_in_repo=filename,
            repo_id=repo_id,
            repo_type="dataset",
        )

    logger.info("Exported %d rows to %s/%s", len(exported_rows), repo_id, filename)
    return {"rows": len(exported_rows), "repo_id": repo_id, "filename": filename}
| 169 | + |
| 170 | + |
def publish_to_public_repo(
    db: LeaderboardDB,
    leaderboard_ids: list[int],
    public_repo_id: str,
    filename: str,
    token: str,
) -> dict:
    """Export final competition data to the public dataset repo.

    Thin wrapper over export_to_hf with private=False, which triggers the
    all-deadlines-passed safety check before anything is uploaded.
    """
    return export_to_hf(
        db=db,
        leaderboard_ids=leaderboard_ids,
        repo_id=public_repo_id,
        filename=filename,
        token=token,
        private=False,
    )
0 commit comments