{
  "schema_version": "v4.0",
  "notes": [
    "This manifest reflects the actual corpus used in the benchmark runs that produced the results in /provenance/ and /results/.",
    "Where the spec's 8-char prefix (Appendix A.1) matches the resolved HF revision, we record both. Where they differ, we record the actual SHA resolved from HF main as of the benchmark date (2026-05-13). See DEVIATIONS.md for the prefix mismatches.",
    "The cross-layer (§8) corpus uses SmolLM2-1.7B as a test-model substitute for Llama-3.2-3B (which is HF-gated). See provenance/ for per-stage BUILD_LOG and FINAL_VERDICT files."
  ],
  "models": [
    {
      "repo_id": "gpt2",
      "revision_prefix_spec": "607a30d7",
      "revision_full": "607a30d783dfa663caf39e06633721c8d4cfcd7e",
      "prefix_match": true,
      "formats": [
        "bf16",
        "fp16",
        "fp32"
      ],
      "tensor_counts": {
        "bf16": 81,
        "fp16": 81
      },
      "split": "train",
      "total_bytes": 498000000,
      "used_in": [
        "bf16_main",
        "bf16_supplementary"
      ]
    },
    {
      "repo_id": "distilbert-base-uncased",
      "revision_prefix_spec": "1c4513b2",
      "revision_full": "12040accade4e8a0f71eabdb258fecc2e7e948be",
      "prefix_match": false,
      "formats": [
        "bf16",
        "fp16",
        "fp32"
      ],
      "tensor_counts": {
        "bf16": 73,
        "fp16": 73
      },
      "split": "train",
      "total_bytes": 268000000,
      "used_in": [
        "bf16_main",
        "bf16_supplementary"
      ]
    },
    {
      "repo_id": "facebook/opt-125m",
      "revision_prefix_spec": "27dcfa74",
      "revision_full": "27dcfa74d334bc871f3234de431e71c6eeba5dd6",
      "prefix_match": true,
      "formats": [
        "bf16",
        "fp16",
        "fp32"
      ],
      "tensor_counts": {
        "bf16": 75,
        "fp16": 75
      },
      "split": "train",
      "total_bytes": 500000000,
      "used_in": [
        "bf16_main",
        "bf16_supplementary"
      ]
    },
    {
      "repo_id": "sentence-transformers/all-MiniLM-L6-v2",
      "revision_prefix_spec": "e4ce9879",
      "revision_full": "c9745ed1d9f207416be6d2e6f8de32d1f16199bf",
      "prefix_match": false,
      "formats": [
        "bf16",
        "fp16"
      ],
      "tensor_counts": {
        "bf16": 13,
        "fp16": 13
      },
      "split": "test",
      "total_bytes": 90000000,
      "used_in": [
        "bf16_main",
        "bf16_supplementary"
      ]
    },
    {
      "repo_id": "Qwen/Qwen2.5-0.5B",
      "revision_prefix_spec": "060db649",
      "revision_full": "060db6499f32faf8b98477b0a26969ef7d8b9987",
      "prefix_match": true,
      "formats": [
        "bf16",
        "fp16"
      ],
      "tensor_counts": {
        "bf16": 121,
        "fp16": 121
      },
      "split": "test",
      "total_bytes": 988000000,
      "used_in": [
        "bf16_main",
        "bf16_supplementary",
        "cross_layer_train"
      ]
    },
    {
      "repo_id": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
      "revision_prefix_spec": "fe8a4ea1",
      "revision_full": "fe8a4ea1ffedaf415f4da2f062534de366a451e6",
      "prefix_match": true,
      "formats": [
        "bf16",
        "fp16"
      ],
      "tensor_counts": {
        "bf16": 156,
        "fp16": 156
      },
      "split": "test",
      "total_bytes": 2200000000,
      "used_in": [
        "bf16_main",
        "bf16_supplementary"
      ]
    },
    {
      "repo_id": "Qwen/Qwen2.5-7B-Instruct",
      "revision_prefix_spec": "a09a3545",
      "revision_full": "a09a35458c702b33eeacc393d103063234e8bc28",
      "prefix_match": true,
      "formats": [
        "bf16"
      ],
      "tensor_counts": {
        "bf16": 196
      },
      "split": "7B_validation",
      "total_bytes": 12400000000,
      "used_in": [
        "bf16_7B_validation",
        "cross_layer_test"
      ]
    },
    {
      "repo_id": "Qwen/Qwen2.5-1.5B",
      "revision_full": "<resolved at run time; see notes>",
      "formats": [
        "bf16"
      ],
      "split": "cross_layer_train",
      "used_in": [
        "cross_layer_train"
      ]
    },
    {
      "repo_id": "HuggingFaceTB/SmolLM2-1.7B",
      "revision_full": "<resolved at run time; substituted for Llama-3.2-3B which is gated>",
      "formats": [
        "bf16"
      ],
      "split": "cross_layer_test_substitute",
      "used_in": [
        "cross_layer_test"
      ]
    },
    {
      "repo_id": "bartowski/Qwen2.5-0.5B-Instruct-GGUF",
      "quantization": "Q4_K_M",
      "revision_prefix_spec": "a8b21f63",
      "revision_full": "41ba88dbac95fed2528c92514c131d73eb5a174b",
      "prefix_match": false,
      "formats": [
        "Q4_K"
      ],
      "tensor_counts": {
        "Q4_K": 168,
        "Q6_K": 24
      },
      "split": "train",
      "total_bytes": 397808192,
      "used_in": [
        "Q4_K_train",
        "GGUF_artifact_train"
      ]
    },
    {
      "repo_id": "bartowski/Qwen2.5-1.5B-Instruct-GGUF",
      "quantization": "Q4_K_M",
      "revision_prefix_spec": "b4d309e1",
      "revision_full": "9eadc66189c7641e1ddd226b8267a9119b2ce2d4",
      "prefix_match": false,
      "formats": [
        "Q4_K"
      ],
      "tensor_counts": {
        "Q4_K": 345,
        "Q6_K": 56
      },
      "split": "train",
      "total_bytes": 986000000,
      "used_in": [
        "Q4_K_train",
        "GGUF_artifact_train"
      ]
    },
    {
      "repo_id": "bartowski/Qwen2.5-7B-Instruct-GGUF",
      "quantization": "Q4_K_M",
      "revision_prefix_spec": "c7e5a82d",
      "revision_full": "8911e8a47f92bac19d6f5c64a2e2095bd2f7d031",
      "prefix_match": false,
      "formats": [
        "Q4_K"
      ],
      "tensor_counts": {
        "Q4_K": 196,
        "Q6_K": 28
      },
      "split": "test",
      "total_bytes": 4680000000,
      "used_in": [
        "Q4_K_test",
        "GGUF_artifact_test"
      ]
    },
    {
      "repo_id": "bartowski/Llama-3.2-3B-Instruct-GGUF",
      "quantization": "Q4_K_M",
      "revision_prefix_spec": "0cb88a4f",
      "revision_full": "5ab33fa94d1d04e903623ae72c95d1696f09f9e8",
      "prefix_match": false,
      "formats": [
        "Q4_K"
      ],
      "tensor_counts": {
        "Q4_K": 168,
        "Q6_K": 28
      },
      "split": "test",
      "total_bytes": 2020000000,
      "used_in": [
        "Q4_K_test",
        "GGUF_artifact_test"
      ]
    },
    {
      "repo_id": "bartowski/Mistral-7B-Instruct-v0.3-GGUF",
      "quantization": "Q4_K_M",
      "revision_prefix_spec": "e0bc86c7",
      "revision_full": "61fd4167fff3ab01ee1cfe0da183fa27a944db48",
      "prefix_match": false,
      "formats": [
        "Q4_K"
      ],
      "tensor_counts": {
        "Q4_K": 166,
        "Q6_K": 32
      },
      "split": "test",
      "total_bytes": 4370000000,
      "used_in": [
        "Q4_K_test",
        "GGUF_artifact_test"
      ]
    }
  ]
}
