{
  "affected": [
    {
      "ranges": [
        {
          "events": [
            {
              "introduced": "01efc7ef781391e744ed08c3292817a773d654e6"
            },
            {
              "fixed": "439368496db48d8f992ba8c606a0c0b1eebbfa69"
            }
          ],
          "repo": "https://github.com/vllm-project/vllm",
          "type": "GIT"
        }
      ]
    }
  ],
  "aliases": [
    "GHSA-mrw7-hf4f-83pf"
  ],
  "database_specific": {
    "cna_assigner": "GitHub_M",
    "cwe_ids": [
      "CWE-123",
      "CWE-20",
      "CWE-502",
      "CWE-787"
    ],
    "osv_generated_from": "https://github.com/CVEProject/cvelistV5/tree/main/cves/2025/62xxx/CVE-2025-62164.json"
  },
  "details": "vLLM is an inference and serving engine for large language models (LLMs). From versions 0.10.2 to before 0.11.1, a memory corruption vulnerability that could lead to a crash (denial-of-service) and potentially remote code execution (RCE) exists in the Completions API endpoint. When processing user-supplied prompt embeddings, the endpoint loads serialized tensors using torch.load() without sufficient validation. Due to a change introduced in PyTorch 2.8.0, sparse tensor integrity checks are disabled by default. As a result, maliciously crafted tensors can bypass internal bounds checks and trigger an out-of-bounds memory write during the call to to_dense(). This memory corruption can crash vLLM and potentially lead to code execution on the server hosting vLLM. This issue has been patched in version 0.11.1.",
  "id": "CVE-2025-62164",
  "modified": "2026-04-01T23:09:15.400287873Z",
  "published": "2025-11-21T01:18:38.803Z",
  "references": [
    {
      "type": "ADVISORY",
      "url": "https://github.com/CVEProject/cvelistV5/tree/main/cves/2025/62xxx/CVE-2025-62164.json"
    },
    {
      "type": "ADVISORY",
      "url": "https://github.com/vllm-project/vllm/security/advisories/GHSA-mrw7-hf4f-83pf"
    },
    {
      "type": "ADVISORY",
      "url": "https://nvd.nist.gov/vuln/detail/CVE-2025-62164"
    },
    {
      "type": "FIX",
      "url": "https://github.com/vllm-project/vllm/commit/58fab50d82838d5014f4a14d991fdb9352c9c84b"
    },
    {
      "type": "FIX",
      "url": "https://github.com/vllm-project/vllm/pull/27204"
    }
  ],
  "schema_version": "1.7.3",
  "severity": [
    {
      "score": "CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H",
      "type": "CVSS_V3"
    }
  ],
  "summary": "vLLM deserialization vulnerability leading to DoS and potential RCE"
}