Skip to content

vllm.v1.simple_kv_offload.cuda_mem_ops

Low-level CUDA memory helpers: pinning and batch DMA transfers.

_resolve_batch_memcpy

_resolve_batch_memcpy()

Resolve cuMemcpyBatchAsync via cuGetProcAddress (one-time).

Source code in vllm/v1/simple_kv_offload/cuda_mem_ops.py
def _resolve_batch_memcpy():
    """One-time lookup of the cuMemcpyBatchAsync driver entry point.

    Uses cuGetProcAddress (CUDA 12.8 ABI, flags=0) so the symbol is
    resolved against the installed driver rather than linked statically.
    """
    from cuda.bindings import driver as drv

    err, fn_ptr, _ = drv.cuGetProcAddress(b"cuMemcpyBatchAsync", 12080, 0)
    if err == drv.CUresult.CUDA_SUCCESS:
        # Wrap the raw function pointer in the ctypes prototype.
        return _BATCH_MEMCPY_FUNC_TYPE(fn_ptr)
    raise RuntimeError(f"cuGetProcAddress(cuMemcpyBatchAsync) failed: {err}")

copy_blocks

copy_blocks(
    src_block_ids: list[int],
    dst_block_ids: list[int],
    params: BatchMemcpyParams,
) -> None

Copy blocks via cuMemcpyBatchAsync.

Source code in vllm/v1/simple_kv_offload/cuda_mem_ops.py
def copy_blocks(
    src_block_ids: list[int],
    dst_block_ids: list[int],
    params: BatchMemcpyParams,
) -> None:
    """Copy blocks via cuMemcpyBatchAsync.

    For each layer, the block at ``src_block_ids[i]`` is copied to the
    block at ``dst_block_ids[i]``, asynchronously on the stream in
    ``params``.

    Args:
        src_block_ids: Source block indices, one per block to copy.
        dst_block_ids: Destination block indices; must have the same
            length as ``src_block_ids``.
        params: Precomputed per-layer base pointers, bytes-per-block,
            batch-copy attributes and the CUDA stream handle.

    Raises:
        ValueError: If the two block-id lists differ in length.
        RuntimeError: If cuMemcpyBatchAsync returns an error.
    """
    n = len(src_block_ids)
    if n == 0:
        return
    # Guard against misaligned inputs: the driver consumes
    # total = n * num_layers entries from *each* pointer array, so a
    # shorter dst list would make it read past the end of dst_all.
    if len(dst_block_ids) != n:
        raise ValueError(
            f"src/dst block id count mismatch: {n} vs {len(dst_block_ids)}"
        )

    src_ids = np.array(src_block_ids, dtype=np.uint64)
    dst_ids = np.array(dst_block_ids, dtype=np.uint64)

    # Build (num_layers, n) grids of device/host addresses:
    # base[layer] + block_id * bytes_per_block[layer], flattened row-major.
    src_all = (
        params.src_bases[:, None] + src_ids[None, :] * params.bpb[:, None]
    ).ravel()
    dst_all = (
        params.dst_bases[:, None] + dst_ids[None, :] * params.bpb[:, None]
    ).ravel()
    # One copy size per (layer, block) pair, matching the flattening order.
    sz_all = np.repeat(params.bpb, n)

    total = n * params.num_layers
    err = _batch_memcpy_fn(
        dst_all.ctypes.data,
        src_all.ctypes.data,
        sz_all.ctypes.data,
        total,
        ctypes.addressof(params.attrs),
        ctypes.byref(params.attrs_idx),
        1,  # attrsIdxsSize: a single attribute set applies to all copies
        ctypes.byref(params.fail_idx),
        params.stream_handle,
    )
    if err != 0:
        raise RuntimeError(
            f"cuMemcpyBatchAsync failed: err={err} failIdx={params.fail_idx.value}"
        )

pin_tensor

pin_tensor(tensor: Tensor) -> None

Pin a CPU tensor via cudaHostRegister.

This bypasses PyTorch's CUDACachingHostAllocator which rounds every pin_memory=True allocation up to the next power of 2 (e.g. 100 GB becomes 128 GB).

Source code in vllm/v1/simple_kv_offload/cuda_mem_ops.py
def pin_tensor(tensor: torch.Tensor) -> None:
    """Pin a CPU tensor via cudaHostRegister.

    This bypasses PyTorch's CUDACachingHostAllocator which rounds
    every ``pin_memory=True`` allocation up to the next power of 2
    (e.g. 100 GB becomes 128 GB).
    """
    err = torch.cuda.cudart().cudaHostRegister(tensor.data_ptr(), tensor.nbytes, 0)
    if err.value != 0:
        raise RuntimeError(f"cudaHostRegister failed: {err}")