vllm.v1.kv_offload.worker.cpu_gpu

logger module-attribute

logger = init_logger(__name__)

CpuGpuOffloadingHandlers

Source code in vllm/v1/kv_offload/worker/cpu_gpu.py
class CpuGpuOffloadingHandlers:
    def __init__(
        self,
        gpu_block_size: int,
        cpu_block_size: int,
        num_cpu_blocks: int,
        gpu_caches: dict[str, torch.Tensor],
        attn_backends: dict[str, type[AttentionBackend]],
    ):
        assert gpu_caches
        assert cpu_block_size % gpu_block_size == 0

        # find kernel block size and determine layout per each gpu tensor
        kernel_block_size: int | None = None
        # list of (gpu_tensor, split_k_and_v)
        parsed_gpu_tensors: list[tuple[torch.Tensor, bool]] = []
        for layer_name, gpu_tensor in gpu_caches.items():
            gpu_shape = gpu_tensor.shape
            attn_backend = attn_backends[layer_name]
            test_shape = attn_backend.get_kv_cache_shape(
                num_blocks=1234, block_size=16, num_kv_heads=8, head_size=256
            )

            has_layers_dim = False
            split_k_and_v = False
            if len(gpu_shape) != len(test_shape):
                # cross-layers tensor
                # shape is (num_blocks, ...)
                assert len(gpu_shape) == len(test_shape) + 1
                has_layers_dim = True
                # prepend a dummy num_layers=80 to test_shape
                test_shape = (80,) + test_shape
            elif test_shape[0] != 1234:
                # shape should be (2, num_blocks, ...)
                assert test_shape[0] == 2
                assert test_shape[1] == 1234
                assert gpu_shape[0] == 2
                split_k_and_v = True

            try:
                kv_cache_stride_order = attn_backend.get_kv_cache_stride_order(
                    include_num_layers_dimension=has_layers_dim
                )
                assert len(kv_cache_stride_order) == len(gpu_shape)
            except (AttributeError, NotImplementedError):
                kv_cache_stride_order = tuple(range(len(gpu_shape)))

            # permute test_shape according to stride_order
            test_shape = tuple(test_shape[i] for i in kv_cache_stride_order)

            # find block_size (16) dimension index
            block_size_idx = test_shape.index(16)
            if kernel_block_size is not None:
                assert kernel_block_size == gpu_shape[block_size_idx]
            else:
                kernel_block_size = gpu_shape[block_size_idx]
                assert gpu_block_size % kernel_block_size == 0

            parsed_gpu_tensors.append((gpu_tensor, split_k_and_v))

        assert kernel_block_size is not None
        cpu_block_size_factor = cpu_block_size // kernel_block_size
        gpu_block_size_factor = gpu_block_size // kernel_block_size
        num_cpu_kernel_blocks = num_cpu_blocks * cpu_block_size_factor

        # allocate cpu tensors
        pin_memory = is_pin_memory_available()
        logger.info("Allocating %d CPU tensors...", len(parsed_gpu_tensors))
        gpu_tensors: list[torch.Tensor] = []
        cpu_tensors: list[torch.Tensor] = []
        for gpu_tensor, split_k_and_v in parsed_gpu_tensors:
            cpu_shape = list(gpu_tensor.shape)
            cpu_shape[1 if split_k_and_v else 0] = num_cpu_kernel_blocks

            logger.debug("Allocating CPU tensor of shape %r", cpu_shape)
            cpu_tensor = torch.zeros(
                cpu_shape,
                dtype=gpu_tensor.dtype,
                device="cpu",
                pin_memory=pin_memory,
            )

            gpu_tensors.extend(gpu_tensor.unbind(0) if split_k_and_v else [gpu_tensor])
            cpu_tensors.extend(cpu_tensor.unbind(0) if split_k_and_v else [cpu_tensor])

        self.gpu_to_cpu_handler = SingleDirectionOffloadingHandler(
            src_tensors=gpu_tensors,
            dst_tensors=cpu_tensors,
            src_block_size_factor=gpu_block_size_factor,
            dst_block_size_factor=cpu_block_size_factor,
        )

        self.cpu_to_gpu_handler = SingleDirectionOffloadingHandler(
            src_tensors=cpu_tensors,
            dst_tensors=gpu_tensors,
            src_block_size_factor=cpu_block_size_factor,
            dst_block_size_factor=gpu_block_size_factor,
        )

cpu_to_gpu_handler instance-attribute

cpu_to_gpu_handler = SingleDirectionOffloadingHandler(
    src_tensors=cpu_tensors,
    dst_tensors=gpu_tensors,
    src_block_size_factor=cpu_block_size_factor,
    dst_block_size_factor=gpu_block_size_factor,
)

gpu_to_cpu_handler instance-attribute

gpu_to_cpu_handler = SingleDirectionOffloadingHandler(
    src_tensors=gpu_tensors,
    dst_tensors=cpu_tensors,
    src_block_size_factor=gpu_block_size_factor,
    dst_block_size_factor=cpu_block_size_factor,
)

__init__

__init__(
    gpu_block_size: int,
    cpu_block_size: int,
    num_cpu_blocks: int,
    gpu_caches: dict[str, Tensor],
    attn_backends: dict[str, type[AttentionBackend]],
)
Source code in vllm/v1/kv_offload/worker/cpu_gpu.py
def __init__(
    self,
    gpu_block_size: int,
    cpu_block_size: int,
    num_cpu_blocks: int,
    gpu_caches: dict[str, torch.Tensor],
    attn_backends: dict[str, type[AttentionBackend]],
):
    assert gpu_caches
    assert cpu_block_size % gpu_block_size == 0

    # find kernel block size and determine layout per each gpu tensor
    kernel_block_size: int | None = None
    # list of (gpu_tensor, split_k_and_v)
    parsed_gpu_tensors: list[tuple[torch.Tensor, bool]] = []
    for layer_name, gpu_tensor in gpu_caches.items():
        gpu_shape = gpu_tensor.shape
        attn_backend = attn_backends[layer_name]
        test_shape = attn_backend.get_kv_cache_shape(
            num_blocks=1234, block_size=16, num_kv_heads=8, head_size=256
        )

        has_layers_dim = False
        split_k_and_v = False
        if len(gpu_shape) != len(test_shape):
            # cross-layers tensor
            # shape is (num_blocks, ...)
            assert len(gpu_shape) == len(test_shape) + 1
            has_layers_dim = True
            # prepend a dummy num_layers=80 to test_shape
            test_shape = (80,) + test_shape
        elif test_shape[0] != 1234:
            # shape should be (2, num_blocks, ...)
            assert test_shape[0] == 2
            assert test_shape[1] == 1234
            assert gpu_shape[0] == 2
            split_k_and_v = True

        try:
            kv_cache_stride_order = attn_backend.get_kv_cache_stride_order(
                include_num_layers_dimension=has_layers_dim
            )
            assert len(kv_cache_stride_order) == len(gpu_shape)
        except (AttributeError, NotImplementedError):
            kv_cache_stride_order = tuple(range(len(gpu_shape)))

        # permute test_shape according to stride_order
        test_shape = tuple(test_shape[i] for i in kv_cache_stride_order)

        # find block_size (16) dimension index
        block_size_idx = test_shape.index(16)
        if kernel_block_size is not None:
            assert kernel_block_size == gpu_shape[block_size_idx]
        else:
            kernel_block_size = gpu_shape[block_size_idx]
            assert gpu_block_size % kernel_block_size == 0

        parsed_gpu_tensors.append((gpu_tensor, split_k_and_v))

    assert kernel_block_size is not None
    cpu_block_size_factor = cpu_block_size // kernel_block_size
    gpu_block_size_factor = gpu_block_size // kernel_block_size
    num_cpu_kernel_blocks = num_cpu_blocks * cpu_block_size_factor

    # allocate cpu tensors
    pin_memory = is_pin_memory_available()
    logger.info("Allocating %d CPU tensors...", len(parsed_gpu_tensors))
    gpu_tensors: list[torch.Tensor] = []
    cpu_tensors: list[torch.Tensor] = []
    for gpu_tensor, split_k_and_v in parsed_gpu_tensors:
        cpu_shape = list(gpu_tensor.shape)
        cpu_shape[1 if split_k_and_v else 0] = num_cpu_kernel_blocks

        logger.debug("Allocating CPU tensor of shape %r", cpu_shape)
        cpu_tensor = torch.zeros(
            cpu_shape,
            dtype=gpu_tensor.dtype,
            device="cpu",
            pin_memory=pin_memory,
        )

        gpu_tensors.extend(gpu_tensor.unbind(0) if split_k_and_v else [gpu_tensor])
        cpu_tensors.extend(cpu_tensor.unbind(0) if split_k_and_v else [cpu_tensor])

    self.gpu_to_cpu_handler = SingleDirectionOffloadingHandler(
        src_tensors=gpu_tensors,
        dst_tensors=cpu_tensors,
        src_block_size_factor=gpu_block_size_factor,
        dst_block_size_factor=cpu_block_size_factor,
    )

    self.cpu_to_gpu_handler = SingleDirectionOffloadingHandler(
        src_tensors=cpu_tensors,
        dst_tensors=gpu_tensors,
        src_block_size_factor=cpu_block_size_factor,
        dst_block_size_factor=gpu_block_size_factor,
    )
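
A minimal construction sketch follows. The KV-cache shape, the stand-in backend class, and the block-size numbers are illustrative assumptions, not values defined by this module; a real worker obtains gpu_caches and attn_backends from its KV-cache initialization.

import torch

from vllm.v1.kv_offload.worker.cpu_gpu import CpuGpuOffloadingHandlers


class _StubBackend:
    # Stand-in for a real AttentionBackend; only the method probed by the
    # constructor is provided, and it reports a split-K/V layout of
    # (2, num_blocks, block_size, num_kv_heads, head_size).
    @staticmethod
    def get_kv_cache_shape(num_blocks, block_size, num_kv_heads, head_size):
        return (2, num_blocks, block_size, num_kv_heads, head_size)


# One GPU layer with 32 blocks of 16 tokens, 8 KV heads, head size 128.
gpu_cache = torch.zeros(2, 32, 16, 8, 128, dtype=torch.float16, device="cuda")

handlers = CpuGpuOffloadingHandlers(
    gpu_block_size=16,   # tokens per GPU KV block
    cpu_block_size=64,   # must be a multiple of gpu_block_size
    num_cpu_blocks=256,  # CPU blocks to allocate (pinned if available)
    gpu_caches={"layer.0": gpu_cache},
    attn_backends={"layer.0": _StubBackend},
)

# handlers.gpu_to_cpu_handler offloads GPU blocks into the CPU tensors;
# handlers.cpu_to_gpu_handler copies them back.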

SingleDirectionOffloadingHandler

Bases: OffloadingHandler

SingleDirectionOffloadingHandler handles transfers for a single direction, either CPU->GPU or GPU->CPU. Transfers are guaranteed to be executed in order of their submission. Each transfer uses a unique CUDA stream, and its stream will start executing only after the streams of previous transfers have finished.
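
A minimal sketch of the intended lifecycle, assuming handler is an already-constructed instance (for example handlers.gpu_to_cpu_handler from the sketch above) and that transfer_spec is a (source, destination) pair of BlockIDsLoadStoreSpec objects prepared by a higher-level offloading manager; both names are placeholders here.

# Submit the copy; it runs on its own CUDA stream, ordered after any
# previously submitted transfer of this handler.
assert handler.transfer_async(job_id=7, transfer_spec=transfer_spec)

# Non-blocking poll: returns (job_id, success) pairs for transfers whose
# CUDA events have completed, in submission order.
for job_id, ok in handler.get_finished():
    print(f"transfer {job_id} finished: {ok}")

# Blocking wait on specific jobs, e.g. before their blocks are reused.
handler.wait({7})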

Source code in vllm/v1/kv_offload/worker/cpu_gpu.py
class SingleDirectionOffloadingHandler(OffloadingHandler):
    """
    SingleDirectionOffloadingHandler handles transfers for a single direction,
    either CPU->GPU or GPU->CPU.
    Transfers are guaranteed to be executed in order of their submission.
    Each transfer uses a unique CUDA stream, and its stream will start
    executing only after the streams of previous transfers have finished.
    """

    def __init__(
        self,
        src_tensors: list[torch.Tensor],
        dst_tensors: list[torch.Tensor],
        src_block_size_factor: int,
        dst_block_size_factor: int,
    ):
        """
        Initialize a SingleDirectionOffloadingHandler.

        Args:
            src_tensors: list of KV cache tensors to copy from.
            dst_tensors: list of KV cache tensors to copy to.
                Order should match src_tensors.
            src_block_size_factor: The number of kernel blocks
                per KV block in a source tensor.
            dst_block_size_factor: The number of kernel blocks
                per KV block in a destination tensor.
        """
        assert len(src_tensors) == len(dst_tensors)

        self.src_tensors: list[torch.Tensor] = src_tensors
        self.dst_tensors: list[torch.Tensor] = dst_tensors
        min_block_size_factor = min(src_block_size_factor, dst_block_size_factor)
        self.src_block_size_factor: int = src_block_size_factor // min_block_size_factor
        self.dst_block_size_factor: int = dst_block_size_factor // min_block_size_factor

        self.block_size_in_bytes = [
            tensor.element_size() * tensor.stride(0) * min_block_size_factor
            for tensor in src_tensors
        ]

        assert len(src_tensors) > 0
        self.gpu_to_cpu: bool = self.src_tensors[0].is_cuda

        # job_id -> event
        self._transfer_events: dict[int, torch.Event] = {}
        # queue of transfers (job_id, stream, event)
        self._transfers: deque[tuple[int, torch.cuda.Stream, torch.Event]] = deque()
        # list of CUDA streams available for re-use
        self._stream_pool: list[torch.cuda.Stream] = []
        # list of CUDA events available for re-use
        self._event_pool: list[torch.Event] = []

    def transfer_async(self, job_id: int, transfer_spec: TransferSpec) -> bool:
        src_spec, dst_spec = transfer_spec
        assert isinstance(src_spec, BlockIDsLoadStoreSpec)
        assert isinstance(dst_spec, BlockIDsLoadStoreSpec)

        src_blocks = src_spec.block_ids
        dst_blocks = dst_spec.block_ids
        assert src_blocks.ndim == 1
        assert dst_blocks.ndim == 1

        src_sub_block_count = src_blocks.size * self.src_block_size_factor
        dst_sub_block_count = dst_blocks.size * self.dst_block_size_factor
        src_sub_blocks_to_skip = -dst_blocks.size % self.src_block_size_factor

        assert dst_sub_block_count == src_sub_block_count - src_sub_blocks_to_skip

        src_to_dst = np.empty((dst_sub_block_count, 2), dtype=np.int64)
        expand_block_ids(
            src_blocks,
            self.src_block_size_factor,
            src_to_dst[:, 0],
            skip_count=src_sub_blocks_to_skip,
        )
        expand_block_ids(dst_blocks, self.dst_block_size_factor, src_to_dst[:, 1])
        src_to_dst_tensor = torch.from_numpy(src_to_dst)

        stream = self._stream_pool.pop() if self._stream_pool else torch.cuda.Stream()
        event = self._event_pool.pop() if self._event_pool else torch.Event()

        if self.gpu_to_cpu:
            # wait for model computation to finish before offloading
            stream.wait_stream(torch.cuda.current_stream())
        if self._transfers:
            _, _, last_event = self._transfers[-1]
            # ensure the job starts only after the previous one completes
            stream.wait_event(last_event)
        with torch.cuda.stream(stream):
            for src_tensor, dst_tensor, block_size_in_bytes in zip(
                self.src_tensors,
                self.dst_tensors,
                self.block_size_in_bytes,
            ):
                ops.swap_blocks(
                    src_tensor,
                    dst_tensor,
                    block_size_in_bytes,
                    src_to_dst_tensor,
                )
            event.record(stream)

        self._transfer_events[job_id] = event
        self._transfers.append((job_id, stream, event))

        # success
        return True

    def get_finished(self) -> list[TransferResult]:
        results: list[TransferResult] = []
        while self._transfers and self._transfers[0][2].query():
            job_id, stream, event = self._transfers.popleft()
            results.append((job_id, True))
            self._stream_pool.append(stream)
            self._event_pool.append(event)
            del self._transfer_events[job_id]
        return results

    def wait(self, job_ids: set[int]):
        for job_id in job_ids:
            event = self._transfer_events.get(job_id)
            if event is not None:
                event.synchronize()

_event_pool instance-attribute

_event_pool: list[Event] = []

_stream_pool instance-attribute

_stream_pool: list[Stream] = []

_transfer_events instance-attribute

_transfer_events: dict[int, Event] = {}

_transfers instance-attribute

_transfers: deque[tuple[int, Stream, Event]] = deque()

block_size_in_bytes instance-attribute

block_size_in_bytes = [
    (element_size() * stride(0) * min_block_size_factor)
    for tensor in src_tensors
]

dst_block_size_factor instance-attribute

dst_block_size_factor: int = (
    dst_block_size_factor // min_block_size_factor
)

dst_tensors instance-attribute

dst_tensors: list[Tensor] = dst_tensors

gpu_to_cpu instance-attribute

gpu_to_cpu: bool = is_cuda

src_block_size_factor instance-attribute

src_block_size_factor: int = (
    src_block_size_factor // min_block_size_factor
)

src_tensors instance-attribute

src_tensors: list[Tensor] = src_tensors

__init__

__init__(
    src_tensors: list[Tensor],
    dst_tensors: list[Tensor],
    src_block_size_factor: int,
    dst_block_size_factor: int,
)

Initialize a SingleDirectionOffloadingHandler.

Parameters:

    src_tensors (list[Tensor], required):
        List of KV cache tensors to copy from.
    dst_tensors (list[Tensor], required):
        List of KV cache tensors to copy to. Order should match src_tensors.
    src_block_size_factor (int, required):
        The number of kernel blocks per KV block in a source tensor.
    dst_block_size_factor (int, required):
        The number of kernel blocks per KV block in a destination tensor.
Source code in vllm/v1/kv_offload/worker/cpu_gpu.py
def __init__(
    self,
    src_tensors: list[torch.Tensor],
    dst_tensors: list[torch.Tensor],
    src_block_size_factor: int,
    dst_block_size_factor: int,
):
    """
    Initialize a SingleDirectionOffloadingHandler.

    Args:
        src_tensors: list of KV cache tensors to copy from.
        dst_tensors: list of KV cache tensors to copy to.
            Order should match src_tensors.
        src_block_size_factor: The number of kernel blocks
            per KV block in a source tensor.
        dst_block_size_factor: The number of kernel blocks
            per KV block in a destination tensor.
    """
    assert len(src_tensors) == len(dst_tensors)

    self.src_tensors: list[torch.Tensor] = src_tensors
    self.dst_tensors: list[torch.Tensor] = dst_tensors
    min_block_size_factor = min(src_block_size_factor, dst_block_size_factor)
    self.src_block_size_factor: int = src_block_size_factor // min_block_size_factor
    self.dst_block_size_factor: int = dst_block_size_factor // min_block_size_factor

    self.block_size_in_bytes = [
        tensor.element_size() * tensor.stride(0) * min_block_size_factor
        for tensor in src_tensors
    ]

    assert len(src_tensors) > 0
    self.gpu_to_cpu: bool = self.src_tensors[0].is_cuda

    # job_id -> event
    self._transfer_events: dict[int, torch.Event] = {}
    # queue of transfers (job_id, stream, event)
    self._transfers: deque[tuple[int, torch.cuda.Stream, torch.Event]] = deque()
    # list of CUDA streams available for re-use
    self._stream_pool: list[torch.cuda.Stream] = []
    # list of CUDA events available for re-use
    self._event_pool: list[torch.Event] = []

get_finished

get_finished() -> list[TransferResult]
Source code in vllm/v1/kv_offload/worker/cpu_gpu.py
def get_finished(self) -> list[TransferResult]:
    results: list[TransferResult] = []
    while self._transfers and self._transfers[0][2].query():
        job_id, stream, event = self._transfers.popleft()
        results.append((job_id, True))
        self._stream_pool.append(stream)
        self._event_pool.append(event)
        del self._transfer_events[job_id]
    return results

transfer_async

transfer_async(
    job_id: int, transfer_spec: TransferSpec
) -> bool
Source code in vllm/v1/kv_offload/worker/cpu_gpu.py
def transfer_async(self, job_id: int, transfer_spec: TransferSpec) -> bool:
    src_spec, dst_spec = transfer_spec
    assert isinstance(src_spec, BlockIDsLoadStoreSpec)
    assert isinstance(dst_spec, BlockIDsLoadStoreSpec)

    src_blocks = src_spec.block_ids
    dst_blocks = dst_spec.block_ids
    assert src_blocks.ndim == 1
    assert dst_blocks.ndim == 1

    src_sub_block_count = src_blocks.size * self.src_block_size_factor
    dst_sub_block_count = dst_blocks.size * self.dst_block_size_factor
    src_sub_blocks_to_skip = -dst_blocks.size % self.src_block_size_factor

    assert dst_sub_block_count == src_sub_block_count - src_sub_blocks_to_skip

    src_to_dst = np.empty((dst_sub_block_count, 2), dtype=np.int64)
    expand_block_ids(
        src_blocks,
        self.src_block_size_factor,
        src_to_dst[:, 0],
        skip_count=src_sub_blocks_to_skip,
    )
    expand_block_ids(dst_blocks, self.dst_block_size_factor, src_to_dst[:, 1])
    src_to_dst_tensor = torch.from_numpy(src_to_dst)

    stream = self._stream_pool.pop() if self._stream_pool else torch.cuda.Stream()
    event = self._event_pool.pop() if self._event_pool else torch.Event()

    if self.gpu_to_cpu:
        # wait for model computation to finish before offloading
        stream.wait_stream(torch.cuda.current_stream())
    if self._transfers:
        _, _, last_event = self._transfers[-1]
        # ensure the job starts only after the previous one completes
        stream.wait_event(last_event)
    with torch.cuda.stream(stream):
        for src_tensor, dst_tensor, block_size_in_bytes in zip(
            self.src_tensors,
            self.dst_tensors,
            self.block_size_in_bytes,
        ):
            ops.swap_blocks(
                src_tensor,
                dst_tensor,
                block_size_in_bytes,
                src_to_dst_tensor,
            )
        event.record(stream)

    self._transfer_events[job_id] = event
    self._transfers.append((job_id, stream, event))

    # success
    return True
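
To make the sub-block bookkeeping in transfer_async concrete, here is a worked example for a CPU->GPU load, assuming src_block_size_factor = 4 (each CPU block spans four kernel-sized sub-blocks) and dst_block_size_factor = 1 (GPU blocks are kernel-sized); the block IDs are made up for illustration.

import numpy as np

src_block_size_factor = 4                  # CPU block = 4 kernel blocks
dst_block_size_factor = 1                  # GPU block = 1 kernel block
src_blocks = np.array([10, 11])            # CPU block IDs
dst_blocks = np.array([3, 4, 5, 6, 7, 8])  # GPU block IDs

src_sub_block_count = src_blocks.size * src_block_size_factor      # 8 available
dst_sub_block_count = dst_blocks.size * dst_block_size_factor      # 6 needed
src_sub_blocks_to_skip = -dst_blocks.size % src_block_size_factor  # 2
assert dst_sub_block_count == src_sub_block_count - src_sub_blocks_to_skip

# expand_block_ids then emits source sub-blocks [42, 43, 44, 45, 46, 47]
# (block 10 expands to 40..43 with its first two entries skipped, block 11
# to 44..47), paired one-to-one with destination sub-blocks [3, 4, 5, 6, 7, 8].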

wait

wait(job_ids: set[int])
Source code in vllm/v1/kv_offload/worker/cpu_gpu.py
def wait(self, job_ids: set[int]):
    for job_id in job_ids:
        event = self._transfer_events.get(job_id)
        if event is not None:
            event.synchronize()

expand_block_ids

expand_block_ids(
    block_ids: ndarray,
    block_size_factor: int,
    output: ndarray,
    skip_count: int = 0,
)

Convert a list of block IDs to the list of matching block IDs, assuming each block is composed of block_size_factor smaller blocks. The result is written into the output array. The first skip_count of these smaller blocks are skipped; note that skip_count must be less than block_size_factor.

For example, if block_ids = [0, 1, 3] and block_size_factor = 4, the function yields [0, 1, 2, 3, 4, 5, 6, 7, 12, 13, 14, 15], since 0 maps to [0, 1, 2, 3], 1 maps to [4, 5, 6, 7], and 3 maps to [12, 13, 14, 15].

Source code in vllm/v1/kv_offload/worker/cpu_gpu.py
def expand_block_ids(
    block_ids: np.ndarray,
    block_size_factor: int,
    output: np.ndarray,
    skip_count: int = 0,
):
    """
    Convert a list of block IDs to a list of matching block ids,
    assuming each block is composed of actual block_size_factor blocks.
    Outputs to output tensor.
    The first skip_count blocks will be skipped.
    Note that skip_count must be less than block_size_factor.

    For example, if block_ids = [0, 1, 3] and block_size_factor =  4,
    then it yields [0, 1, 2, 3, 4, 5, 6, 7, 12, 13, 14, 15]
    since 0 maps to [0, 1, 2, 3]
    1 maps to [4, 5, 6, 7]
    and 3 maps to [12, 13, 14, 15]
    """
    assert skip_count < block_size_factor

    first_range = np.arange(skip_count, block_size_factor)
    full_range = np.arange(0, block_size_factor)

    output_idx = 0
    for i, block_id in enumerate(block_ids):
        base_block_id = block_id * block_size_factor
        indices = first_range if i == 0 else full_range
        output_end_idx = output_idx + len(indices)
        output[output_idx:output_end_idx] = base_block_id + indices
        output_idx = output_end_idx
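
As a quick check of the example in the docstring, and of the skip_count behavior, the following sketch reproduces both expansions:

import numpy as np

from vllm.v1.kv_offload.worker.cpu_gpu import expand_block_ids

out = np.empty(12, dtype=np.int64)
expand_block_ids(np.array([0, 1, 3]), block_size_factor=4, output=out)
print(out.tolist())  # [0, 1, 2, 3, 4, 5, 6, 7, 12, 13, 14, 15]

# With skip_count, the first block contributes only its tail:
out = np.empty(10, dtype=np.int64)
expand_block_ids(np.array([0, 1, 3]), block_size_factor=4, output=out, skip_count=2)
print(out.tolist())  # [2, 3, 4, 5, 6, 7, 12, 13, 14, 15]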