vllm.model_executor.layers.fla.ops.chunk_scaled_dot_kkt

chunk_scaled_dot_kkt_fwd

chunk_scaled_dot_kkt_fwd(
    k: Tensor,
    g: Tensor | None = None,
    beta: Tensor | None = None,
    cu_seqlens: LongTensor | None = None,
    chunk_size: int = 64,
    output_dtype: dtype = float32,
) -> Tensor

Compute beta * K * K^T within each chunk of size chunk_size along the sequence dimension.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| k | Tensor | The key tensor of shape [B, T, H, K] (the number of key heads may differ from the number of beta heads; see the source comment below). | required |
| beta | Tensor | The beta tensor of shape [B, T, H]. | None |
| g | Tensor | The cumulative sum of the gate tensor of shape [B, T, H]. | None |
| cu_seqlens | LongTensor | The cumulative sequence lengths of the input tensor. | None |
| chunk_size | int | The chunk size. | 64 |
| output_dtype | dtype | The dtype of the output tensor. | float32 |

Returns:

| Type | Description |
| --- | --- |
| Tensor | beta * K * K^T of shape [B, T, H, BT], where BT is the chunk size. |
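
A minimal usage sketch (illustrative only, not taken from the vLLM sources), assuming a CUDA device, bfloat16 inputs, and example shapes; the number of key heads Hg is assumed to divide the number of beta heads H, mirroring the head-sharing noted in the source comment below:

```python
import torch

from vllm.model_executor.layers.fla.ops.chunk_scaled_dot_kkt import (
    chunk_scaled_dot_kkt_fwd,
)

# Illustrative shapes (assumptions): B=2 sequences, T=256 tokens,
# Hg=4 key heads, H=8 beta heads, K=64 head dimension.
B, T, Hg, H, K = 2, 256, 4, 8, 64
k = torch.randn(B, T, Hg, K, device="cuda", dtype=torch.bfloat16)
beta = torch.rand(B, T, H, device="cuda", dtype=torch.bfloat16)

A = chunk_scaled_dot_kkt_fwd(k, beta=beta, chunk_size=64)
print(A.shape)  # torch.Size([2, 256, 8, 64]), i.e. [B, T, H, BT]
```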

Source code in vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py
def chunk_scaled_dot_kkt_fwd(
    k: torch.Tensor,
    g: torch.Tensor | None = None,
    beta: torch.Tensor | None = None,
    cu_seqlens: torch.LongTensor | None = None,
    chunk_size: int = 64,
    output_dtype: torch.dtype = torch.float32,
) -> torch.Tensor:
    r"""
    Compute beta * K * K^T.

    Args:
        k (torch.Tensor):
            The key tensor of shape `[B, T, H, K]`.
        beta (torch.Tensor):
            The beta tensor of shape `[B, T, H]`.
        g (torch.Tensor):
            The cumulative sum of the gate tensor of shape `[B, T, H]`. Default: `None`.
        cu_seqlens (torch.LongTensor):
            The cumulative sequence lengths of the input tensor.
            Default: None
        chunk_size (int):
            The chunk size. Default: 64.
        output_dtype (torch.dtype):
            The dtype of the output tensor. Default: `torch.float32`

    Returns:
        beta * K * K^T of shape `[B, T, H, BT]` where `BT` is the chunk size.
    """
    # This kernel is slightly different from fla to support Q/K with different head numbers.
    # In fla, Q/K always have the same head number, so Hg is always equal to H.
    B, T, Hg, K = k.shape
    H = beta.shape[-1]
    BT = chunk_size
    chunk_indices = (
        prepare_chunk_indices(cu_seqlens, BT) if cu_seqlens is not None else None
    )
    NT = triton.cdiv(T, BT) if cu_seqlens is None else len(chunk_indices)

    A = torch.empty(B, T, H, BT, device=k.device, dtype=output_dtype)
    chunk_scaled_dot_kkt_fwd_kernel[(NT, B * H)](
        k=k,
        g=g,
        beta=beta,
        A=A,
        cu_seqlens=cu_seqlens,
        chunk_indices=chunk_indices,
        T=T,
        H=H,
        Hg=Hg,
        K=K,
        BT=BT,
    )
    return A
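
For packed variable-length batches, cu_seqlens drives the per-sequence chunking through prepare_chunk_indices. A hedged sketch of that calling pattern, assuming the usual convention in these fla-style kernels that packed inputs use a batch dimension of 1 and T equal to the total token count:

```python
# Hypothetical variable-length call (assumption: packed inputs use B = 1).
# Two sequences of lengths 100 and 156 packed into T = 256 tokens.
cu_seqlens = torch.tensor([0, 100, 256], dtype=torch.long, device="cuda")
k = torch.randn(1, 256, 4, 64, device="cuda", dtype=torch.bfloat16)
beta = torch.rand(1, 256, 8, device="cuda", dtype=torch.bfloat16)

# Chunk indices are derived per sequence, so chunks are not expected to
# straddle the boundary between the two packed sequences at token 100.
A = chunk_scaled_dot_kkt_fwd(
    k, beta=beta, cu_seqlens=cu_seqlens, chunk_size=64
)
```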