vllm.model_executor.models.whisper

WhisperAudioInputs

Bases: TensorSchema

Dimensions
  • b: Batch size
  • nmb: Number of mel bins
  • t: Time frames (M)
Source code in vllm/model_executor/models/whisper.py
class WhisperAudioInputs(TensorSchema):
    """
    Dimensions:
        - b: Batch size
        - nmb: Number of mel bins
        - t: Time frames (M)
    """

    input_features: Annotated[
        list[torch.Tensor] | None,
        TensorShape("b", "nmb", "t"),
    ]
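
A hedged sketch of data that fits this schema. The concrete sizes are assumptions: most Whisper checkpoints use 80 mel bins (128 for large-v3), and the HF feature extractor pads every clip to 30 s, i.e. 3000 frames; the list dimension is taken to map to the batch axis b.

import torch

# Assumed sizes: 80 mel bins, 3000 time frames (30 s at a 10 ms hop).
batch_size, num_mel_bins, time_frames = 2, 80, 3000

# One (nmb, t) tensor per clip; the list length plays the role of b.
input_features = [
    torch.randn(num_mel_bins, time_frames) for _ in range(batch_size)
]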

WhisperEncoderAttention

Bases: MMEncoderAttention

Multi-headed attention for Whisper encoder with 2D tensor support.

Source code in vllm/model_executor/models/whisper.py
class WhisperEncoderAttention(MMEncoderAttention):
    """Multi-headed attention for Whisper encoder with 2D tensor support."""

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
    ) -> torch.Tensor:
        """
        Input shape: batch_size x seq_len x hidden_size
                     or seq_len x hidden_size
        """
        is_2d = query.dim() == 2
        if is_2d:
            query = query.unsqueeze(0)
            key = key.unsqueeze(0)
            value = value.unsqueeze(0)

        # Call the parent forward method
        out = super().forward(query, key, value)

        if is_2d:
            out = out.squeeze(0)

        return out

forward

forward(
    query: Tensor, key: Tensor, value: Tensor
) -> Tensor
Input shape: batch_size x seq_len x hidden_size, or seq_len x hidden_size

Source code in vllm/model_executor/models/whisper.py
def forward(
    self,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
) -> torch.Tensor:
    """
    Input shape: batch_size x seq_len x hidden_size
                 or seq_len x hidden_size
    """
    is_2d = query.dim() == 2
    if is_2d:
        query = query.unsqueeze(0)
        key = key.unsqueeze(0)
        value = value.unsqueeze(0)

    # Call the parent forward method
    out = super().forward(query, key, value)

    if is_2d:
        out = out.squeeze(0)

    return out
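
The 2D path lets the encoder attention run on inputs without an explicit batch dimension. A minimal, self-contained sketch of the same promote-then-restore pattern in plain PyTorch (independent of the MMEncoderAttention base class; the helper name is made up):

import torch
import torch.nn.functional as F

def with_batch_dim(attn_fn, query, key, value):
    # Promote (seq_len, hidden) inputs to (1, seq_len, hidden), run the
    # batched attention, then drop the temporary batch dim again.
    is_2d = query.dim() == 2
    if is_2d:
        query, key, value = (t.unsqueeze(0) for t in (query, key, value))
    out = attn_fn(query, key, value)
    return out.squeeze(0) if is_2d else out

q = k = v = torch.randn(16, 64)  # (seq_len, hidden), no batch dim
out = with_batch_dim(F.scaled_dot_product_attention, q, k, v)
print(out.shape)  # torch.Size([16, 64])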

WhisperProcessingInfo

Bases: BaseProcessingInfo

Source code in vllm/model_executor/models/whisper.py
class WhisperProcessingInfo(BaseProcessingInfo):
    def get_hf_config(self) -> WhisperConfig:
        return self.ctx.get_hf_config(WhisperConfig)

    def get_data_parser(self):
        feature_extractor = self.get_feature_extractor()

        return MultiModalDataParser(
            target_sr=feature_extractor.sampling_rate,
            target_channels=self.get_target_channels(),
            expected_hidden_size=self._get_expected_hidden_size(),
        )

    @property
    def skip_prompt_length_check(self) -> bool:
        return True  # Because the encoder prompt is padded

    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        return {"audio": 1}

    def get_feature_extractor(self, **kwargs: object) -> WhisperFeatureExtractor:
        hf_processor = self.get_hf_processor(**kwargs)
        feature_extractor = hf_processor.feature_extractor  # type: ignore
        assert isinstance(feature_extractor, WhisperFeatureExtractor)
        return feature_extractor

    def get_target_channels(self) -> int:
        """Return target audio channels for Whisper models (mono)."""
        return 1

    def get_num_audio_tokens(self) -> int:
        return self.get_hf_config().max_source_positions

get_target_channels

get_target_channels() -> int

Return target audio channels for Whisper models (mono).

Source code in vllm/model_executor/models/whisper.py
def get_target_channels(self) -> int:
    """Return target audio channels for Whisper models (mono)."""
    return 1
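
To see where these values come from, one can query the Hugging Face objects this class wraps (the checkpoint name below is illustrative):

from transformers import WhisperConfig, WhisperFeatureExtractor

config = WhisperConfig.from_pretrained("openai/whisper-small")
print(config.max_source_positions)  # 1500, what get_num_audio_tokens returns

fe = WhisperFeatureExtractor.from_pretrained("openai/whisper-small")
print(fe.sampling_rate)  # 16000 Hz, the target_sr for MultiModalDataParser
print(fe.feature_size)   # 80 mel bins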

_create_fake_bias_for_k_proj

_create_fake_bias_for_k_proj(
    weights: Iterable[tuple[str, Tensor]],
    fake_bias_key_name: str,
) -> Iterable[tuple[str, Tensor]]

Create an all-zeros bias for the k_proj weight in self-attn and x-attn layers, so that the bias for k_proj in qkv_proj can be initialized with zeros.

Source code in vllm/model_executor/models/whisper.py
def _create_fake_bias_for_k_proj(
    weights: Iterable[tuple[str, torch.Tensor]], fake_bias_key_name: str
) -> Iterable[tuple[str, torch.Tensor]]:
    """
    Create full zeros bias for k_proj weight in self-attn and x-attn layers.
    So that the bias for k_proj in qkv_proj can be initialized with zeros.
    """
    for name, weight in weights:
        yield name, weight
        if name.endswith(fake_bias_key_name):
            bias = torch.zeros(weight.size(0))
            bias_name = name.replace("weight", "bias")
            yield bias_name, bias
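
A small usage sketch (layer names and tensor sizes are made up for illustration): every weight passes through unchanged, and immediately after each matching k_proj weight a zero bias is emitted under the corresponding name.

import torch

weights = [
    ("model.encoder.layers.0.self_attn.k_proj.weight", torch.randn(512, 512)),
    ("model.encoder.layers.0.self_attn.q_proj.weight", torch.randn(512, 512)),
]

for name, tensor in _create_fake_bias_for_k_proj(weights, "k_proj.weight"):
    print(name, tuple(tensor.shape))
# model.encoder.layers.0.self_attn.k_proj.weight (512, 512)
# model.encoder.layers.0.self_attn.k_proj.bias (512,)   <- injected zeros
# model.encoder.layers.0.self_attn.q_proj.weight (512, 512)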