vllm.renderers.grok2

logger module-attribute

logger = init_logger(__name__)

Grok2Renderer

Bases: RendererLike
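
Renders OpenAI-style chat messages for Grok-2 models: messages are parsed via parse_chat_messages, run through the Grok2Tokenizer chat template, and wrapped as a TextPrompt (string output) or TokensPrompt (token-id output), with any multimodal data and UUIDs attached.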

Source code in vllm/renderers/grok2.py
class Grok2Renderer(RendererLike):
    @classmethod
    def from_config(
        cls,
        config: ModelConfig,
        tokenizer_kwargs: dict[str, Any],
    ) -> "RendererLike":
        return cls(config, tokenizer_kwargs)

    def __init__(
        self,
        config: ModelConfig,
        tokenizer_kwargs: dict[str, Any],
    ) -> None:
        super().__init__()

        self.config = config

        if config.skip_tokenizer_init:
            tokenizer = None
        else:
            tokenizer = cached_get_tokenizer(
                tokenizer_cls=Grok2Tokenizer,
                **tokenizer_kwargs,
            )

        self._tokenizer = tokenizer

    @property
    def tokenizer(self) -> Grok2Tokenizer | None:
        return self._tokenizer

    def get_tokenizer(self) -> Grok2Tokenizer:
        tokenizer = self.tokenizer
        if tokenizer is None:
            raise ValueError("Tokenizer not available when `skip_tokenizer_init=True`")

        return tokenizer

    def render_messages(
        self,
        messages: list[ChatCompletionMessageParam],
        **kwargs,
    ) -> tuple[list[ConversationMessage], TextPrompt | TokensPrompt]:
        tokenizer = self.get_tokenizer()
        conversation, mm_data, mm_uuids = parse_chat_messages(
            messages,
            self.config,
            content_format="string",
        )

        prompt_raw = tokenizer.apply_chat_template(
            conversation=conversation,
            messages=messages,
            **kwargs,
        )

        prompt = (
            TextPrompt(prompt=prompt_raw)
            if isinstance(prompt_raw, str)
            else TokensPrompt(prompt_token_ids=prompt_raw)
        )
        if mm_data is not None:
            prompt["multi_modal_data"] = mm_data
        if mm_uuids is not None:
            prompt["multi_modal_uuids"] = mm_uuids

        return conversation, prompt  # type: ignore[return-value]

    async def render_messages_async(
        self,
        messages: list[ChatCompletionMessageParam],
        **kwargs,
    ) -> tuple[list[ConversationMessage], TextPrompt | TokensPrompt]:
        tokenizer = self.get_tokenizer()
        conversation, mm_data, mm_uuids = await parse_chat_messages_async(
            messages,
            self.config,
            content_format="string",
        )

        prompt_raw = tokenizer.apply_chat_template(
            conversation=conversation,
            messages=messages,
            **kwargs,
        )

        prompt = (
            TextPrompt(prompt=prompt_raw)
            if isinstance(prompt_raw, str)
            else TokensPrompt(prompt_token_ids=prompt_raw)
        )
        if mm_data is not None:
            prompt["multi_modal_data"] = mm_data
        if mm_uuids is not None:
            prompt["multi_modal_uuids"] = mm_uuids

        return conversation, prompt  # type: ignore[return-value]
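
A minimal usage sketch. The import paths follow the source location shown above; the `config` argument is assumed to be a ModelConfig already built for a Grok-2 checkpoint, and the empty tokenizer_kwargs is for illustration only:

from vllm.config import ModelConfig
from vllm.renderers.grok2 import Grok2Renderer


def render_example(config: ModelConfig):
    # from_config simply forwards to __init__, which loads the
    # Grok2Tokenizer unless config.skip_tokenizer_init is set.
    renderer = Grok2Renderer.from_config(config, tokenizer_kwargs={})

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ]

    # Returns the parsed conversation plus a TextPrompt or TokensPrompt,
    # depending on what the chat template produced.
    conversation, prompt = renderer.render_messages(messages)
    return conversation, prompt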

_tokenizer instance-attribute

_tokenizer = tokenizer

config instance-attribute

config = config

tokenizer property

tokenizer: Grok2Tokenizer | None

__init__

__init__(
    config: ModelConfig, tokenizer_kwargs: dict[str, Any]
) -> None
Source code in vllm/renderers/grok2.py
def __init__(
    self,
    config: ModelConfig,
    tokenizer_kwargs: dict[str, Any],
) -> None:
    super().__init__()

    self.config = config

    if config.skip_tokenizer_init:
        tokenizer = None
    else:
        tokenizer = cached_get_tokenizer(
            tokenizer_cls=Grok2Tokenizer,
            **tokenizer_kwargs,
        )

    self._tokenizer = tokenizer

from_config classmethod

from_config(
    config: ModelConfig, tokenizer_kwargs: dict[str, Any]
) -> RendererLike
Source code in vllm/renderers/grok2.py
@classmethod
def from_config(
    cls,
    config: ModelConfig,
    tokenizer_kwargs: dict[str, Any],
) -> "RendererLike":
    return cls(config, tokenizer_kwargs)

get_tokenizer

get_tokenizer() -> Grok2Tokenizer
Source code in vllm/renderers/grok2.py
def get_tokenizer(self) -> Grok2Tokenizer:
    tokenizer = self.tokenizer
    if tokenizer is None:
        raise ValueError("Tokenizer not available when `skip_tokenizer_init=True`")

    return tokenizer
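
A hedged sketch of the guard above: when the config sets skip_tokenizer_init, the tokenizer property is None and get_tokenizer() raises (`config` here is an assumed ModelConfig with skip_tokenizer_init=True):

renderer = Grok2Renderer(config, tokenizer_kwargs={})

assert renderer.tokenizer is None
try:
    renderer.get_tokenizer()
except ValueError as exc:
    print(exc)  # Tokenizer not available when `skip_tokenizer_init=True`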

render_messages

render_messages(
    messages: list[ChatCompletionMessageParam], **kwargs
) -> tuple[
    list[ConversationMessage], TextPrompt | TokensPrompt
]
Source code in vllm/renderers/grok2.py
def render_messages(
    self,
    messages: list[ChatCompletionMessageParam],
    **kwargs,
) -> tuple[list[ConversationMessage], TextPrompt | TokensPrompt]:
    tokenizer = self.get_tokenizer()
    conversation, mm_data, mm_uuids = parse_chat_messages(
        messages,
        self.config,
        content_format="string",
    )

    prompt_raw = tokenizer.apply_chat_template(
        conversation=conversation,
        messages=messages,
        **kwargs,
    )

    prompt = (
        TextPrompt(prompt=prompt_raw)
        if isinstance(prompt_raw, str)
        else TokensPrompt(prompt_token_ids=prompt_raw)
    )
    if mm_data is not None:
        prompt["multi_modal_data"] = mm_data
    if mm_uuids is not None:
        prompt["multi_modal_uuids"] = mm_uuids

    return conversation, prompt  # type: ignore[return-value]
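
Because render_messages returns either prompt variant, callers may need to branch on the result. A small sketch, relying on TextPrompt and TokensPrompt being TypedDicts whose keys match the construction above:

conversation, prompt = renderer.render_messages(messages)

if "prompt" in prompt:
    text = prompt["prompt"]  # TextPrompt: the chat template returned a string
else:
    token_ids = prompt["prompt_token_ids"]  # TokensPrompt: it returned token ids

mm_data = prompt.get("multi_modal_data")  # set only for multimodal messages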

render_messages_async async

render_messages_async(
    messages: list[ChatCompletionMessageParam], **kwargs
) -> tuple[
    list[ConversationMessage], TextPrompt | TokensPrompt
]
Source code in vllm/renderers/grok2.py
async def render_messages_async(
    self,
    messages: list[ChatCompletionMessageParam],
    **kwargs,
) -> tuple[list[ConversationMessage], TextPrompt | TokensPrompt]:
    tokenizer = self.get_tokenizer()
    conversation, mm_data, mm_uuids = await parse_chat_messages_async(
        messages,
        self.config,
        content_format="string",
    )

    prompt_raw = tokenizer.apply_chat_template(
        conversation=conversation,
        messages=messages,
        **kwargs,
    )

    prompt = (
        TextPrompt(prompt=prompt_raw)
        if isinstance(prompt_raw, str)
        else TokensPrompt(prompt_token_ids=prompt_raw)
    )
    if mm_data is not None:
        prompt["multi_modal_data"] = mm_data
    if mm_uuids is not None:
        prompt["multi_modal_uuids"] = mm_uuids

    return conversation, prompt  # type: ignore[return-value]
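
The async variant differs from render_messages only in awaiting parse_chat_messages_async; rendering is otherwise identical. A sketch, assuming `renderer` is built as shown under from_config:

import asyncio


async def main(renderer: Grok2Renderer) -> None:
    messages = [{"role": "user", "content": "Hello!"}]
    conversation, prompt = await renderer.render_messages_async(messages)
    print(prompt)


# asyncio.run(main(renderer))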