attention

Classes

fastvideo.attention.AttentionBackend

Bases: ABC

Abstract class for attention backends.

fastvideo.attention.AttentionMetadata dataclass

AttentionMetadata(current_timestep: int)

Attention metadata for prefill and decode batched together.

Functions

fastvideo.attention.AttentionMetadata.asdict_zerocopy
asdict_zerocopy(skip_fields: set[str] | None = None) -> dict[str, Any]

Similar to dataclasses.asdict, but avoids deepcopying.

Source code in fastvideo/attention/backends/abstract.py
def asdict_zerocopy(self, skip_fields: set[str] | None = None) -> dict[str, Any]:
    """Similar to dataclasses.asdict, but avoids deepcopying."""
    if skip_fields is None:
        skip_fields = set()
    # Note that if we add dataclasses as fields, they will need
    # similar handling.
    return {field.name: getattr(self, field.name) for field in fields(self) if field.name not in skip_fields}
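
For illustration, here is a minimal sketch of the zero-copy behaviour: unlike dataclasses.asdict, the returned dict holds the very same tensor objects as the dataclass, with no deep copy. The toy metadata class below is hypothetical and exists only for this example.

from dataclasses import dataclass, fields
from typing import Any

import torch


@dataclass
class ToyMetadata:
    current_timestep: int
    mask: torch.Tensor

    def asdict_zerocopy(self, skip_fields: set[str] | None = None) -> dict[str, Any]:
        # same logic as above: plain attribute access, no deepcopy
        if skip_fields is None:
            skip_fields = set()
        return {f.name: getattr(self, f.name) for f in fields(self) if f.name not in skip_fields}


meta = ToyMetadata(current_timestep=3, mask=torch.ones(2, 2))
d = meta.asdict_zerocopy(skip_fields={"current_timestep"})
assert "current_timestep" not in d
assert d["mask"] is meta.mask  # the same tensor object, not a copy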

fastvideo.attention.AttentionMetadataBuilder

AttentionMetadataBuilder()

Bases: ABC, Generic[T]

Abstract class for attention metadata builders.

Create the builder, remember some configuration and parameters.

Source code in fastvideo/attention/backends/abstract.py
@abstractmethod
def __init__(self) -> None:
    """Create the builder, remember some configuration and parameters."""
    raise NotImplementedError

Functions

fastvideo.attention.AttentionMetadataBuilder.build abstractmethod
build(**kwargs: dict[str, Any]) -> AttentionMetadata

Build attention metadata with on-device tensors.

Source code in fastvideo/attention/backends/abstract.py
@abstractmethod
def build(
    self,
    **kwargs: dict[str, Any],
) -> AttentionMetadata:
    """Build attention metadata with on-device tensors."""
    raise NotImplementedError
fastvideo.attention.AttentionMetadataBuilder.prepare abstractmethod
prepare() -> None

Prepare for one batch.

Source code in fastvideo/attention/backends/abstract.py
@abstractmethod
def prepare(self) -> None:
    """Prepare for one batch."""
    raise NotImplementedError
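
As a rough sketch of how a concrete builder might satisfy this interface (the metadata field and class names below are illustrative, not an actual FastVideo backend):

from dataclasses import dataclass
from typing import Any

from fastvideo.attention.backends.abstract import (AttentionMetadata,
                                                   AttentionMetadataBuilder)


@dataclass
class ToyAttentionMetadata(AttentionMetadata):
    # extra per-batch field carried alongside the base current_timestep
    block_size: int = 64


class ToyAttentionMetadataBuilder(AttentionMetadataBuilder):

    def __init__(self) -> None:
        # remember configuration needed to build per-batch metadata
        self.block_size = 64

    def prepare(self) -> None:
        # reset any per-batch state here; nothing to do in this toy builder
        pass

    def build(self, **kwargs: dict[str, Any]) -> ToyAttentionMetadata:
        # kwargs typically carry per-batch values such as the current timestep
        return ToyAttentionMetadata(
            current_timestep=int(kwargs.get("current_timestep", 0)),
            block_size=self.block_size,
        )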

fastvideo.attention.DistributedAttention

DistributedAttention(num_heads: int, head_size: int, num_kv_heads: int | None = None, softmax_scale: float | None = None, causal: bool = False, supported_attention_backends: tuple[AttentionBackendEnum, ...] | None = None, prefix: str = '', **extra_impl_args)

Bases: Module

Distributed attention layer.

Source code in fastvideo/attention/layer.py
def __init__(self,
             num_heads: int,
             head_size: int,
             num_kv_heads: int | None = None,
             softmax_scale: float | None = None,
             causal: bool = False,
             supported_attention_backends: tuple[AttentionBackendEnum, ...]
             | None = None,
             prefix: str = "",
             **extra_impl_args) -> None:
    super().__init__()
    if softmax_scale is None:
        self.softmax_scale = head_size**-0.5
    else:
        self.softmax_scale = softmax_scale

    if num_kv_heads is None:
        num_kv_heads = num_heads

    dtype = get_compute_dtype()
    attn_backend = get_attn_backend(head_size, dtype, supported_attention_backends=supported_attention_backends)
    impl_cls = attn_backend.get_impl_cls()
    self.attn_impl = impl_cls(num_heads=num_heads,
                              head_size=head_size,
                              causal=causal,
                              softmax_scale=self.softmax_scale,
                              num_kv_heads=num_kv_heads,
                              prefix=f"{prefix}.impl",
                              **extra_impl_args)
    # Register attn_impl as submodule if it has learnable parameters (e.g., SLA's proj_l)
    # This ensures its parameters are included in state_dict() for saving/loading
    if isinstance(self.attn_impl, nn.Module):
        self.add_module('attn_impl', self.attn_impl)
    self.num_heads = num_heads
    self.head_size = head_size
    self.num_kv_heads = num_kv_heads
    self.backend = backend_name_to_enum(attn_backend.get_name())
    self.dtype = dtype

Functions

fastvideo.attention.DistributedAttention.forward
forward(q: Tensor, k: Tensor, v: Tensor, original_seq_len: int | None = None, replicated_q: Tensor | None = None, replicated_k: Tensor | None = None, replicated_v: Tensor | None = None, freqs_cis: tuple[Tensor, Tensor] | None = None) -> tuple[Tensor, Tensor | None]

Forward pass for distributed attention.

Parameters:

q (Tensor, required): Query tensor [batch_size, seq_len, num_heads, head_dim]
k (Tensor, required): Key tensor [batch_size, seq_len, num_heads, head_dim]
v (Tensor, required): Value tensor [batch_size, seq_len, num_heads, head_dim]
original_seq_len (int | None, default None): Original (unpadded) full sequence length
replicated_q (Tensor | None, default None): Replicated query tensor, typically for text tokens
replicated_k (Tensor | None, default None): Replicated key tensor
replicated_v (Tensor | None, default None): Replicated value tensor

Returns:

tuple[Tensor, Tensor | None]: A tuple containing:
    o (Tensor): Output tensor after attention for the main sequence
    replicated_o (Tensor | None): Output tensor for replicated tokens, if provided

Source code in fastvideo/attention/layer.py
@torch.compiler.disable
def forward(
    self,
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    original_seq_len: int | None = None,
    replicated_q: torch.Tensor | None = None,
    replicated_k: torch.Tensor | None = None,
    replicated_v: torch.Tensor | None = None,
    freqs_cis: tuple[torch.Tensor, torch.Tensor] | None = None,
) -> tuple[torch.Tensor, torch.Tensor | None]:
    """Forward pass for distributed attention.

    Args:
        q (torch.Tensor): Query tensor [batch_size, seq_len, num_heads, head_dim]
        k (torch.Tensor): Key tensor [batch_size, seq_len, num_heads, head_dim]
        v (torch.Tensor): Value tensor [batch_size, seq_len, num_heads, head_dim]
        original_seq_len (int): Original (unpadded) full sequence length
        replicated_q (Optional[torch.Tensor]): Replicated query tensor, typically for text tokens
        replicated_k (Optional[torch.Tensor]): Replicated key tensor
        replicated_v (Optional[torch.Tensor]): Replicated value tensor

    Returns:
        Tuple[torch.Tensor, Optional[torch.Tensor]]: A tuple containing:
            - o (torch.Tensor): Output tensor after attention for the main sequence
            - replicated_o (Optional[torch.Tensor]): Output tensor for replicated tokens, if provided
    """
    # Check input shapes
    assert q.dim() == 4 and k.dim() == 4 and v.dim() == 4, "Expected 4D tensors"
    batch_size, _, num_heads, _ = q.shape
    local_rank = get_sp_parallel_rank()
    world_size = get_sp_world_size()

    forward_context: ForwardContext = get_forward_context()
    ctx_attn_metadata = forward_context.attn_metadata

    # Stack QKV
    qkv = torch.cat([q, k, v], dim=0)  # [3*batch, seq_len, num_heads, head_dim]

    # Redistribute heads across sequence dimension
    qkv = sequence_model_parallel_all_to_all_4D(qkv, scatter_dim=2, gather_dim=1)

    # After all-to-all, each rank has the full sequence but only a subset of heads.
    # Trim away SP padding for attention compute, then pad back before returning.
    original_seq_len = original_seq_len or qkv.shape[1]
    pad_seq_len = qkv.shape[1] - original_seq_len
    qkv = qkv[:, :original_seq_len, :, :]

    if freqs_cis is not None:
        cos, sin = freqs_cis
        qkv[:batch_size * 2] = _apply_rotary_emb(qkv[:batch_size * 2], cos, sin, is_neox_style=False)
    # Apply backend-specific preprocess_qkv
    qkv = self.attn_impl.preprocess_qkv(qkv, ctx_attn_metadata)

    # Concatenate with replicated QKV if provided
    if replicated_q is not None:
        assert replicated_k is not None and replicated_v is not None
        replicated_qkv = torch.cat([replicated_q, replicated_k, replicated_v],
                                   dim=0)  # [3, seq_len, num_heads, head_dim]
        heads_per_rank = num_heads // world_size
        replicated_qkv = replicated_qkv[:, :, local_rank * heads_per_rank:(local_rank + 1) * heads_per_rank]
        qkv = torch.cat([qkv, replicated_qkv], dim=1)

    q, k, v = qkv.chunk(3, dim=0)

    output = self.attn_impl.forward(q, k, v, ctx_attn_metadata)

    # Redistribute back if using sequence parallelism
    replicated_output = None
    if replicated_q is not None:
        split_idx = original_seq_len
        replicated_output = output[:, split_idx:]
        output = output[:, :split_idx]
        # TODO: make this asynchronous
        replicated_output = sequence_model_parallel_all_gather(replicated_output.contiguous(), dim=2)
    # Apply backend-specific postprocess_output
    output = self.attn_impl.postprocess_output(output, ctx_attn_metadata)

    output = torch.nn.functional.pad(output, (0, 0, 0, 0, 0, pad_seq_len))

    output = sequence_model_parallel_all_to_all_4D(output, scatter_dim=1, gather_dim=2)

    return output, replicated_output
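
To make the layout change concrete, here is a single-process simulation of what the all-to-all above achieves (scatter_dim=2, gather_dim=1): each rank starts with a shard of the sequence and all heads, and ends with the full sequence and a shard of the heads. This is only an illustration; the real sequence_model_parallel_all_to_all_4D is a collective over the sequence-parallel group.

import torch

world_size = 2
batch, shard_seq, heads, dim = 1, 8, 4, 16

# per-rank inputs before the all-to-all: [batch, seq/P, heads, dim]
shards = [torch.randn(batch, shard_seq, heads, dim) for _ in range(world_size)]

outputs = []
for rank in range(world_size):
    # each rank keeps only its slice of heads but gathers the whole sequence
    lo = rank * (heads // world_size)
    hi = (rank + 1) * (heads // world_size)
    outputs.append(torch.cat([s[:, :, lo:hi] for s in shards], dim=1))

# per-rank outputs after the all-to-all: [batch, seq, heads/P, dim]
assert outputs[0].shape == (batch, shard_seq * world_size, heads // world_size, dim)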

fastvideo.attention.DistributedAttention_VSA

DistributedAttention_VSA(num_heads: int, head_size: int, num_kv_heads: int | None = None, softmax_scale: float | None = None, causal: bool = False, supported_attention_backends: tuple[AttentionBackendEnum, ...] | None = None, prefix: str = '', **extra_impl_args)

Bases: DistributedAttention

Distributed attention layer with VSA support.

Source code in fastvideo/attention/layer.py
def __init__(self,
             num_heads: int,
             head_size: int,
             num_kv_heads: int | None = None,
             softmax_scale: float | None = None,
             causal: bool = False,
             supported_attention_backends: tuple[AttentionBackendEnum, ...]
             | None = None,
             prefix: str = "",
             **extra_impl_args) -> None:
    super().__init__()
    if softmax_scale is None:
        self.softmax_scale = head_size**-0.5
    else:
        self.softmax_scale = softmax_scale

    if num_kv_heads is None:
        num_kv_heads = num_heads

    dtype = get_compute_dtype()
    attn_backend = get_attn_backend(head_size, dtype, supported_attention_backends=supported_attention_backends)
    impl_cls = attn_backend.get_impl_cls()
    self.attn_impl = impl_cls(num_heads=num_heads,
                              head_size=head_size,
                              causal=causal,
                              softmax_scale=self.softmax_scale,
                              num_kv_heads=num_kv_heads,
                              prefix=f"{prefix}.impl",
                              **extra_impl_args)
    # Register attn_impl as submodule if it has learnable parameters (e.g., SLA's proj_l)
    # This ensures its parameters are included in state_dict() for saving/loading
    if isinstance(self.attn_impl, nn.Module):
        self.add_module('attn_impl', self.attn_impl)
    self.num_heads = num_heads
    self.head_size = head_size
    self.num_kv_heads = num_kv_heads
    self.backend = backend_name_to_enum(attn_backend.get_name())
    self.dtype = dtype

Functions

fastvideo.attention.DistributedAttention_VSA.forward
forward(q: Tensor, k: Tensor, v: Tensor, original_seq_len: int, replicated_q: Tensor | None = None, replicated_k: Tensor | None = None, replicated_v: Tensor | None = None, gate_compress: Tensor | None = None, freqs_cis: tuple[Tensor, Tensor] | None = None) -> tuple[Tensor, Tensor | None]

Forward pass for distributed attention.

Parameters:

q (Tensor, required): Query tensor [batch_size, seq_len, num_heads, head_dim]
k (Tensor, required): Key tensor [batch_size, seq_len, num_heads, head_dim]
v (Tensor, required): Value tensor [batch_size, seq_len, num_heads, head_dim]
original_seq_len (int, required): Original (unpadded) full sequence length
gate_compress (Tensor | None, default None): Gate compress tensor [batch_size, seq_len, num_heads, head_dim]
replicated_q (Tensor | None, default None): Replicated query tensor, typically for text tokens
replicated_k (Tensor | None, default None): Replicated key tensor
replicated_v (Tensor | None, default None): Replicated value tensor

Returns:

tuple[Tensor, Tensor | None]: A tuple containing:
    o (Tensor): Output tensor after attention for the main sequence
    replicated_o (Tensor | None): Output tensor for replicated tokens, if provided

Source code in fastvideo/attention/layer.py
@torch.compiler.disable
def forward(
    self,
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    original_seq_len: int,
    replicated_q: torch.Tensor | None = None,
    replicated_k: torch.Tensor | None = None,
    replicated_v: torch.Tensor | None = None,
    gate_compress: torch.Tensor | None = None,
    freqs_cis: tuple[torch.Tensor, torch.Tensor] | None = None,
) -> tuple[torch.Tensor, torch.Tensor | None]:
    """Forward pass for distributed attention.

    Args:
        q (torch.Tensor): Query tensor [batch_size, seq_len, num_heads, head_dim]
        k (torch.Tensor): Key tensor [batch_size, seq_len, num_heads, head_dim]
        v (torch.Tensor): Value tensor [batch_size, seq_len, num_heads, head_dim]
        original_seq_len (int): Original (unpadded) full sequence length
        gate_compress (torch.Tensor): Gate compress tensor [batch_size, seq_len, num_heads, head_dim]
        replicated_q (Optional[torch.Tensor]): Replicated query tensor, typically for text tokens
        replicated_k (Optional[torch.Tensor]): Replicated key tensor
        replicated_v (Optional[torch.Tensor]): Replicated value tensor

    Returns:
        Tuple[torch.Tensor, Optional[torch.Tensor]]: A tuple containing:
            - o (torch.Tensor): Output tensor after attention for the main sequence
            - replicated_o (Optional[torch.Tensor]): Output tensor for replicated tokens, if provided
    """
    # Check text tokens are not supported for VSA now
    assert replicated_q is None and replicated_k is None and replicated_v is None, "Replicated QKV is not supported for VSA now"
    # Check input shapes
    assert q.dim() == 4 and k.dim() == 4 and v.dim() == 4, "Expected 4D tensors"

    forward_context: ForwardContext = get_forward_context()
    ctx_attn_metadata = forward_context.attn_metadata

    batch_size, seq_len, num_heads, head_dim = q.shape
    # Stack QKV
    qkvg = torch.cat([q, k, v, gate_compress], dim=0)  # [4*batch, seq_len, num_heads, head_dim]

    # Redistribute heads across sequence dimension
    # Before: [4*batch, shard_seq_len, num_heads, head_dim]
    # After:  [4*batch, full_seq_len, shard_num_heads, head_dim]
    qkvg = sequence_model_parallel_all_to_all_4D(qkvg, scatter_dim=2, gather_dim=1)

    # After all-to-all, each rank has the full sequence but only a subset of heads
    pad_seq_len = qkvg.shape[1] - original_seq_len
    qkvg = qkvg[:, :original_seq_len, :, :]

    if freqs_cis is not None:
        cos, sin = freqs_cis
        qkvg[:batch_size * 2] = _apply_rotary_emb(qkvg[:batch_size * 2], cos, sin, is_neox_style=False)

    qkvg = self.attn_impl.preprocess_qkv(qkvg, ctx_attn_metadata)

    q, k, v, gate_compress = qkvg.chunk(4, dim=0)
    output = self.attn_impl.forward(q, k, v, gate_compress, ctx_attn_metadata)  # type: ignore[call-arg]

    # Redistribute back if using sequence parallelism
    replicated_output = None

    # Apply backend-specific postprocess_output
    output = self.attn_impl.postprocess_output(output, ctx_attn_metadata)

    output = torch.nn.functional.pad(output, (0, 0, 0, 0, 0, pad_seq_len))

    output = sequence_model_parallel_all_to_all_4D(output, scatter_dim=1, gather_dim=2)
    return output, replicated_output
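
A small shape-only illustration of the four-way stacking used above, assuming nothing beyond plain PyTorch: q, k, v and gate_compress are concatenated along the batch dimension so a single all-to-all moves all four, and chunk(4, dim=0) recovers them afterwards.

import torch

batch, seq, heads, dim = 1, 16, 4, 8
q, k, v, g = (torch.randn(batch, seq, heads, dim) for _ in range(4))

qkvg = torch.cat([q, k, v, g], dim=0)      # [4*batch, seq, heads, dim]
q2, k2, v2, g2 = qkvg.chunk(4, dim=0)      # each [batch, seq, heads, dim]

assert q2.shape == (batch, seq, heads, dim)
assert torch.equal(g2, g)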

fastvideo.attention.LocalAttention

LocalAttention(num_heads: int, head_size: int, num_kv_heads: int | None = None, softmax_scale: float | None = None, causal: bool = False, supported_attention_backends: tuple[AttentionBackendEnum, ...] | None = None, **extra_impl_args)

Bases: Module

Attention layer.

Source code in fastvideo/attention/layer.py
def __init__(self,
             num_heads: int,
             head_size: int,
             num_kv_heads: int | None = None,
             softmax_scale: float | None = None,
             causal: bool = False,
             supported_attention_backends: tuple[AttentionBackendEnum, ...]
             | None = None,
             **extra_impl_args) -> None:
    super().__init__()
    if softmax_scale is None:
        self.softmax_scale = head_size**-0.5
    else:
        self.softmax_scale = softmax_scale
    if num_kv_heads is None:
        num_kv_heads = num_heads

    dtype = get_compute_dtype()
    attn_backend = get_attn_backend(head_size, dtype, supported_attention_backends=supported_attention_backends)
    impl_cls = attn_backend.get_impl_cls()
    self.attn_impl = impl_cls(num_heads=num_heads,
                              head_size=head_size,
                              softmax_scale=self.softmax_scale,
                              num_kv_heads=num_kv_heads,
                              causal=causal,
                              **extra_impl_args)
    self.num_heads = num_heads
    self.head_size = head_size
    self.num_kv_heads = num_kv_heads
    self.backend = backend_name_to_enum(attn_backend.get_name())
    self.dtype = dtype

Functions

fastvideo.attention.LocalAttention.forward
forward(q: Tensor, k: Tensor, v: Tensor, freqs_cis: tuple[Tensor, Tensor] | None = None) -> Tensor

Apply local attention between query, key and value tensors.

Parameters:

q (Tensor, required): Query tensor of shape [batch_size, seq_len, num_heads, head_dim]
k (Tensor, required): Key tensor of shape [batch_size, seq_len, num_heads, head_dim]
v (Tensor, required): Value tensor of shape [batch_size, seq_len, num_heads, head_dim]

Returns:

Tensor: Output tensor after local attention

Source code in fastvideo/attention/layer.py
def forward(
    self,
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    freqs_cis: tuple[torch.Tensor, torch.Tensor] | None = None,
) -> torch.Tensor:
    """
    Apply local attention between query, key and value tensors.

    Args:
        q (torch.Tensor): Query tensor of shape [batch_size, seq_len, num_heads, head_dim]
        k (torch.Tensor): Key tensor of shape [batch_size, seq_len, num_heads, head_dim] 
        v (torch.Tensor): Value tensor of shape [batch_size, seq_len, num_heads, head_dim]

    Returns:
        torch.Tensor: Output tensor after local attention
    """
    # Check input shapes
    assert q.dim() == 4 and k.dim() == 4 and v.dim() == 4, "Expected 4D tensors"

    forward_context: ForwardContext = get_forward_context()
    ctx_attn_metadata = forward_context.attn_metadata

    if freqs_cis is not None:
        cos, sin = freqs_cis
        q = _apply_rotary_emb(q, cos, sin, is_neox_style=False)
        k = _apply_rotary_emb(k, cos, sin, is_neox_style=False)

    output = self.attn_impl.forward(q, k, v, ctx_attn_metadata)
    return output

Modules

fastvideo.attention.backends

Modules

fastvideo.attention.backends.abstract
Classes
fastvideo.attention.backends.abstract.AttentionBackend

Bases: ABC

Abstract class for attention backends.

fastvideo.attention.backends.abstract.AttentionImpl
AttentionImpl(num_heads: int, head_size: int, softmax_scale: float, causal: bool = False, num_kv_heads: int | None = None, prefix: str = '', **extra_impl_args)

Bases: ABC, Generic[T]

Source code in fastvideo/attention/backends/abstract.py
@abstractmethod
def __init__(
    self,
    num_heads: int,
    head_size: int,
    softmax_scale: float,
    causal: bool = False,
    num_kv_heads: int | None = None,
    prefix: str = "",
    **extra_impl_args,
) -> None:
    raise NotImplementedError
Functions
fastvideo.attention.backends.abstract.AttentionImpl.postprocess_output
postprocess_output(output: Tensor, attn_metadata: T) -> Tensor

Postprocess the output tensor after the attention operation.

Default implementation returns the tensor unchanged. Subclasses can override this to implement custom postprocessing like untiling, scaling, or other transformations.

Called BEFORE all_to_all for distributed attention

Parameters:

output (Tensor, required): The output tensor from the attention operation
attn_metadata (T, required): Metadata for the attention operation

Returns:

Tensor: Postprocessed output tensor

Source code in fastvideo/attention/backends/abstract.py
def postprocess_output(
    self,
    output: torch.Tensor,
    attn_metadata: T,
) -> torch.Tensor:
    """Postprocess the output tensor after the attention operation.

    Default implementation returns the tensor unchanged.
    Subclasses can override this to implement custom postprocessing
    like untiling, scaling, or other transformations.

    Called BEFORE all_to_all for distributed attention

    Args:
        output: The output tensor from the attention operation
        attn_metadata: Metadata for the attention operation

    Returns:
        Postprocessed output tensor
    """

    return output
fastvideo.attention.backends.abstract.AttentionImpl.preprocess_qkv
preprocess_qkv(qkv: Tensor, attn_metadata: T) -> Tensor

Preprocess QKV tensor before performing attention operation.

Default implementation returns the tensor unchanged. Subclasses can override this to implement custom preprocessing like reshaping, tiling, scaling, or other transformations.

Called AFTER all_to_all for distributed attention

Parameters:

qkv (Tensor, required): The query-key-value tensor
attn_metadata (T, required): Metadata for the attention operation

Returns:

Tensor: Processed QKV tensor

Source code in fastvideo/attention/backends/abstract.py
def preprocess_qkv(self, qkv: torch.Tensor, attn_metadata: T) -> torch.Tensor:
    """Preprocess QKV tensor before performing attention operation.

    Default implementation returns the tensor unchanged.
    Subclasses can override this to implement custom preprocessing
    like reshaping, tiling, scaling, or other transformations.

    Called AFTER all_to_all for distributed attention

    Args:
        qkv: The query-key-value tensor
        attn_metadata: Metadata for the attention operation

    Returns:
        Processed QKV tensor
    """
    return qkv
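
A minimal sketch of a backend implementation that uses both hooks, assuming only the abstract interface documented here. The class below is illustrative, not an actual FastVideo backend; its forward simply runs dense scaled dot-product attention.

import torch
import torch.nn.functional as F

from fastvideo.attention.backends.abstract import AttentionImpl


class ToyAttentionImpl(AttentionImpl):

    def __init__(self, num_heads: int, head_size: int, softmax_scale: float,
                 causal: bool = False, num_kv_heads: int | None = None,
                 prefix: str = "", **extra_impl_args) -> None:
        self.softmax_scale = softmax_scale
        self.causal = causal

    def preprocess_qkv(self, qkv: torch.Tensor, attn_metadata) -> torch.Tensor:
        # called AFTER the all-to-all: reorder or tile tokens here if needed
        return qkv

    def forward(self, query, key, value, attn_metadata) -> torch.Tensor:
        # dense attention over (B, L, H, D) tensors
        q, k, v = (x.transpose(1, 2) for x in (query, key, value))
        out = F.scaled_dot_product_attention(q, k, v, is_causal=self.causal,
                                             scale=self.softmax_scale)
        return out.transpose(1, 2)

    def postprocess_output(self, output: torch.Tensor, attn_metadata) -> torch.Tensor:
        # called BEFORE the all-to-all on the way back: undo any reordering here
        return output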
fastvideo.attention.backends.abstract.AttentionMetadata dataclass
AttentionMetadata(current_timestep: int)

Attention metadata for prefill and decode batched together.

Functions
fastvideo.attention.backends.abstract.AttentionMetadata.asdict_zerocopy
asdict_zerocopy(skip_fields: set[str] | None = None) -> dict[str, Any]

Similar to dataclasses.asdict, but avoids deepcopying.

Source code in fastvideo/attention/backends/abstract.py
def asdict_zerocopy(self, skip_fields: set[str] | None = None) -> dict[str, Any]:
    """Similar to dataclasses.asdict, but avoids deepcopying."""
    if skip_fields is None:
        skip_fields = set()
    # Note that if we add dataclasses as fields, they will need
    # similar handling.
    return {field.name: getattr(self, field.name) for field in fields(self) if field.name not in skip_fields}
fastvideo.attention.backends.abstract.AttentionMetadataBuilder
AttentionMetadataBuilder()

Bases: ABC, Generic[T]

Abstract class for attention metadata builders.

Create the builder, remember some configuration and parameters.

Source code in fastvideo/attention/backends/abstract.py
@abstractmethod
def __init__(self) -> None:
    """Create the builder, remember some configuration and parameters."""
    raise NotImplementedError
Functions
fastvideo.attention.backends.abstract.AttentionMetadataBuilder.build abstractmethod
build(**kwargs: dict[str, Any]) -> AttentionMetadata

Build attention metadata with on-device tensors.

Source code in fastvideo/attention/backends/abstract.py
@abstractmethod
def build(
    self,
    **kwargs: dict[str, Any],
) -> AttentionMetadata:
    """Build attention metadata with on-device tensors."""
    raise NotImplementedError
fastvideo.attention.backends.abstract.AttentionMetadataBuilder.prepare abstractmethod
prepare() -> None

Prepare for one batch.

Source code in fastvideo/attention/backends/abstract.py
@abstractmethod
def prepare(self) -> None:
    """Prepare for one batch."""
    raise NotImplementedError
fastvideo.attention.backends.sla
Classes
fastvideo.attention.backends.sla.SLAAttentionBackend

Bases: AttentionBackend

Sparse-Linear Attention backend.

fastvideo.attention.backends.sla.SLAAttentionImpl
SLAAttentionImpl(num_heads: int, head_size: int, causal: bool = False, softmax_scale: float | None = None, num_kv_heads: int | None = None, prefix: str = '', topk_ratio: float = 0.1, feature_map: str = 'softmax', BLKQ: int = 128, BLKK: int = 64, use_bf16: bool = True, **extra_impl_args)

Bases: AttentionImpl, Module

SLA attention implementation with learnable linear projection.

This implementation combines sparse attention with linear attention, using a learnable projection to blend the outputs. The sparse attention uses a block-sparse pattern determined by QK similarity.

Parameters:

num_heads (int, required): Number of attention heads
head_size (int, required): Dimension of each head
topk_ratio (float, default 0.1): Ratio of key blocks to attend to (0-1)
feature_map (str, default 'softmax'): Feature map for linear attention ('softmax', 'elu', 'relu')
BLKQ (int, default 128): Query block size for sparse attention
BLKK (int, default 64): Key block size for sparse attention
use_bf16 (bool, default True): Whether to use bfloat16 for computation
Source code in fastvideo/attention/backends/sla.py
def __init__(
    self,
    num_heads: int,
    head_size: int,
    causal: bool = False,
    softmax_scale: float | None = None,
    num_kv_heads: int | None = None,
    prefix: str = "",
    # SLA-specific parameters - matched to TurboDiffusion defaults
    topk_ratio: float = 0.1,  # TurboDiffusion uses topk=0.1
    feature_map: str = "softmax",
    BLKQ: int = 128,  # TurboDiffusion uses BLKQ=128
    BLKK: int = 64,  # TurboDiffusion uses BLKK=64
    use_bf16: bool = True,
    **extra_impl_args,
) -> None:
    nn.Module.__init__(self)

    self.num_heads = num_heads
    self.head_size = head_size
    self.softmax_scale = softmax_scale if softmax_scale else head_size**-0.5
    self.causal = causal
    self.prefix = prefix

    # SLA-specific config
    self.topk_ratio = topk_ratio
    self.BLKQ = BLKQ
    self.BLKK = BLKK
    self.dtype = torch.bfloat16 if use_bf16 else torch.float16

    # Learnable linear projection for combining sparse + linear attention
    self.proj_l = nn.Linear(head_size, head_size, dtype=torch.float32)

    # Feature map for linear attention
    # Type annotation for callables
    self.feature_map_q: Callable[[torch.Tensor], torch.Tensor]
    self.feature_map_k: Callable[[torch.Tensor], torch.Tensor]
    if feature_map == "elu":
        self.feature_map_q = lambda x: F.elu(x) + 1
        self.feature_map_k = lambda x: F.elu(x) + 1
    elif feature_map == "relu":
        self.feature_map_q = F.relu
        self.feature_map_k = F.relu
    elif feature_map == "softmax":
        self.feature_map_q = lambda x: F.softmax(x, dim=-1)
        self.feature_map_k = lambda x: F.softmax(x, dim=-1)
    else:
        raise ValueError(f"Unknown feature map: {feature_map}")

    self._init_weights()
Functions
fastvideo.attention.backends.sla.SLAAttentionImpl.forward
forward(query: Tensor, key: Tensor, value: Tensor, attn_metadata: AttentionMetadata) -> Tensor

Forward pass for SLA attention.

Input tensors are in FastVideo format: (B, L, H, D). Internally converted to SLA format: (B, H, L, D).

Parameters:

query (Tensor, required): Query tensor (B, L, H, D)
key (Tensor, required): Key tensor (B, L, H, D)
value (Tensor, required): Value tensor (B, L, H, D)
attn_metadata (AttentionMetadata, required): Attention metadata

Returns:

Tensor: Output tensor (B, L, H, D)

Source code in fastvideo/attention/backends/sla.py
def forward(
    self,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attn_metadata: AttentionMetadata,
) -> torch.Tensor:
    """Forward pass for SLA attention.

    Input tensors are in FastVideo format: (B, L, H, D)
    Internally converted to SLA format: (B, H, L, D)

    Args:
        query: Query tensor (B, L, H, D)
        key: Key tensor (B, L, H, D)
        value: Value tensor (B, L, H, D)
        attn_metadata: Attention metadata

    Returns:
        Output tensor (B, L, H, D)
    """
    original_dtype = query.dtype

    # Convert from FastVideo format (B, L, H, D) to SLA format (B, H, L, D)
    q = query.transpose(1, 2).contiguous()
    k = key.transpose(1, 2).contiguous()
    v = value.transpose(1, 2).contiguous()

    # Get topk ratio from metadata if available
    topk_ratio = self.topk_ratio
    if hasattr(attn_metadata, 'topk_ratio'):
        topk_ratio = attn_metadata.topk_ratio  # type: ignore[union-attr]

    # Compute block-sparse attention pattern
    sparse_map, lut, real_topk = get_block_map(q, k, topk_ratio=topk_ratio, BLKQ=self.BLKQ, BLKK=self.BLKK)

    # Convert to compute dtype
    q = q.to(self.dtype)
    k = k.to(self.dtype)
    v = v.to(self.dtype)

    # Sparse attention
    o_s = _attention.apply(q, k, v, sparse_map, lut, real_topk, self.BLKQ, self.BLKK)

    # Linear attention with feature maps
    q_linear = self.feature_map_q(q).contiguous().to(self.dtype)
    k_linear = self.feature_map_k(k).contiguous().to(self.dtype)
    o_l = self._calc_linear_attention(q_linear, k_linear, v)

    # Project linear attention output and combine
    with torch.amp.autocast('cuda', dtype=self.dtype):
        o_l = self.proj_l(o_l)

    # Combine sparse and linear outputs
    output = (o_s + o_l).to(original_dtype)

    # Convert back to FastVideo format (B, L, H, D)
    output = output.transpose(1, 2)

    return output
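
The helper _calc_linear_attention is not shown on this page. As a rough sketch of what the linear branch computes, here is the common linear-attention formulation phi(q)(phi(k)^T v) with a phi(q)(phi(k)^T 1) normalizer; the actual FastVideo kernel may differ in details.

import torch


def linear_attention_sketch(q_phi: torch.Tensor, k_phi: torch.Tensor,
                            v: torch.Tensor) -> torch.Tensor:
    # q_phi, k_phi, v: (B, H, L, D), already passed through the feature maps
    kv = torch.einsum("bhld,bhle->bhde", k_phi, v)            # (B, H, D, D)
    normalizer = k_phi.sum(dim=2)                             # (B, H, D)
    out = torch.einsum("bhld,bhde->bhle", q_phi, kv)          # (B, H, L, D)
    denom = torch.einsum("bhld,bhd->bhl", q_phi, normalizer)
    return out / denom.clamp_min(1e-6).unsqueeze(-1)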
fastvideo.attention.backends.sla.SLAAttentionMetadata dataclass
SLAAttentionMetadata(current_timestep: int, topk_ratio: float = 0.5)

Bases: AttentionMetadata

Metadata for SLA attention.

fastvideo.attention.backends.sla.SLAAttentionMetadataBuilder
SLAAttentionMetadataBuilder()

Bases: AttentionMetadataBuilder

Builder for SLA attention metadata.

Source code in fastvideo/attention/backends/sla.py
def __init__(self) -> None:
    pass
fastvideo.attention.backends.sla.SageSLAAttentionBackend

Bases: AttentionBackend

Quantized Sparse-Linear Attention backend using SageAttention kernels.

fastvideo.attention.backends.sla.SageSLAAttentionImpl
SageSLAAttentionImpl(num_heads: int, head_size: int, causal: bool = False, softmax_scale: float | None = None, num_kv_heads: int | None = None, prefix: str = '', topk_ratio: float = 0.5, feature_map: str = 'softmax', use_bf16: bool = True, **extra_impl_args)

Bases: AttentionImpl, Module

SageSLA attention implementation using quantized SageAttention kernels.

This uses INT8 quantization for Q/K and FP8 for V to achieve better performance while maintaining accuracy. Requires spas_sage_attn package.

Parameters:

num_heads (int, required): Number of attention heads
head_size (int, required): Dimension of each head (must be 64 or 128)
topk_ratio (float, default 0.5): Ratio of key blocks to attend to (0-1)
feature_map (str, default 'softmax'): Feature map for linear attention ('softmax', 'elu', 'relu')
use_bf16 (bool, default True): Whether to use bfloat16 for computation
Source code in fastvideo/attention/backends/sla.py
def __init__(
    self,
    num_heads: int,
    head_size: int,
    causal: bool = False,
    softmax_scale: float | None = None,
    num_kv_heads: int | None = None,
    prefix: str = "",
    # SageSLA-specific parameters
    topk_ratio: float = 0.5,
    feature_map: str = "softmax",
    use_bf16: bool = True,
    **extra_impl_args,
) -> None:
    nn.Module.__init__(self)

    if not SAGESLA_ENABLED:
        raise ImportError("SageSLA requires spas_sage_attn. "
                          "Install with: pip install git+https://github.com/thu-ml/SpargeAttn.git")

    assert head_size in [64, 128], f"SageSLA requires head_size in [64, 128], got {head_size}"

    self.num_heads = num_heads
    self.head_size = head_size
    self.softmax_scale = softmax_scale if softmax_scale else head_size**-0.5
    self.causal = causal
    self.prefix = prefix

    # SageSLA-specific config
    self.topk_ratio = topk_ratio
    self.dtype = torch.bfloat16 if use_bf16 else torch.float16

    # Learnable linear projection for combining sparse + linear attention
    self.proj_l = nn.Linear(head_size, head_size, dtype=torch.float32)

    # Feature map for linear attention
    # Type annotation for callables
    self.feature_map_q: Callable[[torch.Tensor], torch.Tensor]
    self.feature_map_k: Callable[[torch.Tensor], torch.Tensor]
    if feature_map == "elu":
        self.feature_map_q = lambda x: F.elu(x) + 1
        self.feature_map_k = lambda x: F.elu(x) + 1
    elif feature_map == "relu":
        self.feature_map_q = F.relu
        self.feature_map_k = F.relu
    elif feature_map == "softmax":
        self.feature_map_q = lambda x: F.softmax(x, dim=-1)
        self.feature_map_k = lambda x: F.softmax(x, dim=-1)
    else:
        raise ValueError(f"Unknown feature map: {feature_map}")

    self._init_weights()
Functions
fastvideo.attention.backends.sla.SageSLAAttentionImpl.forward
forward(query: Tensor, key: Tensor, value: Tensor, attn_metadata: AttentionMetadata) -> Tensor

Forward pass for SageSLA attention with quantized kernels.

Input tensors are in FastVideo format: (B, L, H, D)

Parameters:

query (Tensor, required): Query tensor (B, L, H, D)
key (Tensor, required): Key tensor (B, L, H, D)
value (Tensor, required): Value tensor (B, L, H, D)
attn_metadata (AttentionMetadata, required): Attention metadata

Returns:

Tensor: Output tensor (B, L, H, D)

Source code in fastvideo/attention/backends/sla.py
def forward(
    self,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attn_metadata: AttentionMetadata,
) -> torch.Tensor:
    """Forward pass for SageSLA attention with quantized kernels.

    Input tensors are in FastVideo format: (B, L, H, D)

    Args:
        query: Query tensor (B, L, H, D)
        key: Key tensor (B, L, H, D)
        value: Value tensor (B, L, H, D)
        attn_metadata: Attention metadata

    Returns:
        Output tensor (B, L, H, D)
    """
    original_dtype = query.dtype

    # Convert from FastVideo format (B, L, H, D) to SLA format (B, H, L, D)
    q = query.transpose(1, 2).contiguous()
    k = key.transpose(1, 2).contiguous()
    v = value.transpose(1, 2).contiguous()

    # Get topk ratio from metadata if available
    topk_ratio = self.topk_ratio
    if hasattr(attn_metadata, 'topk_ratio'):
        topk_ratio = attn_metadata.topk_ratio  # type: ignore[union-attr]

    # Determine block sizes based on GPU architecture
    arch = _get_cuda_arch(q.device.index)
    if arch == "sm90":
        BLKQ, BLKK = 64, 128
    else:
        BLKQ, BLKK = 128, 64

    # Compute block-sparse attention pattern
    sparse_map, lut, real_topk = get_block_map(q, k, topk_ratio=topk_ratio, BLKQ=BLKQ, BLKK=BLKK)

    # Convert to compute dtype
    q = q.to(self.dtype)
    k = k.to(self.dtype)
    v = v.to(self.dtype)

    # ========== SPARGE QUANTIZED ATTENTION ==========
    km = k.mean(dim=-2, keepdim=True)
    headdim = q.size(-1)
    scale = 1.0 / (headdim**0.5)

    # Quantize Q, K to INT8
    q_int8, q_scale, k_int8, k_scale = get_vanilla_qk_quant(q, k, km, BLKQ, BLKK)
    lut_triton, valid_block_num = block_map_lut_triton(sparse_map)

    # Quantize V to FP8
    b, h_kv, kv_len, head_dim = v.shape
    padded_len = (kv_len + 127) // 128 * 128
    v_transposed_permutted = torch.empty((b, h_kv, head_dim, padded_len), dtype=v.dtype, device=v.device)
    fused.transpose_pad_permute_cuda(v, v_transposed_permutted, 1)
    v_fp8 = torch.empty(v_transposed_permutted.shape, dtype=torch.float8_e4m3fn, device=v.device)
    v_scale = torch.empty((b, h_kv, head_dim), dtype=torch.float32, device=v.device)
    fused.scale_fuse_quant_cuda(v_transposed_permutted, v_fp8, v_scale, kv_len, 2.25, 1)

    # Sparse attention with quantized kernels
    o_s = torch.empty_like(q)
    if arch == "sm90":
        qattn.qk_int8_sv_f8_accum_f32_block_sparse_attn_inst_buf_fuse_v_scale_sm90(
            q_int8, k_int8, v_fp8, o_s, lut_triton, valid_block_num, q_scale, k_scale, v_scale, 1, False, 1, scale)
    else:
        pvthreshold = torch.full((q.shape[-3], ), 1e6, dtype=torch.float32, device=q.device)
        if SAGE2PP_ENABLED:
            qk_int8_sv_f8_accum_f16_block_sparse_attn_inst_buf_fuse_v_scale_with_pv_threshold(
                q_int8, k_int8, v_fp8, o_s, lut_triton, valid_block_num, pvthreshold, q_scale, k_scale, v_scale, 1,
                False, 1, scale, 0)
        else:
            qattn.qk_int8_sv_f8_accum_f32_block_sparse_attn_inst_buf_fuse_v_scale_with_pv_threshold(
                q_int8, k_int8, v_fp8, o_s, lut_triton, valid_block_num, pvthreshold, q_scale, k_scale, v_scale, 1,
                False, 1, scale, 0)
    # ========== END SPARGE ==========

    # Linear attention with feature maps
    q_linear = self.feature_map_q(q).contiguous().to(self.dtype)
    k_linear = self.feature_map_k(k).contiguous().to(self.dtype)
    o_l = self._calc_linear_attention(q_linear, k_linear, v)

    # Project linear attention output and combine
    with torch.amp.autocast('cuda', dtype=self.dtype):
        o_l = self.proj_l(o_l)

    # Combine sparse and linear outputs
    output = (o_s + o_l).to(original_dtype)

    # Convert back to FastVideo format (B, L, H, D)
    output = output.transpose(1, 2)

    return output
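
One detail worth spelling out from the quantization step above: (kv_len + 127) // 128 * 128 rounds the KV length up to the next multiple of 128 before the transpose-pad-permute. For example:

for kv_len in (128, 129, 300):
    print(kv_len, "->", (kv_len + 127) // 128 * 128)
# 128 -> 128, 129 -> 256, 300 -> 384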
Functions
fastvideo.attention.backends.sla.get_block_map
get_block_map(q: Tensor, k: Tensor, topk_ratio: float, BLKQ: int = 64, BLKK: int = 64) -> tuple[Tensor, Tensor, int]

Compute sparse block map for attention based on QK similarity.

Parameters:

q (Tensor, required): Query tensor of shape (B, H, L, D)
k (Tensor, required): Key tensor of shape (B, H, L, D)
topk_ratio (float, required): Ratio of key blocks to attend to (0-1)
BLKQ (int, default 64): Query block size
BLKK (int, default 64): Key block size

Returns:

sparse_map (Tensor): Binary mask of shape (B, H, num_q_blocks, num_k_blocks)
lut (Tensor): Top-k indices of shape (B, H, num_q_blocks, topk)
topk (int): Number of key blocks selected

Source code in fastvideo/attention/backends/sla.py
def get_block_map(
    q: torch.Tensor,
    k: torch.Tensor,
    topk_ratio: float,
    BLKQ: int = 64,
    BLKK: int = 64,
) -> tuple[torch.Tensor, torch.Tensor, int]:
    """Compute sparse block map for attention based on QK similarity.

    Args:
        q: Query tensor of shape (B, H, L, D)
        k: Key tensor of shape (B, H, L, D)
        topk_ratio: Ratio of key blocks to attend to (0-1)
        BLKQ: Query block size
        BLKK: Key block size

    Returns:
        sparse_map: Binary mask of shape (B, H, num_q_blocks, num_k_blocks)
        lut: Top-k indices of shape (B, H, num_q_blocks, topk)
        topk: Number of key blocks selected
    """
    arg_k = k - torch.mean(k, dim=-2, keepdim=True)  # smooth-k technique from SageAttention
    pooled_qblocks = mean_pool(q, BLKQ)
    pooled_kblocks = mean_pool(arg_k, BLKK)
    pooled_score = pooled_qblocks @ pooled_kblocks.transpose(-1, -2)

    K = pooled_score.shape[-1]
    topk = min(K, int(topk_ratio * K))
    lut = torch.topk(pooled_score, topk, dim=-1, sorted=False).indices

    sparse_map = torch.zeros_like(pooled_score, dtype=torch.int8)
    sparse_map.scatter_(-1, lut, 1)
    return sparse_map, lut, topk
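
For illustration, a pure-PyTorch reference of the same block-selection logic (the real mean_pool uses a Triton kernel and handles partial blocks; this sketch assumes the sequence length is divisible by the block sizes):

import torch


def reference_block_map(q: torch.Tensor, k: torch.Tensor, topk_ratio: float,
                        BLKQ: int = 64, BLKK: int = 64):
    B, H, L, D = q.shape
    k = k - k.mean(dim=-2, keepdim=True)                 # smooth-k technique
    q_blocks = q.reshape(B, H, L // BLKQ, BLKQ, D).mean(dim=3)
    k_blocks = k.reshape(B, H, L // BLKK, BLKK, D).mean(dim=3)
    score = q_blocks @ k_blocks.transpose(-1, -2)        # block-level similarity
    topk = min(score.shape[-1], int(topk_ratio * score.shape[-1]))
    lut = torch.topk(score, topk, dim=-1, sorted=False).indices
    sparse_map = torch.zeros_like(score, dtype=torch.int8)
    sparse_map.scatter_(-1, lut, 1)
    return sparse_map, lut, topk


q = torch.randn(1, 2, 256, 64)
k = torch.randn(1, 2, 256, 64)
sparse_map, lut, topk = reference_block_map(q, k, topk_ratio=0.25)
assert int(sparse_map.sum(dim=-1).max()) == topk == 1   # 1 of 4 key blocks kept per query block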
fastvideo.attention.backends.sla.mean_pool
mean_pool(x: Tensor, BLK: int) -> Tensor

Mean pool tensor along sequence dimension with block size BLK.

Source code in fastvideo/attention/backends/sla.py
def mean_pool(x: torch.Tensor, BLK: int) -> torch.Tensor:
    """Mean pool tensor along sequence dimension with block size BLK."""
    assert x.is_contiguous()

    B, H, L, D = x.shape
    L_BLOCKS = (L + BLK - 1) // BLK
    x_mean = torch.empty((B, H, L_BLOCKS, D), device=x.device, dtype=x.dtype)

    grid = (L_BLOCKS, B * H)
    compress_kernel[grid](x, x_mean, L, D, BLK)
    return x_mean
fastvideo.attention.backends.video_sparse_attn
Classes
Functions
fastvideo.attention.backends.video_sparse_attn.construct_variable_block_sizes cached
construct_variable_block_sizes(dit_seq_shape: tuple[int, int, int], num_tiles: tuple[int, int, int], device: device) -> LongTensor

Compute the number of valid (non‑padded) tokens inside every (ts_t × ts_h × ts_w) tile after padding ‑‑ flattened in the order (t‑tile, h‑tile, w‑tile) that rearrange uses.

Returns

torch.LongTensor # shape: [∏ full_window_size]

Source code in fastvideo/attention/backends/video_sparse_attn.py
@functools.lru_cache(maxsize=10)
def construct_variable_block_sizes(
    dit_seq_shape: tuple[int, int, int],
    num_tiles: tuple[int, int, int],
    device: torch.device,
) -> torch.LongTensor:
    """
    Compute the number of valid (non‑padded) tokens inside every
    (ts_t × ts_h × ts_w) tile after padding ‑‑ flattened in the order
    (t‑tile, h‑tile, w‑tile) that `rearrange` uses.

    Returns
    -------
    torch.LongTensor  # shape: [∏ full_window_size]
    """
    # unpack
    t, h, w = dit_seq_shape
    ts_t, ts_h, ts_w = VSA_TILE_SIZE
    n_t, n_h, n_w = num_tiles

    def _sizes(dim_len: int, tile: int, n_tiles: int) -> torch.LongTensor:
        """Vector with the size of each tile along one dimension."""
        sizes = torch.full((n_tiles, ), tile, dtype=torch.int, device=device)
        # size of last (possibly partial) tile
        remainder = dim_len - (n_tiles - 1) * tile
        sizes[-1] = remainder if remainder > 0 else tile
        return sizes

    t_sizes = _sizes(t, ts_t, n_t)  # [n_t]
    h_sizes = _sizes(h, ts_h, n_h)  # [n_h]
    w_sizes = _sizes(w, ts_w, n_w)  # [n_w]

    # broadcast‑multiply to get voxels per tile, then flatten
    block_sizes = (
        t_sizes[:, None, None]  # [n_t, 1,   1]
        * h_sizes[None, :, None]  # [1,   n_h, 1]
        * w_sizes[None, None, :]  # [1,   1,   n_w]
    ).reshape(-1)  # [n_t * n_h * n_w]

    return block_sizes
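
A pure-Python walk-through of the same computation with the tile size passed explicitly (the real function reads VSA_TILE_SIZE from the module; the 4x8x8 tile and 10x20x20 latent below are made-up example numbers):

import torch


def tile_token_counts(dit_seq_shape, tile_size, num_tiles) -> torch.Tensor:

    def sizes(dim_len: int, tile: int, n_tiles: int) -> torch.Tensor:
        out = torch.full((n_tiles,), tile, dtype=torch.long)
        remainder = dim_len - (n_tiles - 1) * tile
        out[-1] = remainder if remainder > 0 else tile
        return out

    t_sizes = sizes(dit_seq_shape[0], tile_size[0], num_tiles[0])
    h_sizes = sizes(dit_seq_shape[1], tile_size[1], num_tiles[1])
    w_sizes = sizes(dit_seq_shape[2], tile_size[2], num_tiles[2])
    return (t_sizes[:, None, None] * h_sizes[None, :, None] * w_sizes[None, None, :]).reshape(-1)


counts = tile_token_counts((10, 20, 20), (4, 8, 8), (3, 3, 3))
assert counts[0].item() == 4 * 8 * 8      # a full interior tile
assert counts[-1].item() == 2 * 4 * 4     # the partial corner tile
assert int(counts.sum()) == 10 * 20 * 20  # every token is counted exactly once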
fastvideo.attention.backends.vmoba
Classes
fastvideo.attention.backends.vmoba.VMOBAAttentionImpl
VMOBAAttentionImpl(num_heads, head_size, softmax_scale, causal=False, num_kv_heads=None, prefix='', **extra_impl_args)

Bases: AttentionImpl

Source code in fastvideo/attention/backends/vmoba.py
def __init__(self,
             num_heads,
             head_size,
             softmax_scale,
             causal=False,
             num_kv_heads=None,
             prefix="",
             **extra_impl_args) -> None:
    self.prefix = prefix
    self.layer_idx = self._get_layer_idx(prefix)
    from flash_attn.bert_padding import pad_input
    self.pad_input = pad_input
Functions
fastvideo.attention.backends.vmoba.VMOBAAttentionImpl.forward
forward(query: Tensor, key: Tensor, value: Tensor, attn_metadata: AttentionMetadata) -> Tensor

query: [B, L, H, D]
key: [B, L, H, D]
value: [B, L, H, D]
attn_metadata: AttentionMetadata

Source code in fastvideo/attention/backends/vmoba.py
def forward(
    self,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attn_metadata: AttentionMetadata,
) -> torch.Tensor:
    """
    query: [B, L, H, D]
    key:   [B, L, H, D]
    value: [B, L, H, D]
    attn_metadata: AttentionMetadata
    """
    batch_size, sequence_length, num_heads, head_dim = query.shape

    # select chunk type according to layer idx:
    loop_layer_num = attn_metadata.temporal_layer + attn_metadata.spatial_layer + attn_metadata.st_layer
    moba_layer = self.layer_idx - attn_metadata.first_full_layer
    if moba_layer % loop_layer_num < attn_metadata.temporal_layer:
        moba_chunk_size = attn_metadata.temporal_chunk_size
        moba_topk = attn_metadata.temporal_topk
    elif moba_layer % loop_layer_num < attn_metadata.temporal_layer + attn_metadata.spatial_layer:
        moba_chunk_size = attn_metadata.spatial_chunk_size
        moba_topk = attn_metadata.spatial_topk
    elif moba_layer % loop_layer_num < attn_metadata.temporal_layer + attn_metadata.spatial_layer + attn_metadata.st_layer:
        moba_chunk_size = attn_metadata.st_chunk_size
        moba_topk = attn_metadata.st_topk

    query, chunk_size = process_moba_input(query, attn_metadata.patch_resolution, moba_chunk_size)
    key, chunk_size = process_moba_input(key, attn_metadata.patch_resolution, moba_chunk_size)
    value, chunk_size = process_moba_input(value, attn_metadata.patch_resolution, moba_chunk_size)
    max_seqlen = query.shape[1]
    indices_q = torch.arange(0, query.shape[0] * query.shape[1], device=query.device)
    cu_seqlens = torch.arange(0,
                              query.shape[0] * query.shape[1] + 1,
                              query.shape[1],
                              dtype=torch.int32,
                              device=query.device)
    query = rearrange(query, "b s ... -> (b s) ...")
    key = rearrange(key, "b s ... -> (b s) ...")
    value = rearrange(value, "b s ... -> (b s) ...")

    # current_timestep=attn_metadata.current_timestep
    hidden_states = moba_attn_varlen(
        query,
        key,
        value,
        cu_seqlens=cu_seqlens,
        max_seqlen=max_seqlen,
        moba_chunk_size=chunk_size,
        moba_topk=moba_topk,
        select_mode=attn_metadata.moba_select_mode,
        simsum_threshold=attn_metadata.moba_threshold,
        threshold_type=attn_metadata.moba_threshold_type,
    )
    hidden_states = self.pad_input(hidden_states, indices_q, batch_size, sequence_length)
    hidden_states = process_moba_output(hidden_states, attn_metadata.patch_resolution, moba_chunk_size)

    return hidden_states
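
To make the layer-to-chunk-type mapping above concrete, here is a toy walk-through (the counts below are made-up example values, not FastVideo defaults): after first_full_layer, layers cycle through temporal, spatial and spatio-temporal chunking.

first_full_layer = 2
temporal_layer, spatial_layer, st_layer = 1, 1, 1
loop_layer_num = temporal_layer + spatial_layer + st_layer

for layer_idx in range(first_full_layer, first_full_layer + 6):
    moba_layer = layer_idx - first_full_layer
    pos = moba_layer % loop_layer_num
    if pos < temporal_layer:
        kind = "temporal"
    elif pos < temporal_layer + spatial_layer:
        kind = "spatial"
    else:
        kind = "spatio-temporal"
    print(layer_idx, kind)
# layers 2 and 5 use temporal chunks, 3 and 6 spatial, 4 and 7 spatio-temporal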
Functions

fastvideo.attention.layer

Classes

fastvideo.attention.layer.DistributedAttention
DistributedAttention(num_heads: int, head_size: int, num_kv_heads: int | None = None, softmax_scale: float | None = None, causal: bool = False, supported_attention_backends: tuple[AttentionBackendEnum, ...] | None = None, prefix: str = '', **extra_impl_args)

Bases: Module

Distributed attention layer.

Source code in fastvideo/attention/layer.py
def __init__(self,
             num_heads: int,
             head_size: int,
             num_kv_heads: int | None = None,
             softmax_scale: float | None = None,
             causal: bool = False,
             supported_attention_backends: tuple[AttentionBackendEnum, ...]
             | None = None,
             prefix: str = "",
             **extra_impl_args) -> None:
    super().__init__()
    if softmax_scale is None:
        self.softmax_scale = head_size**-0.5
    else:
        self.softmax_scale = softmax_scale

    if num_kv_heads is None:
        num_kv_heads = num_heads

    dtype = get_compute_dtype()
    attn_backend = get_attn_backend(head_size, dtype, supported_attention_backends=supported_attention_backends)
    impl_cls = attn_backend.get_impl_cls()
    self.attn_impl = impl_cls(num_heads=num_heads,
                              head_size=head_size,
                              causal=causal,
                              softmax_scale=self.softmax_scale,
                              num_kv_heads=num_kv_heads,
                              prefix=f"{prefix}.impl",
                              **extra_impl_args)
    # Register attn_impl as submodule if it has learnable parameters (e.g., SLA's proj_l)
    # This ensures its parameters are included in state_dict() for saving/loading
    if isinstance(self.attn_impl, nn.Module):
        self.add_module('attn_impl', self.attn_impl)
    self.num_heads = num_heads
    self.head_size = head_size
    self.num_kv_heads = num_kv_heads
    self.backend = backend_name_to_enum(attn_backend.get_name())
    self.dtype = dtype
Functions
fastvideo.attention.layer.DistributedAttention.forward
forward(q: Tensor, k: Tensor, v: Tensor, original_seq_len: int | None = None, replicated_q: Tensor | None = None, replicated_k: Tensor | None = None, replicated_v: Tensor | None = None, freqs_cis: tuple[Tensor, Tensor] | None = None) -> tuple[Tensor, Tensor | None]

Forward pass for distributed attention.

Parameters:

q (Tensor, required): Query tensor [batch_size, seq_len, num_heads, head_dim]
k (Tensor, required): Key tensor [batch_size, seq_len, num_heads, head_dim]
v (Tensor, required): Value tensor [batch_size, seq_len, num_heads, head_dim]
original_seq_len (int | None, default None): Original (unpadded) full sequence length
replicated_q (Tensor | None, default None): Replicated query tensor, typically for text tokens
replicated_k (Tensor | None, default None): Replicated key tensor
replicated_v (Tensor | None, default None): Replicated value tensor

Returns:

tuple[Tensor, Tensor | None]: A tuple containing:
    o (Tensor): Output tensor after attention for the main sequence
    replicated_o (Tensor | None): Output tensor for replicated tokens, if provided

Source code in fastvideo/attention/layer.py
@torch.compiler.disable
def forward(
    self,
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    original_seq_len: int | None = None,
    replicated_q: torch.Tensor | None = None,
    replicated_k: torch.Tensor | None = None,
    replicated_v: torch.Tensor | None = None,
    freqs_cis: tuple[torch.Tensor, torch.Tensor] | None = None,
) -> tuple[torch.Tensor, torch.Tensor | None]:
    """Forward pass for distributed attention.

    Args:
        q (torch.Tensor): Query tensor [batch_size, seq_len, num_heads, head_dim]
        k (torch.Tensor): Key tensor [batch_size, seq_len, num_heads, head_dim]
        v (torch.Tensor): Value tensor [batch_size, seq_len, num_heads, head_dim]
        original_seq_len (int): Original (unpadded) full sequence length
        replicated_q (Optional[torch.Tensor]): Replicated query tensor, typically for text tokens
        replicated_k (Optional[torch.Tensor]): Replicated key tensor
        replicated_v (Optional[torch.Tensor]): Replicated value tensor

    Returns:
        Tuple[torch.Tensor, Optional[torch.Tensor]]: A tuple containing:
            - o (torch.Tensor): Output tensor after attention for the main sequence
            - replicated_o (Optional[torch.Tensor]): Output tensor for replicated tokens, if provided
    """
    # Check input shapes
    assert q.dim() == 4 and k.dim() == 4 and v.dim() == 4, "Expected 4D tensors"
    batch_size, _, num_heads, _ = q.shape
    local_rank = get_sp_parallel_rank()
    world_size = get_sp_world_size()

    forward_context: ForwardContext = get_forward_context()
    ctx_attn_metadata = forward_context.attn_metadata

    # Stack QKV
    qkv = torch.cat([q, k, v], dim=0)  # [3*batch, seq_len, num_heads, head_dim]

    # Redistribute heads across sequence dimension
    qkv = sequence_model_parallel_all_to_all_4D(qkv, scatter_dim=2, gather_dim=1)

    # After all-to-all, each rank has the full sequence but only a subset of heads.
    # Trim away SP padding for attention compute, then pad back before returning.
    original_seq_len = original_seq_len or qkv.shape[1]
    pad_seq_len = qkv.shape[1] - original_seq_len
    qkv = qkv[:, :original_seq_len, :, :]

    if freqs_cis is not None:
        cos, sin = freqs_cis
        qkv[:batch_size * 2] = _apply_rotary_emb(qkv[:batch_size * 2], cos, sin, is_neox_style=False)
    # Apply backend-specific preprocess_qkv
    qkv = self.attn_impl.preprocess_qkv(qkv, ctx_attn_metadata)

    # Concatenate with replicated QKV if provided
    if replicated_q is not None:
        assert replicated_k is not None and replicated_v is not None
        replicated_qkv = torch.cat([replicated_q, replicated_k, replicated_v],
                                   dim=0)  # [3, seq_len, num_heads, head_dim]
        heads_per_rank = num_heads // world_size
        replicated_qkv = replicated_qkv[:, :, local_rank * heads_per_rank:(local_rank + 1) * heads_per_rank]
        qkv = torch.cat([qkv, replicated_qkv], dim=1)

    q, k, v = qkv.chunk(3, dim=0)

    output = self.attn_impl.forward(q, k, v, ctx_attn_metadata)

    # Redistribute back if using sequence parallelism
    replicated_output = None
    if replicated_q is not None:
        split_idx = original_seq_len
        replicated_output = output[:, split_idx:]
        output = output[:, :split_idx]
        # TODO: make this asynchronous
        replicated_output = sequence_model_parallel_all_gather(replicated_output.contiguous(), dim=2)
    # Apply backend-specific postprocess_output
    output = self.attn_impl.postprocess_output(output, ctx_attn_metadata)

    output = torch.nn.functional.pad(output, (0, 0, 0, 0, 0, pad_seq_len))

    output = sequence_model_parallel_all_to_all_4D(output, scatter_dim=1, gather_dim=2)

    return output, replicated_output
fastvideo.attention.layer.DistributedAttention_VSA
DistributedAttention_VSA(num_heads: int, head_size: int, num_kv_heads: int | None = None, softmax_scale: float | None = None, causal: bool = False, supported_attention_backends: tuple[AttentionBackendEnum, ...] | None = None, prefix: str = '', **extra_impl_args)

Bases: DistributedAttention

Distributed attention layer with VSA support.

Source code in fastvideo/attention/layer.py
def __init__(self,
             num_heads: int,
             head_size: int,
             num_kv_heads: int | None = None,
             softmax_scale: float | None = None,
             causal: bool = False,
             supported_attention_backends: tuple[AttentionBackendEnum, ...]
             | None = None,
             prefix: str = "",
             **extra_impl_args) -> None:
    super().__init__()
    if softmax_scale is None:
        self.softmax_scale = head_size**-0.5
    else:
        self.softmax_scale = softmax_scale

    if num_kv_heads is None:
        num_kv_heads = num_heads

    dtype = get_compute_dtype()
    attn_backend = get_attn_backend(head_size, dtype, supported_attention_backends=supported_attention_backends)
    impl_cls = attn_backend.get_impl_cls()
    self.attn_impl = impl_cls(num_heads=num_heads,
                              head_size=head_size,
                              causal=causal,
                              softmax_scale=self.softmax_scale,
                              num_kv_heads=num_kv_heads,
                              prefix=f"{prefix}.impl",
                              **extra_impl_args)
    # Register attn_impl as submodule if it has learnable parameters (e.g., SLA's proj_l)
    # This ensures its parameters are included in state_dict() for saving/loading
    if isinstance(self.attn_impl, nn.Module):
        self.add_module('attn_impl', self.attn_impl)
    self.num_heads = num_heads
    self.head_size = head_size
    self.num_kv_heads = num_kv_heads
    self.backend = backend_name_to_enum(attn_backend.get_name())
    self.dtype = dtype
Functions
fastvideo.attention.layer.DistributedAttention_VSA.forward
forward(q: Tensor, k: Tensor, v: Tensor, original_seq_len: int, replicated_q: Tensor | None = None, replicated_k: Tensor | None = None, replicated_v: Tensor | None = None, gate_compress: Tensor | None = None, freqs_cis: tuple[Tensor, Tensor] | None = None) -> tuple[Tensor, Tensor | None]

Forward pass for distributed attention.

Parameters:

q (Tensor, required): Query tensor [batch_size, seq_len, num_heads, head_dim]
k (Tensor, required): Key tensor [batch_size, seq_len, num_heads, head_dim]
v (Tensor, required): Value tensor [batch_size, seq_len, num_heads, head_dim]
original_seq_len (int, required): Original (unpadded) full sequence length
gate_compress (Tensor | None, default None): Gate compress tensor [batch_size, seq_len, num_heads, head_dim]
replicated_q (Tensor | None, default None): Replicated query tensor, typically for text tokens
replicated_k (Tensor | None, default None): Replicated key tensor
replicated_v (Tensor | None, default None): Replicated value tensor

Returns:

tuple[Tensor, Tensor | None]: A tuple containing:
    o (Tensor): Output tensor after attention for the main sequence
    replicated_o (Tensor | None): Output tensor for replicated tokens, if provided

Source code in fastvideo/attention/layer.py
@torch.compiler.disable
def forward(
    self,
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    original_seq_len: int,
    replicated_q: torch.Tensor | None = None,
    replicated_k: torch.Tensor | None = None,
    replicated_v: torch.Tensor | None = None,
    gate_compress: torch.Tensor | None = None,
    freqs_cis: tuple[torch.Tensor, torch.Tensor] | None = None,
) -> tuple[torch.Tensor, torch.Tensor | None]:
    """Forward pass for distributed attention.

    Args:
        q (torch.Tensor): Query tensor [batch_size, seq_len, num_heads, head_dim]
        k (torch.Tensor): Key tensor [batch_size, seq_len, num_heads, head_dim]
        v (torch.Tensor): Value tensor [batch_size, seq_len, num_heads, head_dim]
        original_seq_len (int): Original (unpadded) full sequence length
        gate_compress (torch.Tensor): Gate compress tensor [batch_size, seq_len, num_heads, head_dim]
        replicated_q (Optional[torch.Tensor]): Replicated query tensor, typically for text tokens
        replicated_k (Optional[torch.Tensor]): Replicated key tensor
        replicated_v (Optional[torch.Tensor]): Replicated value tensor

    Returns:
        Tuple[torch.Tensor, Optional[torch.Tensor]]: A tuple containing:
            - o (torch.Tensor): Output tensor after attention for the main sequence
            - replicated_o (Optional[torch.Tensor]): Output tensor for replicated tokens, if provided
    """
    # Replicated (text-token) QKV is not yet supported for VSA
    assert replicated_q is None and replicated_k is None and replicated_v is None, "Replicated QKV is not supported for VSA now"
    # Check input shapes
    assert q.dim() == 4 and k.dim() == 4 and v.dim() == 4, "Expected 4D tensors"

    forward_context: ForwardContext = get_forward_context()
    ctx_attn_metadata = forward_context.attn_metadata

    batch_size, seq_len, num_heads, head_dim = q.shape
    # Stack QKV
    qkvg = torch.cat([q, k, v, gate_compress], dim=0)  # [4*batch, seq_len, num_heads, head_dim]

    # Redistribute heads across sequence dimension
    # Before: [4*batch, shard_seq_len, num_heads, head_dim]
    # After:  [4*batch, full_seq_len, shard_num_heads, head_dim]
    qkvg = sequence_model_parallel_all_to_all_4D(qkvg, scatter_dim=2, gather_dim=1)

    # After all-to-all, each rank has the full sequence but only a subset of heads
    pad_seq_len = qkvg.shape[1] - original_seq_len
    qkvg = qkvg[:, :original_seq_len, :, :]

    if freqs_cis is not None:
        cos, sin = freqs_cis
        qkvg[:batch_size * 2] = _apply_rotary_emb(qkvg[:batch_size * 2], cos, sin, is_neox_style=False)

    qkvg = self.attn_impl.preprocess_qkv(qkvg, ctx_attn_metadata)

    q, k, v, gate_compress = qkvg.chunk(4, dim=0)
    output = self.attn_impl.forward(q, k, v, gate_compress, ctx_attn_metadata)  # type: ignore[call-arg]

    # Redistribute back if using sequence parallelism
    replicated_output = None

    # Apply backend-specific postprocess_output
    output = self.attn_impl.postprocess_output(output, ctx_attn_metadata)

    output = torch.nn.functional.pad(output, (0, 0, 0, 0, 0, pad_seq_len))

    output = sequence_model_parallel_all_to_all_4D(output, scatter_dim=1, gather_dim=2)
    return output, replicated_output
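The all-to-all call above is the core of the sequence-parallel layout change: before it, each rank holds a shard of the sequence with all heads; after it, each rank holds the full sequence with a shard of the heads. The following is a minimal single-process sketch of that reshuffle, assuming a hypothetical sp_size of 4 and using plain tensor ops in place of FastVideo's sequence_model_parallel_all_to_all_4D (which uses torch.distributed); the names sp_size, rank_inputs, and rank_outputs are illustrative only.

import torch

sp_size = 4                                  # assumed sequence-parallel world size
batch, full_seq, heads, dim = 2, 32, 8, 64

# Per-rank inputs before the all-to-all: [batch, full_seq // sp_size, heads, dim]
rank_inputs = [torch.randn(batch, full_seq // sp_size, heads, dim)
               for _ in range(sp_size)]

# Emulate scatter_dim=2, gather_dim=1: split heads on every rank (scatter),
# then concatenate each rank's head shard along the sequence dimension (gather).
head_chunks = [x.chunk(sp_size, dim=2) for x in rank_inputs]
rank_outputs = [torch.cat([head_chunks[src][dst] for src in range(sp_size)], dim=1)
                for dst in range(sp_size)]

print(rank_inputs[0].shape)   # torch.Size([2, 8, 8, 64])
print(rank_outputs[0].shape)  # torch.Size([2, 32, 2, 64])

In forward, q, k, v, and gate_compress are first concatenated along the batch dimension so a single all-to-all covers all four tensors; the second call at the end (scatter_dim=1, gather_dim=2) applies the inverse reshuffle to restore the original per-rank layout for the output.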
fastvideo.attention.layer.LocalAttention
LocalAttention(num_heads: int, head_size: int, num_kv_heads: int | None = None, softmax_scale: float | None = None, causal: bool = False, supported_attention_backends: tuple[AttentionBackendEnum, ...] | None = None, **extra_impl_args)

Bases: Module

Attention layer.

Source code in fastvideo/attention/layer.py
def __init__(self,
             num_heads: int,
             head_size: int,
             num_kv_heads: int | None = None,
             softmax_scale: float | None = None,
             causal: bool = False,
             supported_attention_backends: tuple[AttentionBackendEnum, ...]
             | None = None,
             **extra_impl_args) -> None:
    super().__init__()
    if softmax_scale is None:
        self.softmax_scale = head_size**-0.5
    else:
        self.softmax_scale = softmax_scale
    if num_kv_heads is None:
        num_kv_heads = num_heads

    dtype = get_compute_dtype()
    attn_backend = get_attn_backend(head_size, dtype, supported_attention_backends=supported_attention_backends)
    impl_cls = attn_backend.get_impl_cls()
    self.attn_impl = impl_cls(num_heads=num_heads,
                              head_size=head_size,
                              softmax_scale=self.softmax_scale,
                              num_kv_heads=num_kv_heads,
                              causal=causal,
                              **extra_impl_args)
    self.num_heads = num_heads
    self.head_size = head_size
    self.num_kv_heads = num_kv_heads
    self.backend = backend_name_to_enum(attn_backend.get_name())
    self.dtype = dtype
Functions
fastvideo.attention.layer.LocalAttention.forward
forward(q: Tensor, k: Tensor, v: Tensor, freqs_cis: tuple[Tensor, Tensor] | None = None) -> Tensor

Apply local attention between query, key and value tensors.

Parameters:

  • q (Tensor): Query tensor of shape [batch_size, seq_len, num_heads, head_dim]. Required.
  • k (Tensor): Key tensor of shape [batch_size, seq_len, num_heads, head_dim]. Required.
  • v (Tensor): Value tensor of shape [batch_size, seq_len, num_heads, head_dim]. Required.

Returns:

  • Tensor: Output tensor after local attention

Source code in fastvideo/attention/layer.py
def forward(
    self,
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    freqs_cis: tuple[torch.Tensor, torch.Tensor] | None = None,
) -> torch.Tensor:
    """
    Apply local attention between query, key and value tensors.

    Args:
        q (torch.Tensor): Query tensor of shape [batch_size, seq_len, num_heads, head_dim]
        k (torch.Tensor): Key tensor of shape [batch_size, seq_len, num_heads, head_dim] 
        v (torch.Tensor): Value tensor of shape [batch_size, seq_len, num_heads, head_dim]

    Returns:
        torch.Tensor: Output tensor after local attention
    """
    # Check input shapes
    assert q.dim() == 4 and k.dim() == 4 and v.dim() == 4, "Expected 4D tensors"

    forward_context: ForwardContext = get_forward_context()
    ctx_attn_metadata = forward_context.attn_metadata

    if freqs_cis is not None:
        cos, sin = freqs_cis
        q = _apply_rotary_emb(q, cos, sin, is_neox_style=False)
        k = _apply_rotary_emb(k, cos, sin, is_neox_style=False)

    output = self.attn_impl.forward(q, k, v, ctx_attn_metadata)
    return output
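For intuition, the backend implementation invoked by attn_impl.forward computes standard scaled dot-product attention over the head dimension. The sketch below reproduces that math directly with torch.nn.functional.scaled_dot_product_attention (PyTorch >= 2.1 for the scale argument), bypassing backend selection and the forward context; it is a reference for shapes and the default head_size ** -0.5 scale, not the layer's actual code path.

import torch
import torch.nn.functional as F

batch, seq, heads, dim = 2, 128, 8, 64
q = torch.randn(batch, seq, heads, dim)
k = torch.randn(batch, seq, heads, dim)
v = torch.randn(batch, seq, heads, dim)

# SDPA expects [batch, heads, seq, dim]; transpose in and out of that layout.
out = F.scaled_dot_product_attention(
    q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2),
    scale=dim ** -0.5,      # matches LocalAttention's default softmax_scale
    is_causal=False,        # causal=False is the layer's default
).transpose(1, 2)

print(out.shape)  # torch.Size([2, 128, 8, 64])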

Functions

fastvideo.attention.selector

Classes

Functions

fastvideo.attention.selector.backend_name_to_enum
backend_name_to_enum(backend_name: str) -> AttentionBackendEnum | None

Convert a string backend name to an AttentionBackendEnum value.

Returns:

  • AttentionBackendEnum: enum value if backend_name is a valid in-tree type
  • None: otherwise (invalid in-tree name, or an out-of-tree platform is loaded)

Source code in fastvideo/attention/selector.py
def backend_name_to_enum(backend_name: str) -> AttentionBackendEnum | None:
    """
    Convert a string backend name to a _Backend enum value.

    Returns:
    * _Backend: enum value if backend_name is a valid in-tree type
    * None: otherwise it's an invalid in-tree type or an out-of-tree platform is
            loaded.
    """
    assert backend_name is not None
    return AttentionBackendEnum[backend_name] if backend_name in AttentionBackendEnum.__members__ else \
          None
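A quick sanity check of the mapping (illustrative only; "FLASH_ATTN" below stands in for whatever member names AttentionBackendEnum actually defines):

from fastvideo.attention.selector import backend_name_to_enum

print(backend_name_to_enum("NOT_A_BACKEND"))   # None: not an in-tree backend name
# Any key of AttentionBackendEnum.__members__ returns the matching member, e.g.:
# backend_name_to_enum("FLASH_ATTN")           # hypothetical member name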
fastvideo.attention.selector.get_env_variable_attn_backend
get_env_variable_attn_backend() -> AttentionBackendEnum | None

Get the backend override specified by the FastVideo attention backend environment variable, if one is specified.

Returns:

  • AttentionBackendEnum value if an override is specified
  • None otherwise
Source code in fastvideo/attention/selector.py
def get_env_variable_attn_backend() -> AttentionBackendEnum | None:
    '''
    Get the backend override specified by the FastVideo attention
    backend environment variable, if one is specified.

    Returns:

    * _Backend enum value if an override is specified
    * None otherwise
    '''
    backend_name = os.environ.get(STR_BACKEND_ENV_VAR)
    return (None if backend_name is None else backend_name_to_enum(backend_name))
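Illustrative use, assuming STR_BACKEND_ENV_VAR is importable from the selector module and that "FLASH_ATTN" is a valid backend name:

import os
from fastvideo.attention.selector import (STR_BACKEND_ENV_VAR,
                                          get_env_variable_attn_backend)

os.environ[STR_BACKEND_ENV_VAR] = "FLASH_ATTN"   # hypothetical backend name
print(get_env_variable_attn_backend())           # matching enum member, or None if the name is invalid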
fastvideo.attention.selector.get_global_forced_attn_backend
get_global_forced_attn_backend() -> AttentionBackendEnum | None

Get the currently-forced choice of attention backend, or None if auto-selection is currently enabled.

Source code in fastvideo/attention/selector.py
def get_global_forced_attn_backend() -> AttentionBackendEnum | None:
    '''
    Get the currently-forced choice of attention backend,
    or None if auto-selection is currently enabled.
    '''
    return forced_attn_backend
fastvideo.attention.selector.global_force_attn_backend
global_force_attn_backend(attn_backend: AttentionBackendEnum | None) -> None

Force all attention operations to use a specified backend.

Passing None for the argument re-enables automatic backend selection.

Arguments:

  • attn_backend: backend selection (None to revert to auto)
Source code in fastvideo/attention/selector.py
def global_force_attn_backend(attn_backend: AttentionBackendEnum | None) -> None:
    '''
    Force all attention operations to use a specified backend.

    Passing `None` for the argument re-enables automatic
    backend selection.

    Arguments:

    * attn_backend: backend selection (None to revert to auto)
    '''
    global forced_attn_backend
    forced_attn_backend = attn_backend
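A minimal sketch of forcing and then clearing the global override (the enum member in the comment is hypothetical):

from fastvideo.attention.selector import (get_global_forced_attn_backend,
                                          global_force_attn_backend)

# global_force_attn_backend(AttentionBackendEnum.FLASH_ATTN)  # hypothetical member
global_force_attn_backend(None)           # revert to automatic backend selection
print(get_global_forced_attn_backend())   # None while auto-selection is active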
fastvideo.attention.selector.global_force_attn_backend_context_manager
global_force_attn_backend_context_manager(attn_backend: AttentionBackendEnum) -> Generator[None, None, None]

Globally force a FastVideo attention backend override within a context manager. The prior global override is restored when the context manager exits.

Arguments:

  • attn_backend: attention backend to force

Returns:

  • Generator
Source code in fastvideo/attention/selector.py
@contextmanager
def global_force_attn_backend_context_manager(attn_backend: AttentionBackendEnum) -> Generator[None, None, None]:
    '''
    Globally force a FastVideo attention backend override within a
    context manager, reverting the global attention backend
    override to its prior state upon exiting the context
    manager.

    Arguments:

    * attn_backend: attention backend to force

    Returns:

    * Generator
    '''

    # Save the current state of the global backend override (if any)
    original_value = get_global_forced_attn_backend()

    # Globally force the new backend override
    global_force_attn_backend(attn_backend)

    # Yield control back to the enclosed code block
    try:
        yield
    finally:
        # Revert the original global backend override, if any
        global_force_attn_backend(original_value)
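A usage sketch, assuming some_backend is a valid AttentionBackendEnum member obtained elsewhere; the override applies only inside the with block:

from fastvideo.attention.selector import (get_global_forced_attn_backend,
                                          global_force_attn_backend_context_manager)

def run_with_forced_backend(some_backend):
    """some_backend: a valid AttentionBackendEnum member (hypothetical here)."""
    before = get_global_forced_attn_backend()
    # Inside the block, the forced backend is what the selector reports (and,
    # per the docstring, what attention operations will use).
    with global_force_attn_backend_context_manager(some_backend):
        ...
    # On exit the prior override (or auto-selection) is restored,
    # even if the block raised an exception.
    assert get_global_forced_attn_backend() == before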