Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion .jenkins/validate_tutorials_built.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,6 @@
"intermediate_source/torchrec_intro_tutorial.py", #failing with 2.8 reenable after 3498
"intermediate_source/torch_export_tutorial.py", # failing with 2.11 issue #3773
"beginner_source/mosaic_memory_profiling_tutorial.py", # failing with 2.11 issue #3774
"intermediate_source/variable_length_attention_tutorial.py", # failing with 2.11 issue #3775
]

def tutorial_source_dirs() -> List[Path]:
Expand Down
24 changes: 15 additions & 9 deletions intermediate_source/variable_length_attention_tutorial.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,27 +99,33 @@
# cu_seq_k: torch.Tensor,
# max_q: int,
# max_k: int,
# is_causal: bool = False,
# *,
# return_aux: AuxRequest | None = None,
# scale: float | None = None,
# window_size: tuple[int, int] = (-1, -1),
# ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
#
# ``query``, ``key``, and ``value`` correspond to the ``q``, ``k``, and
# ``v`` of the packed input. ``cu_seq_q`` and ``cu_seq_k`` are the
# cumulative indices for query and key/value, respectively. These mark the
# logical boundaries that separate the documents in our input. ``max_q``
# and ``max_k`` are the maximum sequence lengths of query and key,
# respectively. ``is_causal`` applies causal masking if set to True and
# ``return_aux`` specifies which auxiliary outputs to return (ie ``lse``).
# respectively. ``return_aux`` specifies which auxiliary outputs to return
# (i.e., ``lse``). ``scale`` is an optional scaling factor applied to the
# attention scores before softmax. ``window_size`` is a ``(left, right)``
# tuple that controls sliding window attention: use ``(-1, -1)`` for full
# attention (default), ``(-1, 0)`` for causal attention, or ``(W, 0)``
# for causal attention with a sliding window of size ``W``.

######################################################################
# **Note on causal masking**
# When ``is_causal`` is set to True, causal masking is applied which means
# that tokens can only attend to previous tokens. For bidirectional
# attention, set this flag to False.
# When ``window_size`` is set to ``(-1, 0)``, causal masking is applied
# which means that tokens can only attend to previous tokens. For
# bidirectional (full) attention, use the default ``(-1, -1)``.
#
# In torchtitan (PyTorch's pretraining framework), we set
# ``is_causal = True`` uniformly to prevent the model from cheating and
# artificially driving the loss down too quickly.
# ``window_size = (-1, 0)`` uniformly to prevent the model from cheating
# and artificially driving the loss down too quickly.


######################################################################
Expand Down Expand Up @@ -241,7 +247,7 @@ def forward(
cu_seq_k=cu_seq,
max_q=max_len,
max_k=max_len,
is_causal=True,
window_size=(-1, 0),
)
attn_out = attn_out.view(-1, self.embed_dim)
attn_out = self.out_proj(attn_out)
Expand Down
Loading