agreeupon committed on
Commit 00836ee · verified · 1 Parent(s): 1565152

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. flash-attention/csrc/flash_attn/flash_api.cpp +1550 -0
  2. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim128_bf16_causal_sm80.cu +7 -0
  3. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim128_bf16_sm80.cu +7 -0
  4. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim128_fp16_causal_sm80.cu +7 -0
  5. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim128_fp16_sm80.cu +7 -0
  6. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim160_bf16_causal_sm80.cu +7 -0
  7. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim160_bf16_sm80.cu +7 -0
  8. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim160_fp16_causal_sm80.cu +7 -0
  9. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim160_fp16_sm80.cu +7 -0
  10. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim192_bf16_causal_sm80.cu +7 -0
  11. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim192_bf16_sm80.cu +7 -0
  12. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim192_fp16_causal_sm80.cu +7 -0
  13. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim192_fp16_sm80.cu +7 -0
  14. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim224_bf16_causal_sm80.cu +7 -0
  15. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim224_bf16_sm80.cu +7 -0
  16. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim224_fp16_causal_sm80.cu +7 -0
  17. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim224_fp16_sm80.cu +7 -0
  18. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim256_bf16_causal_sm80.cu +7 -0
  19. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim256_bf16_sm80.cu +7 -0
  20. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim256_fp16_causal_sm80.cu +7 -0
  21. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim256_fp16_sm80.cu +7 -0
  22. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim32_bf16_causal_sm80.cu +7 -0
  23. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim32_bf16_sm80.cu +7 -0
  24. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim32_fp16_causal_sm80.cu +7 -0
  25. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim32_fp16_sm80.cu +7 -0
  26. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim64_bf16_causal_sm80.cu +7 -0
  27. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim64_bf16_sm80.cu +7 -0
  28. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim64_fp16_causal_sm80.cu +7 -0
  29. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim64_fp16_sm80.cu +7 -0
  30. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim96_bf16_causal_sm80.cu +7 -0
  31. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim96_bf16_sm80.cu +7 -0
  32. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim96_fp16_causal_sm80.cu +7 -0
  33. flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim96_fp16_sm80.cu +7 -0
  34. flash-attention/csrc/flash_attn/src/generate_kernels.py +111 -0
  35. flash-attention/csrc/flash_attn/src/kernel_traits.h +344 -0
  36. flash-attention/csrc/flash_attn/src/mask.h +213 -0
  37. flash-attention/csrc/flash_attn/src/philox.cuh +51 -0
  38. flash-attention/csrc/flash_attn/src/rotary.h +152 -0
  39. flash-attention/csrc/flash_attn/src/softmax.h +188 -0
  40. flash-attention/csrc/flash_attn/src/static_switch.h +117 -0
  41. flash-attention/csrc/flash_attn/src/utils.h +393 -0
  42. flash-attention/csrc/ft_attention/README.md +14 -0
  43. flash-attention/csrc/ft_attention/cuda_bf16_fallbacks.cuh +257 -0
  44. flash-attention/csrc/ft_attention/cuda_bf16_wrapper.h +23 -0
  45. flash-attention/csrc/ft_attention/decoder_masked_multihead_attention.cu +149 -0
  46. flash-attention/csrc/ft_attention/decoder_masked_multihead_attention.h +192 -0
  47. flash-attention/csrc/ft_attention/decoder_masked_multihead_attention_template.hpp +1619 -0
  48. flash-attention/csrc/ft_attention/decoder_masked_multihead_attention_utils.h +2017 -0
  49. flash-attention/csrc/ft_attention/ft_attention.cpp +232 -0
  50. flash-attention/csrc/ft_attention/setup.py +153 -0
flash-attention/csrc/flash_attn/flash_api.cpp ADDED
@@ -0,0 +1,1550 @@
1
+ /******************************************************************************
2
+ * Copyright (c) 2024, Tri Dao.
3
+ ******************************************************************************/
4
+
5
+ // Include these 2 headers instead of torch/extension.h since we don't need all of the torch headers.
6
+ #include <torch/python.h>
7
+ #include <torch/nn/functional.h>
8
+ #include <ATen/cuda/CUDAContext.h>
9
+ #include <c10/cuda/CUDAGuard.h>
10
+
11
+ #include <cutlass/numeric_types.h>
12
+
13
+ #include "flash.h"
14
+ #include "static_switch.h"
15
+
16
+ #define CHECK_DEVICE(x) TORCH_CHECK(x.is_cuda(), #x " must be on CUDA")
17
+ #define CHECK_SHAPE(x, ...) TORCH_CHECK(x.sizes() == torch::IntArrayRef({__VA_ARGS__}), #x " must have shape (" #__VA_ARGS__ ")")
18
+ #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
19
+
20
+
21
+ void set_params_fprop(Flash_fwd_params &params,
22
+ // sizes
23
+ const size_t b,
24
+ const size_t seqlen_q,
25
+ const size_t seqlen_k,
26
+ const size_t seqlen_q_rounded,
27
+ const size_t seqlen_k_rounded,
28
+ const size_t h,
29
+ const size_t h_k,
30
+ const size_t d,
31
+ const size_t d_rounded,
32
+ // device pointers
33
+ const at::Tensor q,
34
+ const at::Tensor k,
35
+ const at::Tensor v,
36
+ at::Tensor out,
37
+ void *cu_seqlens_q_d,
38
+ void *cu_seqlens_k_d,
39
+ void *seqused_k,
40
+ void *p_d,
41
+ void *softmax_lse_d,
42
+ float p_dropout,
43
+ float softmax_scale,
44
+ int window_size_left,
45
+ int window_size_right,
46
+ const float softcap,
47
+ bool seqlenq_ngroups_swapped=false,
48
+ const bool unpadded_lse=false) {
49
+
50
+ // Reset the parameters
51
+ params = {};
52
+
53
+ params.is_bf16 = q.dtype() == torch::kBFloat16;
54
+
55
+ // Set the pointers and strides.
56
+ params.q_ptr = q.data_ptr();
57
+ params.k_ptr = k.data_ptr();
58
+ params.v_ptr = v.data_ptr();
59
+ // All strides are in elements, not bytes.
60
+ params.q_row_stride = q.stride(-3);
61
+ params.k_row_stride = k.stride(-3);
62
+ params.v_row_stride = v.stride(-3);
63
+ params.q_head_stride = q.stride(-2);
64
+ params.k_head_stride = k.stride(-2);
65
+ params.v_head_stride = v.stride(-2);
66
+ params.o_ptr = out.data_ptr();
67
+ params.o_row_stride = out.stride(-3);
68
+ params.o_head_stride = out.stride(-2);
69
+
70
+ if (cu_seqlens_q_d == nullptr) {
71
+ params.q_batch_stride = q.stride(0);
72
+ params.k_batch_stride = k.stride(0);
73
+ params.v_batch_stride = v.stride(0);
74
+ params.o_batch_stride = out.stride(0);
75
+ if (seqlenq_ngroups_swapped) {
76
+ params.q_batch_stride *= seqlen_q;
77
+ params.o_batch_stride *= seqlen_q;
78
+ }
79
+ }
80
+
81
+ params.cu_seqlens_q = static_cast<int *>(cu_seqlens_q_d);
82
+ params.cu_seqlens_k = static_cast<int *>(cu_seqlens_k_d);
83
+ params.seqused_k = static_cast<int *>(seqused_k);
84
+
85
+ // P = softmax(QK^T)
86
+ params.p_ptr = p_d;
87
+
88
+ // Softmax sum
89
+ params.softmax_lse_ptr = softmax_lse_d;
90
+
91
+ // Set the dimensions.
92
+ params.b = b;
93
+ params.h = h;
94
+ params.h_k = h_k;
95
+ params.h_h_k_ratio = h / h_k;
96
+ params.seqlen_q = seqlen_q;
97
+ params.seqlen_k = seqlen_k;
98
+ params.seqlen_q_rounded = seqlen_q_rounded;
99
+ params.seqlen_k_rounded = seqlen_k_rounded;
100
+ params.d = d;
101
+ params.d_rounded = d_rounded;
102
+
103
+ // Set the different scale values.
104
+ #ifdef FLASHATTENTION_DISABLE_SOFTCAP
105
+ TORCH_CHECK(softcap <= 0.0, "This flash attention build does not support softcap.");
106
+ #endif
107
+ if (softcap > 0.0) {
108
+ params.softcap = softmax_scale / softcap;
109
+ params.scale_softmax = softcap;
110
+ params.scale_softmax_log2 = softcap * M_LOG2E;
111
+ } else {
112
+ // Remove potential NaN
113
+ params.softcap = 0.0;
114
+ params.scale_softmax = softmax_scale;
115
+ params.scale_softmax_log2 = softmax_scale * M_LOG2E;
116
+ }
117
+
118
+ // Set this to probability of keeping an element to simplify things.
119
+ params.p_dropout = 1.f - p_dropout;
120
+ // Convert p from float to int so we don't have to convert the random uint to float to compare.
121
+ // [Minor] We want to round down since when we do the comparison we use <= instead of <
122
+ // params.p_dropout_in_uint = uint32_t(std::floor(params.p_dropout * 4294967295.0));
123
+ // params.p_dropout_in_uint16_t = uint16_t(std::floor(params.p_dropout * 65535.0));
124
+ params.p_dropout_in_uint8_t = uint8_t(std::floor(params.p_dropout * 255.0));
125
+ params.rp_dropout = 1.f / params.p_dropout;
126
+ params.scale_softmax_rp_dropout = params.rp_dropout * params.scale_softmax;
127
+ TORCH_CHECK(p_dropout < 1.f);
128
+ #ifdef FLASHATTENTION_DISABLE_DROPOUT
129
+ TORCH_CHECK(p_dropout == 0.0f, "This flash attention build does not support dropout.");
130
+ #endif
131
+
132
+ // Causal is the special case where window_size_right == 0 and window_size_left < 0.
133
+ // Local is the more general case where window_size_right >= 0 or window_size_left >= 0.
134
+ params.is_causal = window_size_left < 0 && window_size_right == 0;
135
+
136
+ if (window_size_left < 0 && window_size_right >= 0) { window_size_left = seqlen_k; }
137
+ if (window_size_left >= 0 && window_size_right < 0) { window_size_right = seqlen_k; }
138
+ params.window_size_left = window_size_left;
139
+ params.window_size_right = window_size_right;
140
+
141
+ #ifdef FLASHATTENTION_DISABLE_LOCAL
142
+ TORCH_CHECK(params.is_causal || (window_size_left < 0 && window_size_right < 0),
143
+ "This flash attention build does not support local attention.");
144
+ #endif
145
+
146
+ params.is_seqlens_k_cumulative = true;
147
+
148
+ #ifdef FLASHATTENTION_DISABLE_UNEVEN_K
149
+ TORCH_CHECK(d == d_rounded, "This flash attention build does not support headdim not being a multiple of 32.");
150
+ #endif
151
+
152
+ params.unpadded_lse = unpadded_lse;
153
+ params.seqlenq_ngroups_swapped = seqlenq_ngroups_swapped;
154
+ }
155
+
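Aside (editorial, not part of the commit): the dropout bookkeeping above stores the keep probability, an 8-bit comparison threshold, and the 1/keep rescale factor. The small standalone sketch below only illustrates that arithmetic; the variable names merely mirror the fields set in set_params_fprop and the example p_dropout value is hypothetical.

// Illustrative sketch of the dropout encoding done in set_params_fprop.
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
    float p_dropout = 0.1f;                 // probability of dropping an element (hypothetical)
    float p_keep = 1.f - p_dropout;         // params.p_dropout stores the keep probability
    // Round down so the kernel's "rand_u8 <= threshold" comparison never keeps too many elements.
    uint8_t keep_u8 = uint8_t(std::floor(p_keep * 255.0));
    float rp_keep = 1.f / p_keep;           // params.rp_dropout, used to rescale kept values
    std::printf("keep prob = %.3f, uint8 threshold = %d, rescale = %.4f\n",
                p_keep, int(keep_u8), rp_keep);
    return 0;
}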
156
+ void set_params_dgrad(Flash_bwd_params &params,
157
+ // sizes
158
+ const size_t b,
159
+ const size_t seqlen_q,
160
+ const size_t seqlen_k,
161
+ const size_t seqlen_q_rounded,
162
+ const size_t seqlen_k_rounded,
163
+ const size_t h,
164
+ const size_t h_k,
165
+ const size_t d,
166
+ const size_t d_rounded,
167
+ // device pointers
168
+ const at::Tensor q,
169
+ const at::Tensor k,
170
+ const at::Tensor v,
171
+ const at::Tensor out,
172
+ const at::Tensor dout,
173
+ at::Tensor dq,
174
+ at::Tensor dk,
175
+ at::Tensor dv,
176
+ void *cu_seqlens_q_d,
177
+ void *cu_seqlens_k_d,
178
+ void *dq_accum_d,
179
+ void *dk_accum_d,
180
+ void *dv_accum_d,
181
+ void *softmax_lse_d,
182
+ void *dsoftmax_sum_d,
183
+ float p_dropout,
184
+ float softmax_scale,
185
+ int window_size_left,
186
+ int window_size_right,
187
+ const float softcap,
188
+ bool deterministic,
189
+ const bool unpadded_lse) {
190
+
191
+ set_params_fprop(params,
192
+ b, seqlen_q, seqlen_k, seqlen_q_rounded, seqlen_k_rounded, h, h_k, d, d_rounded,
193
+ q, k, v, out,
194
+ cu_seqlens_q_d,
195
+ cu_seqlens_k_d,
196
+ nullptr,
197
+ nullptr,
198
+ softmax_lse_d,
199
+ p_dropout,
200
+ softmax_scale,
201
+ window_size_left,
202
+ window_size_right,
203
+ softcap,
204
+ false, // seqlenq_ngroups_swapped
205
+ unpadded_lse);
206
+
207
+ // Set the pointers and strides.
208
+ params.do_ptr = dout.data_ptr();
209
+ params.do_row_stride = dout.stride(-3);
210
+ params.do_head_stride = dout.stride(-2);
211
+ params.dq_ptr = dq.data_ptr();
212
+ params.dk_ptr = dk.data_ptr();
213
+ params.dv_ptr = dv.data_ptr();
214
+ params.dq_row_stride = dq.stride(-3);
215
+ params.dk_row_stride = dk.stride(-3);
216
+ params.dv_row_stride = dv.stride(-3);
217
+ params.dq_head_stride = dq.stride(-2);
218
+ params.dk_head_stride = dk.stride(-2);
219
+ params.dv_head_stride = dv.stride(-2);
220
+
221
+ if (cu_seqlens_q_d == nullptr) {
222
+ params.do_batch_stride = dout.stride(0);
223
+ params.dq_batch_stride = dq.stride(0);
224
+ params.dk_batch_stride = dk.stride(0);
225
+ params.dv_batch_stride = dv.stride(0);
226
+ }
227
+
228
+ params.dq_accum_ptr = dq_accum_d;
229
+ params.dk_accum_ptr = dk_accum_d;
230
+ params.dv_accum_ptr = dv_accum_d;
231
+
232
+ // Softmax sum
233
+ params.dsoftmax_sum = dsoftmax_sum_d;
234
+
235
+ params.deterministic = deterministic;
236
+ }
237
+
238
+ void run_mha_fwd(Flash_fwd_params &params, cudaStream_t stream, bool force_split_kernel=false) {
239
+ FP16_SWITCH(!params.is_bf16, [&] {
240
+ HEADDIM_SWITCH(params.d, [&] {
241
+ BOOL_SWITCH(params.is_causal, Is_causal, [&] {
242
+ if (params.num_splits <= 1 && !force_split_kernel) { // If num_splits was not set, it is 0
243
+ run_mha_fwd_<elem_type, kHeadDim, Is_causal>(params, stream);
244
+ } else {
245
+ run_mha_fwd_splitkv_dispatch<elem_type, kHeadDim, Is_causal>(params, stream);
246
+ }
247
+ });
248
+ });
249
+ });
250
+ }
251
+
252
+ // Find the number of splits that maximizes the occupancy. For example, if we have
253
+ // batch * n_heads = 48 and we have 108 SMs, having 2 splits (efficiency = 0.89) is
254
+ // better than having 3 splits (efficiency = 0.67). However, we also don't want too many
255
+ // splits as that would incur more HBM reads/writes.
256
+ // So we find the best efficiency, then find the smallest number of splits that gets 85%
257
+ // of the best efficiency.
258
+ inline int num_splits_heuristic(int batch_nheads_mblocks, int num_SMs, int num_n_blocks, int max_splits) {
259
+ // If we have enough to almost fill the SMs, then just use 1 split
260
+ if (batch_nheads_mblocks >= 0.8f * num_SMs) { return 1; }
261
+ max_splits = std::min({max_splits, num_SMs, num_n_blocks});
262
+ float max_efficiency = 0.f;
263
+ std::vector<float> efficiency;
264
+ efficiency.reserve(max_splits);
265
+ auto ceildiv = [](int a, int b) { return (a + b - 1) / b; };
266
+ // Some splits are not eligible. For example, if we have 64 blocks and choose 11 splits,
267
+ // we'll have 6 * 10 + 4 blocks. If we choose 12 splits, we'll have 6 * 11 + (-2) blocks
268
+ // (i.e. it's 11 splits anyway).
269
+ // So we check if the number of blocks per split is the same as the previous num_splits.
270
+ auto is_split_eligible = [&ceildiv, &num_n_blocks](int num_splits) {
271
+ return num_splits == 1 || ceildiv(num_n_blocks, num_splits) != ceildiv(num_n_blocks, num_splits - 1);
272
+ };
273
+ for (int num_splits = 1; num_splits <= max_splits; num_splits++) {
274
+ if (!is_split_eligible(num_splits)) {
275
+ efficiency.push_back(0.f);
276
+ } else {
277
+ float n_waves = float(batch_nheads_mblocks * num_splits) / num_SMs;
278
+ float eff = n_waves / ceil(n_waves);
279
+ // printf("num_splits = %d, eff = %f\n", num_splits, eff);
280
+ if (eff > max_efficiency) { max_efficiency = eff; }
281
+ efficiency.push_back(eff);
282
+ }
283
+ }
284
+ for (int num_splits = 1; num_splits <= max_splits; num_splits++) {
285
+ if (!is_split_eligible(num_splits)) { continue; }
286
+ if (efficiency[num_splits - 1] >= 0.85 * max_efficiency) {
287
+ // printf("num_splits chosen = %d\n", num_splits);
288
+ return num_splits;
289
+ }
290
+ }
291
+ return 1;
292
+ }
293
+
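Aside (editorial, not part of the commit): the efficiency numbers quoted in the comment above (48 blocks of work on 108 SMs) can be recomputed with the same wave formula the heuristic uses. This sketch is illustrative only.

// Illustrative recomputation of the wave efficiency from the num_splits_heuristic comment.
#include <cmath>
#include <cstdio>

int main() {
    int batch_nheads_mblocks = 48, num_SMs = 108;   // the example from the comment above
    for (int num_splits = 1; num_splits <= 3; ++num_splits) {
        float n_waves = float(batch_nheads_mblocks * num_splits) / num_SMs;
        float eff = n_waves / std::ceil(n_waves);
        std::printf("num_splits = %d -> efficiency = %.2f\n", num_splits, eff);
    }
    // Prints roughly 0.44, 0.89, 0.67: 2 splits is the sweet spot, matching the comment.
    return 0;
}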
294
+ void set_params_splitkv(Flash_fwd_params &params, const int batch_size,
295
+ const int num_heads, const int head_size, const int max_seqlen_k, const int max_seqlen_q,
296
+ const int head_size_rounded, const float p_dropout,
297
+ const int num_splits, cudaDeviceProp *dprops, struct c10::TensorOptions opts) {
298
+
299
+ // This needs to match with run_mha_fwd_splitkv_dispatch
300
+ const int block_n = head_size <= 64 ? 256 : (head_size <= 128 ? 128 : 64);
301
+ const int num_n_blocks = (max_seqlen_k + block_n - 1) / block_n;
302
+ // Technically kBlockM = 64 only for the splitKV kernels, not the standard kernel.
303
+ // In any case we don't expect seqlen_q to be larger than 64 for inference.
304
+ const int num_m_blocks = (max_seqlen_q + 64 - 1) / 64;
305
+ params.num_splits = num_splits;
306
+ if (p_dropout == 0.0f) { // SplitKV is not implemented for dropout
307
+ if (num_splits < 1) {
308
+ // We multiply number of SMs by 2 to hard-code the fact that we're using 128 threads per block.
309
+ params.num_splits = num_splits_heuristic(batch_size * num_heads * num_m_blocks, dprops->multiProcessorCount * 2, num_n_blocks, 128);
310
+ }
311
+ if (params.num_splits > 1) {
312
+ at::Tensor softmax_lse_accum = torch::empty({params.num_splits, batch_size, num_heads, max_seqlen_q}, opts.dtype(at::kFloat));
313
+ at::Tensor out_accum = torch::empty({params.num_splits, batch_size, num_heads, max_seqlen_q, head_size_rounded}, opts.dtype(at::kFloat));
314
+ params.softmax_lseaccum_ptr = softmax_lse_accum.data_ptr();
315
+ params.oaccum_ptr = out_accum.data_ptr();
316
+ }
317
+ TORCH_CHECK(params.num_splits <= 128, "num_splits > 128 not supported");
318
+ }
319
+ }
320
+
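Aside (editorial, not part of the commit): for a concrete feel of how set_params_splitkv sizes the problem before calling the heuristic, here is an illustrative sketch with hypothetical decode-style dimensions; none of the numbers come from the diff.

// Illustrative sizing of the split-KV grid, mirroring set_params_splitkv.
#include <cstdio>

int main() {
    // Hypothetical decode-style shape: these numbers are not from the commit.
    int batch = 4, num_heads = 32, head_size = 128, max_seqlen_k = 4096, max_seqlen_q = 1;
    int block_n = head_size <= 64 ? 256 : (head_size <= 128 ? 128 : 64);   // 128
    int num_n_blocks = (max_seqlen_k + block_n - 1) / block_n;             // 32
    int num_m_blocks = (max_seqlen_q + 64 - 1) / 64;                       // 1 (kBlockM = 64)
    // The heuristic is then fed batch * num_heads * num_m_blocks units of work
    // against 2 * multiProcessorCount, reflecting 128 threads per block.
    std::printf("block_n=%d, n_blocks=%d, m_blocks=%d, work=%d\n",
                block_n, num_n_blocks, num_m_blocks, batch * num_heads * num_m_blocks);
    return 0;
}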
321
+ void set_params_alibi(Flash_fwd_params &params, c10::optional<at::Tensor> &alibi_slopes_, int batch_size, int num_heads){
322
+ #ifdef FLASHATTENTION_DISABLE_ALIBI
323
+ TORCH_CHECK(!alibi_slopes_.has_value(), "This flash attention build does not support alibi.");
324
+ params.alibi_slopes_ptr = nullptr;
325
+ #else
326
+ if (alibi_slopes_.has_value()) {
327
+ auto alibi_slopes = alibi_slopes_.value();
328
+ TORCH_CHECK(alibi_slopes.dtype() == torch::kFloat32, "ALiBi slopes must have dtype fp32");
329
+ CHECK_DEVICE(alibi_slopes);
330
+ TORCH_CHECK(alibi_slopes.stride(-1) == 1, "ALiBi slopes tensor must have contiguous last dimension");
331
+ TORCH_CHECK(alibi_slopes.sizes() == torch::IntArrayRef({num_heads}) || alibi_slopes.sizes() == torch::IntArrayRef({batch_size, num_heads}));
332
+ params.alibi_slopes_ptr = alibi_slopes.data_ptr();
333
+ params.alibi_slopes_batch_stride = alibi_slopes.dim() == 2 ? alibi_slopes.stride(0) : 0;
334
+ } else {
335
+ params.alibi_slopes_ptr = nullptr;
336
+ }
337
+ #endif
338
+ }
339
+
340
+ std::vector<at::Tensor>
341
+ mha_fwd(at::Tensor &q, // batch_size x seqlen_q x num_heads x head_size
342
+ const at::Tensor &k, // batch_size x seqlen_k x num_heads_k x head_size
343
+ const at::Tensor &v, // batch_size x seqlen_k x num_heads_k x head_size
344
+ c10::optional<at::Tensor> &out_, // batch_size x seqlen_q x num_heads x head_size
345
+ c10::optional<at::Tensor> &alibi_slopes_, // num_heads or batch_size x num_heads
346
+ const float p_dropout,
347
+ const float softmax_scale,
348
+ bool is_causal,
349
+ int window_size_left,
350
+ int window_size_right,
351
+ const float softcap,
352
+ const bool return_softmax,
353
+ c10::optional<at::Generator> gen_) {
354
+
355
+ auto dprops = at::cuda::getCurrentDeviceProperties();
356
+ // bool is_sm75 = dprops->major == 7 && dprops->minor == 5;
357
+ bool is_sm8x = dprops->major == 8 && dprops->minor >= 0;
358
+ bool is_sm90 = dprops->major == 9 && dprops->minor == 0;
359
+ TORCH_CHECK(is_sm90 || is_sm8x, "FlashAttention only supports Ampere GPUs or newer.");
360
+ // We will support Turing in the near future
361
+ // TORCH_CHECK(is_sm90 || is_sm8x || is_sm75, "FlashAttention only supports Turing GPUs or newer.");
362
+
363
+ auto q_dtype = q.dtype();
364
+ TORCH_CHECK(q_dtype == torch::kFloat16 || q_dtype == torch::kBFloat16,
365
+ "FlashAttention only supports fp16 and bf16 data types");
366
+ if (q_dtype == torch::kBFloat16) {
367
+ TORCH_CHECK(is_sm90 || is_sm8x, "bfloat16 is only supported on Ampere GPUs or newer");
368
+ }
369
+ TORCH_CHECK(k.dtype() == q_dtype, "query and key must have the same dtype");
370
+ TORCH_CHECK(v.dtype() == q_dtype, "query and value must have the same dtype");
371
+
372
+ CHECK_DEVICE(q); CHECK_DEVICE(k); CHECK_DEVICE(v);
373
+
374
+ TORCH_CHECK(q.stride(-1) == 1, "Input tensor must have contiguous last dimension");
375
+ TORCH_CHECK(k.stride(-1) == 1, "Input tensor must have contiguous last dimension");
376
+ TORCH_CHECK(v.stride(-1) == 1, "Input tensor must have contiguous last dimension");
377
+
378
+ const auto sizes = q.sizes();
379
+
380
+ const int batch_size = sizes[0];
381
+ int seqlen_q = sizes[1];
382
+ int num_heads = sizes[2];
383
+ const int head_size_og = sizes[3];
384
+ const int seqlen_k = k.size(1);
385
+ const int num_heads_k = k.size(2);
386
+ TORCH_CHECK(batch_size > 0, "batch size must be positive");
387
+ TORCH_CHECK(head_size_og <= 256, "FlashAttention forward only supports head dimension at most 256");
388
+ TORCH_CHECK(num_heads % num_heads_k == 0, "Number of heads in key/value must divide number of heads in query");
389
+
390
+ if (softcap > 0.f) { TORCH_CHECK(p_dropout == 0.f, "Softcapping does not support dropout for now"); }
391
+
392
+ if (window_size_left >= seqlen_k) { window_size_left = -1; }
393
+ if (window_size_right >= seqlen_k) { window_size_right = -1; }
394
+
395
+ // causal=true is the same as causal=false in this case
396
+ if (seqlen_q == 1 && !alibi_slopes_.has_value()) { is_causal = false; }
397
+ if (is_causal) { window_size_right = 0; }
398
+
399
+ // Faster to transpose q from (b, 1, (nheads_kv ngroups), d) to (b, ngroups, nheads_kv, d) in this case
400
+ // H/t Daniel Haziza
401
+ const int seqlenq_ngroups_swapped = seqlen_q == 1 && num_heads > num_heads_k && window_size_left < 0 && window_size_right < 0 && p_dropout == 0.f && head_size_og % 8 == 0 && !alibi_slopes_.has_value();
402
+ const int ngroups = num_heads / num_heads_k;
403
+ if (seqlenq_ngroups_swapped) {
404
+ q = q.reshape({batch_size, num_heads_k, ngroups, head_size_og}).transpose(1, 2);
405
+ seqlen_q = ngroups;
406
+ num_heads = num_heads_k;
407
+ }
408
+
409
+ CHECK_SHAPE(q, batch_size, seqlen_q, num_heads, head_size_og);
410
+ CHECK_SHAPE(k, batch_size, seqlen_k, num_heads_k, head_size_og);
411
+ CHECK_SHAPE(v, batch_size, seqlen_k, num_heads_k, head_size_og);
412
+
413
+ at::Tensor q_padded, k_padded, v_padded;
414
+ if (head_size_og % 8 != 0) {
415
+ q_padded = torch::nn::functional::pad(q, torch::nn::functional::PadFuncOptions({0, 8 - head_size_og % 8}));
416
+ k_padded = torch::nn::functional::pad(k, torch::nn::functional::PadFuncOptions({0, 8 - head_size_og % 8}));
417
+ v_padded = torch::nn::functional::pad(v, torch::nn::functional::PadFuncOptions({0, 8 - head_size_og % 8}));
418
+ } else {
419
+ q_padded = q;
420
+ k_padded = k;
421
+ v_padded = v;
422
+ }
423
+
424
+ at::Tensor out;
425
+ if (out_.has_value()) {
426
+ out = out_.value();
427
+ TORCH_CHECK(out.dtype() == q_dtype, "Output must have the same dtype as inputs");
428
+ CHECK_DEVICE(out);
429
+ TORCH_CHECK(out.stride(-1) == 1, "Output tensor must have contiguous last dimension");
430
+ CHECK_SHAPE(out, batch_size, sizes[1], sizes[2], head_size_og);
431
+ if (seqlenq_ngroups_swapped) {
432
+ out = out.reshape({batch_size, num_heads_k, ngroups, head_size_og}).transpose(1, 2);
433
+ }
434
+ if (head_size_og % 8 != 0) { out = torch::empty_like(q_padded); }
435
+ } else {
436
+ out = torch::empty_like(q_padded);
437
+ }
438
+
439
+ auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
440
+ const int head_size = round_multiple(head_size_og, 8);
441
+ const int head_size_rounded = round_multiple(head_size, 32);
442
+ const int seqlen_q_rounded = round_multiple(seqlen_q, 128);
443
+ const int seqlen_k_rounded = round_multiple(seqlen_k, 128);
444
+
445
+ // Otherwise the kernel will be launched from cuda:0 device
446
+ // Cast to char to avoid compiler warning about narrowing
447
+ at::cuda::CUDAGuard device_guard{(char)q.get_device()};
448
+
449
+ auto opts = q.options();
450
+
451
+ auto softmax_lse = torch::empty({batch_size, num_heads, seqlen_q}, opts.dtype(at::kFloat));
452
+ at::Tensor p;
453
+ // Only return softmax if there's dropout to reduce compilation time
454
+ if (return_softmax) {
455
+ TORCH_CHECK(p_dropout > 0.0f, "return_softmax is only supported when p_dropout > 0.0");
456
+ p = torch::empty({ batch_size, num_heads, seqlen_q_rounded, seqlen_k_rounded }, opts);
457
+ }
458
+
459
+ Flash_fwd_params params;
460
+ set_params_fprop(params,
461
+ batch_size,
462
+ seqlen_q, seqlen_k,
463
+ seqlen_q_rounded, seqlen_k_rounded,
464
+ num_heads, num_heads_k,
465
+ head_size, head_size_rounded,
466
+ q_padded, k_padded, v_padded, out,
467
+ /*cu_seqlens_q_d=*/nullptr,
468
+ /*cu_seqlens_k_d=*/nullptr,
469
+ /*seqused_k=*/nullptr,
470
+ return_softmax ? p.data_ptr() : nullptr,
471
+ softmax_lse.data_ptr(),
472
+ p_dropout,
473
+ softmax_scale,
474
+ window_size_left,
475
+ window_size_right,
476
+ softcap
477
+ );
478
+
479
+
480
+ set_params_splitkv(params, batch_size, num_heads,
481
+ head_size, seqlen_k, seqlen_q,
482
+ head_size_rounded, p_dropout, /*num_splits*/0, dprops, opts);
483
+
484
+ // number of times random will be generated per thread, to offset philox counter in thc random
485
+ // state
486
+ // We use a custom RNG that increases the offset by batch_size * nheads * 32.
487
+ int64_t counter_offset = params.b * params.h * 32;
488
+ auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA);
489
+ auto rng_state = torch::empty({2}, options.dtype(torch::kInt64));
490
+ // Forward kernel will populate memory with the seed and offset.
491
+ params.rng_state = reinterpret_cast<uint64_t*>(rng_state.data_ptr());
492
+
493
+ if (p_dropout > 0.0) {
494
+ auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(
495
+ gen_, at::cuda::detail::getDefaultCUDAGenerator());
496
+ // See Note [Acquire lock when using random generators]
497
+ std::lock_guard<std::mutex> lock(gen->mutex_);
498
+ params.philox_args = gen->philox_cuda_state(counter_offset);
499
+ }
500
+
501
+ set_params_alibi(params, alibi_slopes_, batch_size, num_heads);
502
+
503
+ if (seqlen_k > 0) {
504
+ auto stream = at::cuda::getCurrentCUDAStream().stream();
505
+ run_mha_fwd(params, stream);
506
+ } else {
507
+ // If seqlen_k == 0, then we have an empty tensor. We need to set the output to 0.
508
+ out.zero_();
509
+ softmax_lse.fill_(std::numeric_limits<float>::infinity());
510
+ }
511
+
512
+ at::Tensor out_padded = out;
513
+ if (head_size_og % 8 != 0) {
514
+ out = out.index({"...", torch::indexing::Slice(torch::indexing::None, head_size_og)});
515
+ if (out_.has_value()) { out_.value().copy_(out); }
516
+ }
517
+
518
+ if (seqlenq_ngroups_swapped) {
519
+ out = out.transpose(1, 2).reshape({batch_size, 1, num_heads_k * seqlen_q, head_size_og});
520
+ out_padded = out_padded.transpose(1, 2).reshape({batch_size, 1, num_heads_k * seqlen_q, head_size_og});
521
+ q_padded = q_padded.transpose(1, 2).reshape({batch_size, 1, num_heads_k * seqlen_q, head_size_og});
522
+ softmax_lse = softmax_lse.reshape({batch_size, num_heads_k * seqlen_q, 1});
523
+ }
524
+ return {out, q_padded, k_padded, v_padded, out_padded, softmax_lse, p, rng_state};
525
+ }
526
+
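Aside (editorial, not part of the commit): mha_fwd rounds the head dimension to a multiple of 8 (then 32) and the sequence lengths to multiples of 128 before setting up the kernel. The short sketch below only illustrates that rounding; the example sizes are hypothetical.

// Illustrative rounding of head size and sequence lengths, as in mha_fwd.
#include <cstdio>

int main() {
    auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
    int head_size_og = 80, seqlen_q = 1000, seqlen_k = 1000;   // hypothetical sizes
    int head_size = round_multiple(head_size_og, 8);           // 80 (already a multiple of 8)
    int head_size_rounded = round_multiple(head_size, 32);     // 96
    int seqlen_q_rounded = round_multiple(seqlen_q, 128);      // 1024
    int seqlen_k_rounded = round_multiple(seqlen_k, 128);      // 1024
    std::printf("%d %d %d %d\n", head_size, head_size_rounded, seqlen_q_rounded, seqlen_k_rounded);
    return 0;
}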
527
+ std::vector<at::Tensor>
528
+ mha_varlen_fwd(at::Tensor &q, // total_q x num_heads x head_size, total_q := \sum_{i=0}^{b} s_i
529
+ const at::Tensor &k, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i or num_blocks x page_block_size x num_heads_k x head_size if there's a block_table.
530
+ const at::Tensor &v, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i or num_blocks x page_block_size x num_heads_k x head_size if there's a block_table.
531
+ c10::optional<at::Tensor> &out_, // total_q x num_heads x head_size, total_q := \sum_{i=0}^{b} s_i
532
+ const at::Tensor &cu_seqlens_q, // b+1
533
+ const at::Tensor &cu_seqlens_k, // b+1
534
+ c10::optional<at::Tensor> &seqused_k, // b. If given, only this many elements of each batch element's keys are used.
535
+ c10::optional<at::Tensor> &block_table_, // batch_size x max_num_blocks_per_seq
536
+ c10::optional<at::Tensor> &alibi_slopes_, // num_heads or b x num_heads
537
+ int max_seqlen_q,
538
+ const int max_seqlen_k,
539
+ const float p_dropout,
540
+ const float softmax_scale,
541
+ const bool zero_tensors,
542
+ bool is_causal,
543
+ int window_size_left,
544
+ int window_size_right,
545
+ const float softcap,
546
+ const bool return_softmax,
547
+ c10::optional<at::Generator> gen_) {
548
+
549
+ auto dprops = at::cuda::getCurrentDeviceProperties();
550
+ // bool is_sm75 = dprops->major == 7 && dprops->minor == 5;
551
+ bool is_sm8x = dprops->major == 8 && dprops->minor >= 0;
552
+ bool is_sm90 = dprops->major == 9 && dprops->minor == 0;
553
+ TORCH_CHECK(is_sm90 || is_sm8x, "FlashAttention only supports Ampere GPUs or newer.");
554
+ // We will support Turing in the near future
555
+ // TORCH_CHECK(is_sm90 || is_sm8x || is_sm75, "FlashAttention only supports Turing GPUs or newer.");
556
+
557
+ auto q_dtype = q.dtype();
558
+ TORCH_CHECK(q_dtype == torch::kFloat16 || q_dtype == torch::kBFloat16,
559
+ "FlashAttention only supports fp16 and bf16 data types");
560
+ if (q_dtype == torch::kBFloat16) {
561
+ TORCH_CHECK(is_sm90 || is_sm8x, "bfloat16 is only supported on Ampere GPUs or newer");
562
+ }
563
+ TORCH_CHECK(k.dtype() == q_dtype, "query and key must have the same dtype");
564
+ TORCH_CHECK(v.dtype() == q_dtype, "query and value must have the same dtype");
565
+ TORCH_CHECK(cu_seqlens_q.dtype() == torch::kInt32, "cu_seqlens_q must have dtype int32");
566
+ TORCH_CHECK(cu_seqlens_k.dtype() == torch::kInt32, "cu_seqlens_k must have dtype int32");
567
+
568
+ CHECK_DEVICE(q); CHECK_DEVICE(k); CHECK_DEVICE(v);
569
+ CHECK_DEVICE(cu_seqlens_q);
570
+ CHECK_DEVICE(cu_seqlens_k);
571
+
572
+ at::Tensor block_table;
573
+ const bool paged_KV = block_table_.has_value();
574
+ if (paged_KV) {
575
+ block_table = block_table_.value();
576
+ CHECK_DEVICE(block_table);
577
+ TORCH_CHECK(block_table.dtype() == torch::kInt32, "block_table must have dtype torch.int32");
578
+ TORCH_CHECK(block_table.stride(-1) == 1, "block_table must have contiguous last dimension");
579
+ }
580
+
581
+ TORCH_CHECK(q.stride(-1) == 1, "Input tensor must have contiguous last dimension");
582
+ TORCH_CHECK(k.stride(-1) == 1, "Input tensor must have contiguous last dimension");
583
+ TORCH_CHECK(v.stride(-1) == 1, "Input tensor must have contiguous last dimension");
584
+ CHECK_CONTIGUOUS(cu_seqlens_q);
585
+ CHECK_CONTIGUOUS(cu_seqlens_k);
586
+
587
+ const auto sizes = q.sizes();
588
+
589
+ const int batch_size = cu_seqlens_q.numel() - 1;
590
+ int num_heads = sizes[1];
591
+ const int head_size_og = sizes[2];
592
+ const int num_heads_k = paged_KV ? k.size(2) : k.size(1);
593
+
594
+ if (softcap > 0.f) { TORCH_CHECK(p_dropout == 0.f, "Softcapping does not support dropout for now"); }
595
+
596
+ const int max_num_blocks_per_seq = !paged_KV ? 0 : block_table.size(1);
597
+ const int num_blocks = !paged_KV ? 0 : k.size(0);
598
+ const int page_block_size = !paged_KV ? 1 : k.size(1);
599
+ TORCH_CHECK(!paged_KV || page_block_size % 256 == 0, "Paged KV cache block size must be divisible by 256");
600
+
601
+ if (max_seqlen_q == 1 && !alibi_slopes_.has_value()) { is_causal = false; } // causal=true is the same as causal=false in this case
602
+ if (is_causal) { window_size_right = 0; }
603
+
604
+ void *cu_seqlens_q_d = cu_seqlens_q.data_ptr();
605
+
606
+ // Faster to transpose q from (b, 1, (nheads_kv ngroups), d) to (b, ngroups, nheads_kv, d) in this case
607
+ // H/t Daniel Haziza
608
+ const int seqlenq_ngroups_swapped = max_seqlen_q == 1 && num_heads > num_heads_k && window_size_left < 0 && window_size_right < 0 && p_dropout == 0.f && head_size_og % 8 == 0 && !alibi_slopes_.has_value();
609
+ const int ngroups = num_heads / num_heads_k;
610
+ if (seqlenq_ngroups_swapped) {
611
+ q = q.reshape({batch_size, num_heads_k, ngroups, head_size_og}).transpose(1, 2).reshape({batch_size * ngroups, num_heads_k, head_size_og});
612
+ max_seqlen_q = ngroups;
613
+ num_heads = num_heads_k;
614
+ cu_seqlens_q_d = nullptr;
615
+ }
616
+
617
+ const int total_q = q.sizes()[0];
618
+
619
+ TORCH_CHECK(batch_size > 0, "batch size must be positive");
620
+ TORCH_CHECK(head_size_og <= 256, "FlashAttention forward only supports head dimension at most 256");
621
+ TORCH_CHECK(num_heads % num_heads_k == 0, "Number of heads in key/value must divide number of heads in query");
622
+
623
+ if (window_size_left >= max_seqlen_k) { window_size_left = -1; }
624
+ if (window_size_right >= max_seqlen_k) { window_size_right = -1; }
625
+
626
+ CHECK_SHAPE(q, total_q, num_heads, head_size_og);
627
+ if (!paged_KV) {
628
+ const int total_k = k.size(0);
629
+ CHECK_SHAPE(k, total_k, num_heads_k, head_size_og);
630
+ CHECK_SHAPE(v, total_k, num_heads_k, head_size_og);
631
+ } else {
632
+ CHECK_SHAPE(k, num_blocks, page_block_size, num_heads_k, head_size_og);
633
+ CHECK_SHAPE(v, num_blocks, page_block_size, num_heads_k, head_size_og);
634
+ CHECK_SHAPE(block_table, batch_size, max_num_blocks_per_seq);
635
+ }
636
+
637
+ CHECK_SHAPE(cu_seqlens_q, batch_size + 1);
638
+ CHECK_SHAPE(cu_seqlens_k, batch_size + 1);
639
+ if (seqused_k.has_value()){
640
+ auto seqused_k_ = seqused_k.value();
641
+ TORCH_CHECK(seqused_k_.dtype() == torch::kInt32, "seqused_k must have dtype int32");
642
+ TORCH_CHECK(seqused_k_.is_cuda(), "seqused_k must be on CUDA device");
643
+ TORCH_CHECK(seqused_k_.is_contiguous(), "seqused_k must be contiguous");
644
+ CHECK_SHAPE(seqused_k_, batch_size);
645
+ }
646
+
647
+ at::Tensor q_padded, k_padded, v_padded;
648
+ if (head_size_og % 8 != 0) {
649
+ q_padded = torch::nn::functional::pad(q, torch::nn::functional::PadFuncOptions({0, 8 - head_size_og % 8}));
650
+ k_padded = torch::nn::functional::pad(k, torch::nn::functional::PadFuncOptions({0, 8 - head_size_og % 8}));
651
+ v_padded = torch::nn::functional::pad(v, torch::nn::functional::PadFuncOptions({0, 8 - head_size_og % 8}));
652
+ } else {
653
+ q_padded = q;
654
+ k_padded = k;
655
+ v_padded = v;
656
+ }
657
+
658
+ at::Tensor out;
659
+ if (out_.has_value()) {
660
+ out = out_.value();
661
+ TORCH_CHECK(out.dtype() == q_dtype, "Output must have the same dtype as inputs");
662
+ CHECK_DEVICE(out);
663
+ TORCH_CHECK(out.stride(-1) == 1, "Output tensor must have contiguous last dimension");
664
+ CHECK_SHAPE(out, sizes[0], sizes[1], head_size_og);
665
+ if (seqlenq_ngroups_swapped) {
666
+ out = out.reshape({batch_size, num_heads_k, ngroups, head_size_og}).transpose(1, 2).reshape({batch_size * ngroups, num_heads_k, head_size_og});
667
+ }
668
+ if (head_size_og % 8 != 0) { out = torch::empty_like(q_padded); }
669
+ } else {
670
+ out = torch::empty_like(q_padded);
671
+ }
672
+
673
+ auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
674
+ const int head_size = round_multiple(head_size_og, 8);
675
+ const int head_size_rounded = round_multiple(head_size, 32);
676
+ const int seqlen_q_rounded = round_multiple(max_seqlen_q, 128);
677
+ const int seqlen_k_rounded = round_multiple(max_seqlen_k, 128);
678
+
679
+ // Otherwise the kernel will be launched from cuda:0 device
680
+ // Cast to char to avoid compiler warning about narrowing
681
+ at::cuda::CUDAGuard device_guard{(char)q.get_device()};
682
+
683
+ auto opts = q.options();
684
+ auto softmax_lse = torch::empty({num_heads, total_q}, opts.dtype(at::kFloat));
685
+ at::Tensor p;
686
+ // Only return softmax if there's dropout to reduce compilation time
687
+ if (return_softmax) {
688
+ TORCH_CHECK(p_dropout > 0.0f, "return_softmax is only supported when p_dropout > 0.0");
689
+ p = torch::empty({ batch_size, num_heads, seqlen_q_rounded, seqlen_k_rounded }, opts);
690
+ }
691
+
692
+ if (zero_tensors) {
693
+ out.zero_();
694
+ softmax_lse.fill_(-std::numeric_limits<float>::infinity());
695
+ if (return_softmax) {p.zero_();}
696
+ }
697
+
698
+ Flash_fwd_params params;
699
+ set_params_fprop(params,
700
+ batch_size,
701
+ max_seqlen_q, max_seqlen_k,
702
+ seqlen_q_rounded, seqlen_k_rounded,
703
+ num_heads, num_heads_k,
704
+ head_size, head_size_rounded,
705
+ q_padded, k_padded, v_padded, out,
706
+ cu_seqlens_q_d,
707
+ cu_seqlens_k.data_ptr(),
708
+ seqused_k.has_value() ? seqused_k.value().data_ptr() : nullptr,
709
+ return_softmax ? p.data_ptr() : nullptr,
710
+ softmax_lse.data_ptr(),
711
+ p_dropout,
712
+ softmax_scale,
713
+ window_size_left,
714
+ window_size_right,
715
+ softcap,
716
+ seqlenq_ngroups_swapped,
717
+ /*unpadded_lse*/true);
718
+ params.total_q = total_q;
719
+
720
+ if (paged_KV) {
721
+ params.block_table = block_table.data_ptr<int>();
722
+ params.block_table_batch_stride = block_table.stride(0);
723
+ params.k_batch_stride = k_padded.stride(0);
724
+ params.v_batch_stride = v_padded.stride(0);
725
+ }
726
+ params.page_block_size = page_block_size;
727
+ if (seqlenq_ngroups_swapped) {
728
+ // Only apply split-k for decoding
729
+ set_params_splitkv(params, batch_size, num_heads,
730
+ head_size, max_seqlen_k, max_seqlen_q,
731
+ head_size_rounded, p_dropout, /*num_splits*/0, dprops, opts);
732
+ }
733
+
734
+ // number of times random will be generated per thread, to offset philox counter in thc random
735
+ // state
736
+ // We use a custom RNG that increases the offset by batch_size * nheads * 32.
737
+ int64_t counter_offset = params.b * params.h * 32;
738
+ auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA);
739
+ auto rng_state = torch::empty({2}, options.dtype(torch::kInt64));
740
+ // Forward kernel will populate memory with the seed and offset.
741
+ params.rng_state = reinterpret_cast<uint64_t*>(rng_state.data_ptr());
742
+
743
+ if (p_dropout > 0.0) {
744
+ auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(
745
+ gen_, at::cuda::detail::getDefaultCUDAGenerator());
746
+ // See Note [Acquire lock when using random generators]
747
+ std::lock_guard<std::mutex> lock(gen->mutex_);
748
+ params.philox_args = gen->philox_cuda_state(counter_offset);
749
+ }
750
+
751
+ set_params_alibi(params, alibi_slopes_, batch_size, num_heads);
752
+
753
+ if (max_seqlen_k > 0) {
754
+ auto stream = at::cuda::getCurrentCUDAStream().stream();
755
+ run_mha_fwd(params, stream, paged_KV);
756
+ } else {
757
+ // If seqlen_k == 0, then we have an empty tensor. We need to set the output to 0.
758
+ out.zero_();
759
+ softmax_lse.fill_(std::numeric_limits<float>::infinity());
760
+ }
761
+
762
+ at::Tensor out_padded = out;
763
+ if (head_size_og % 8 != 0) {
764
+ out = out.index({"...", torch::indexing::Slice(torch::indexing::None, head_size_og)});
765
+ if (out_.has_value()) { out_.value().copy_(out); }
766
+ }
767
+
768
+ if (seqlenq_ngroups_swapped) {
769
+ int64_t size_before[] = {batch_size, max_seqlen_q, num_heads_k, head_size_og};
770
+ int64_t size_after[] = {batch_size, num_heads_k * max_seqlen_q, head_size_og};
771
+ out = out.reshape(size_before).transpose(1, 2).reshape(size_after);
772
+ out_padded = out_padded.reshape(size_before).transpose(1, 2).reshape(size_after);
773
+ q_padded = q_padded.reshape(size_before).transpose(1, 2).reshape(size_after);
774
+ softmax_lse = softmax_lse.reshape({num_heads * max_seqlen_q, batch_size});
775
+ }
776
+
777
+ return {out, q_padded, k_padded, v_padded, out_padded, softmax_lse, p, rng_state};
778
+ }
779
+
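Aside (editorial, not part of the commit): mha_varlen_fwd packs all sequences into one total_q x num_heads x head_size tensor and locates each sequence through the cu_seqlens_q prefix sums. The sketch below builds such an array for a hypothetical batch; it is illustrative only.

// Illustrative construction of cu_seqlens for the varlen layout.
#include <cstdio>
#include <vector>

int main() {
    std::vector<int> seqlens = {3, 5, 2};            // hypothetical per-sequence lengths
    std::vector<int> cu_seqlens = {0};               // b + 1 entries; int32 on the GPU in practice
    for (int s : seqlens) cu_seqlens.push_back(cu_seqlens.back() + s);
    int total_q = cu_seqlens.back();                 // rows of the packed total_q x nheads x d tensor
    for (int c : cu_seqlens) std::printf("%d ", c);  // 0 3 8 10
    std::printf("| total_q = %d\n", total_q);
    return 0;
}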
780
+ void run_mha_bwd(Flash_bwd_params &params, cudaStream_t stream) {
781
+ FP16_SWITCH(!params.is_bf16, [&] {
782
+ HEADDIM_SWITCH(params.d, [&] {
783
+ run_mha_bwd_<elem_type, kHeadDim>(params, stream);
784
+ });
785
+ });
786
+ }
787
+
788
+ std::vector<at::Tensor>
789
+ mha_bwd(const at::Tensor &dout, // batch_size x seqlen_q x num_heads x head_size_og
790
+ const at::Tensor &q, // batch_size x seqlen_q x num_heads x head_size
791
+ const at::Tensor &k, // batch_size x seqlen_k x num_heads_k x head_size
792
+ const at::Tensor &v, // batch_size x seqlen_k x num_heads_k x head_size
793
+ const at::Tensor &out, // batch_size x seqlen_q x num_heads x head_size
794
+ const at::Tensor &softmax_lse, // b x h x seqlen_q
795
+ c10::optional<at::Tensor> &dq_, // batch_size x seqlen_q x num_heads x head_size
796
+ c10::optional<at::Tensor> &dk_, // batch_size x seqlen_k x num_heads_k x head_size
797
+ c10::optional<at::Tensor> &dv_, // batch_size x seqlen_k x num_heads_k x head_size
798
+ c10::optional<at::Tensor> &alibi_slopes_, // num_heads or batch_size x num_heads
799
+ const float p_dropout, // probability to drop
800
+ const float softmax_scale,
801
+ const bool is_causal,
802
+ int window_size_left,
803
+ int window_size_right,
804
+ const float softcap,
805
+ const bool deterministic,
806
+ c10::optional<at::Generator> gen_,
807
+ c10::optional<at::Tensor> &rng_state) {
808
+
809
+ #ifdef FLASHATTENTION_DISABLE_BACKWARD
810
+ TORCH_CHECK(false, "This flash attention build does not support backward.");
811
+ #endif
812
+ if (is_causal) { window_size_right = 0; }
813
+ auto dprops = at::cuda::getCurrentDeviceProperties();
814
+ // bool is_sm75 = dprops->major == 7 && dprops->minor == 5;
815
+ bool is_sm8x = dprops->major == 8 && dprops->minor >= 0;
816
+ bool is_sm80 = dprops->major == 8 && dprops->minor == 0;
817
+ bool is_sm90 = dprops->major == 9 && dprops->minor == 0;
818
+ TORCH_CHECK(is_sm90 || is_sm8x, "FlashAttention only supports Ampere GPUs or newer.");
819
+ // We will support Turing in the near future
820
+ // TORCH_CHECK(is_sm90 || is_sm8x || is_sm75, "FlashAttention only supports Turing GPUs or newer.");
821
+
822
+ bool is_dropout = p_dropout > 0.0;
823
+ auto stream = at::cuda::getCurrentCUDAStream().stream();
824
+
825
+ auto q_dtype = q.dtype();
826
+ TORCH_CHECK(q_dtype == torch::kFloat16 || q_dtype == torch::kBFloat16,
827
+ "FlashAttention only supports fp16 and bf16 data types");
828
+ if (q_dtype == torch::kBFloat16) {
829
+ TORCH_CHECK(is_sm90 || is_sm8x, "bfloat16 is only supported on Ampere GPUs or newer");
830
+ }
831
+ TORCH_CHECK(k.dtype() == q_dtype, "query and key must have the same dtype");
832
+ TORCH_CHECK(v.dtype() == q_dtype, "query and value must have the same dtype");
833
+ TORCH_CHECK(out.dtype() == q_dtype, "query and out must have the same dtype");
834
+ TORCH_CHECK(dout.dtype() == q_dtype, "query and dout must have the same dtype");
835
+
836
+ CHECK_DEVICE(q); CHECK_DEVICE(k); CHECK_DEVICE(v);
837
+ CHECK_DEVICE(out); CHECK_DEVICE(dout); CHECK_DEVICE(softmax_lse);
838
+
839
+ TORCH_CHECK(q.stride(-1) == 1, "Input tensor must have contiguous last dimension");
840
+ TORCH_CHECK(k.stride(-1) == 1, "Input tensor must have contiguous last dimension");
841
+ TORCH_CHECK(v.stride(-1) == 1, "Input tensor must have contiguous last dimension");
842
+ TORCH_CHECK(out.stride(-1) == 1, "out tensor must have contiguous last dimension");
843
+ TORCH_CHECK(dout.stride(-1) == 1, "dout tensor must have contiguous last dimension");
844
+
845
+ const auto sizes = q.sizes();
846
+
847
+ const int batch_size = sizes[0];
848
+ const int seqlen_q = sizes[1];
849
+ const int num_heads = sizes[2];
850
+ const int head_size_og = dout.size(3);
851
+ const int head_size = sizes[3];
852
+ const int seqlen_k = k.size(1);
853
+ const int num_heads_k = k.size(2);
854
+ TORCH_CHECK(batch_size > 0, "batch size must be positive");
855
+ TORCH_CHECK(head_size % 8 == 0, "head_size should be a multiple of 8");
856
+ TORCH_CHECK(head_size <= 256, "FlashAttention backward only supports head dimension at most 256");
857
+ if (head_size > 192 && (head_size <= 224 || is_dropout)) {
858
+ TORCH_CHECK(is_sm80 || is_sm90, "FlashAttention backward for head dim 256 with dropout, or head dim 224 with/without dropout requires A100/A800 or H100/H800");
859
+ }
860
+ TORCH_CHECK(num_heads % num_heads_k == 0, "Number of heads in key/value must divide number of heads in query");
861
+
862
+ auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
863
+ const int head_size_rounded = round_multiple(head_size, 32);
864
+ const int seqlen_q_rounded = round_multiple(seqlen_q, 128);
865
+ const int seqlen_k_rounded = round_multiple(seqlen_k, 128);
866
+
867
+ TORCH_CHECK(head_size == round_multiple(head_size_og, 8), "head_size must be head_size_og rounded to a multiple of 8");
868
+
869
+ if (window_size_left >= seqlen_k) { window_size_left = -1; }
870
+ if (window_size_right >= seqlen_k) { window_size_right = -1; }
871
+
872
+ CHECK_SHAPE(q, batch_size, seqlen_q, num_heads, head_size);
873
+ CHECK_SHAPE(k, batch_size, seqlen_k, num_heads_k, head_size);
874
+ CHECK_SHAPE(v, batch_size, seqlen_k, num_heads_k, head_size);
875
+ CHECK_SHAPE(out, batch_size, seqlen_q, num_heads, head_size);
876
+ CHECK_SHAPE(dout, batch_size, seqlen_q, num_heads, head_size_og);
877
+
878
+ at::Tensor dq, dk, dv;
879
+ if (dq_.has_value()) {
880
+ dq = dq_.value();
881
+ TORCH_CHECK(dq.dtype() == q_dtype, "dq must have the same dtype as q");
882
+ CHECK_DEVICE(dq);
883
+ TORCH_CHECK(dq.stride(-1) == 1, "dq must have contiguous last dimension");
884
+ CHECK_SHAPE(dq, batch_size, seqlen_q, num_heads, head_size);
885
+ } else {
886
+ dq = torch::empty_like(q);
887
+ }
888
+ if (dk_.has_value()) {
889
+ dk = dk_.value();
890
+ TORCH_CHECK(dk.dtype() == q_dtype, "dk must have the same dtype as q");
891
+ CHECK_DEVICE(dk);
892
+ TORCH_CHECK(dk.stride(-1) == 1, "dk must have contiguous last dimension");
893
+ CHECK_SHAPE(dk, batch_size, seqlen_k, num_heads_k, head_size);
894
+ } else {
895
+ dk = torch::empty_like(k);
896
+ }
897
+ if (dv_.has_value()) {
898
+ dv = dv_.value();
899
+ TORCH_CHECK(dv.dtype() == q_dtype, "dv must have the same dtype as q");
900
+ CHECK_DEVICE(dv);
901
+ TORCH_CHECK(dv.stride(-1) == 1, "dv must have contiguous last dimension");
902
+ CHECK_SHAPE(dv, batch_size, seqlen_k, num_heads_k, head_size);
903
+ } else {
904
+ dv = torch::empty_like(v);
905
+ }
906
+
907
+ at::Tensor dout_padded;
908
+ if (head_size_og % 8 != 0) {
909
+ dout_padded = torch::nn::functional::pad(dout, torch::nn::functional::PadFuncOptions({0, 8 - head_size_og % 8}));
910
+ } else {
911
+ dout_padded = dout;
912
+ }
913
+
914
+ // bool loop = seqlen_k > blocksize_c;
915
+ // TODO: change later, for now set to true for simplicity
916
+ bool loop = true;
917
+
918
+ // Otherwise the kernel will be launched from cuda:0 device
919
+ // Cast to char to avoid compiler warning about narrowing
920
+ at::cuda::CUDAGuard device_guard{(char)q.get_device()};
921
+
922
+ auto opts = q.options();
923
+ auto softmax_d = torch::empty({batch_size, num_heads, seqlen_q_rounded}, opts.dtype(at::kFloat));
924
+ at::Tensor dq_accum;
925
+ at::Tensor dk_accum, dv_accum;
926
+ if (loop) {
927
+ if (!deterministic) {
928
+ dq_accum = torch::empty({batch_size, seqlen_q_rounded, num_heads, head_size_rounded}, opts.dtype(at::kFloat));
929
+ } else {
930
+ const int nsplits = (dprops->multiProcessorCount + batch_size * num_heads - 1) / (batch_size * num_heads);
931
+ dq_accum = torch::zeros({nsplits, batch_size, seqlen_q_rounded, num_heads, head_size_rounded}, opts.dtype(at::kFloat));
932
+ }
933
+ // dk_accum = torch::empty({batch_size, num_heads_k, seqlen_k_rounded, head_size_rounded}, opts.dtype(at::kFloat));
934
+ // dv_accum = torch::empty({batch_size, num_heads_k, seqlen_k_rounded, head_size_rounded}, opts.dtype(at::kFloat));
935
+ }
936
+
937
+ at::Tensor dk_expanded, dv_expanded;
938
+ if (num_heads_k != num_heads) { // MQA / GQA
939
+ dk_expanded = torch::empty({batch_size, seqlen_k, num_heads, head_size}, opts);
940
+ dv_expanded = torch::empty({batch_size, seqlen_k, num_heads, head_size}, opts);
941
+ } else {
942
+ dk_expanded = dk;
943
+ dv_expanded = dv;
944
+ }
945
+
946
+ Flash_bwd_params params;
947
+
948
+ set_params_dgrad(params,
949
+ batch_size,
950
+ seqlen_q, seqlen_k,
951
+ seqlen_q_rounded, seqlen_k_rounded,
952
+ num_heads, num_heads_k,
953
+ head_size, head_size_rounded,
954
+ q, k, v, out,
955
+ dout_padded, dq, dk_expanded, dv_expanded,
956
+ nullptr,
957
+ nullptr,
958
+ loop ? dq_accum.data_ptr() : nullptr,
959
+ // loop ? dk_accum.data_ptr() : nullptr,
960
+ // loop ? dv_accum.data_ptr() : nullptr,
961
+ nullptr,
962
+ nullptr,
963
+ softmax_lse.data_ptr(),
964
+ softmax_d.data_ptr(),
965
+ p_dropout,
966
+ softmax_scale,
967
+ window_size_left,
968
+ window_size_right,
969
+ softcap,
970
+ deterministic,
971
+ /*unpadded_lse*/false);
972
+ params.dq_accum_split_stride = !deterministic ? 0 : dq_accum.stride(0);
973
+
974
+ auto launch = &run_mha_bwd;
975
+
976
+ auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(
977
+ gen_, at::cuda::detail::getDefaultCUDAGenerator());
978
+
979
+ // We use a custom RNG that increases the offset by batch_size * nheads * 32.
980
+ int64_t counter_offset = params.b * params.h * 32;
981
+
982
+ if ( rng_state.has_value() ) {
983
+ params.rng_state = reinterpret_cast<uint64_t*>(rng_state.value().data_ptr());
984
+ } else if( is_dropout ) {
985
+ // See Note [Acquire lock when using random generators]
986
+ std::lock_guard<std::mutex> lock(gen->mutex_);
987
+ params.philox_args = gen->philox_cuda_state(counter_offset);
988
+ auto seeds = at::cuda::philox::unpack(params.philox_args);
989
+ params.rng_state[0] = std::get<0>(seeds);
990
+ params.rng_state[1] = std::get<1>(seeds);
991
+ }
992
+
993
+ set_params_alibi(params, alibi_slopes_, batch_size, num_heads);
994
+
995
+ if (seqlen_q > 0) {
996
+ launch(params, stream);
997
+ } else {
998
+ // If seqlen_q == 0, then we have an empty tensor. We need to set the output to 0.
999
+ dk_expanded.zero_();
1000
+ dv_expanded.zero_();
1001
+ softmax_d.zero_();
1002
+ }
1003
+
1004
+ // For MQA/GQA we need to sum dK and dV across the groups
1005
+ if (num_heads_k != num_heads) {
1006
+ at::sum_out(dk, at::reshape(dk_expanded, {batch_size, seqlen_k, num_heads_k, num_heads / num_heads_k, head_size}), {3});
1007
+ at::sum_out(dv, at::reshape(dv_expanded, {batch_size, seqlen_k, num_heads_k, num_heads / num_heads_k, head_size}), {3});
1008
+ }
1009
+ if (head_size_og % 8 != 0) {
1010
+ dq = dq.index({"...", torch::indexing::Slice(torch::indexing::None, head_size_og)});
1011
+ dk = dk.index({"...", torch::indexing::Slice(torch::indexing::None, head_size_og)});
1012
+ dv = dv.index({"...", torch::indexing::Slice(torch::indexing::None, head_size_og)});
1013
+ }
1014
+
1015
+ return { dq, dk, dv, softmax_d };
1016
+ }
1017
+
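Aside (editorial, not part of the commit): for MQA/GQA, mha_bwd computes dK/dV per query head and then sums them over each group of query heads that share a key/value head (the at::sum_out over the reshaped group dimension near the end of the function). The plain-loop sketch below mirrors that reduction on toy data; it is illustrative only.

// Illustrative MQA/GQA gradient reduction over query-head groups.
#include <cstdio>
#include <vector>

int main() {
    const int num_heads = 4, num_heads_k = 2, group = num_heads / num_heads_k;
    std::vector<float> dk_expanded = {1, 2, 3, 4};    // toy: one value per query head
    std::vector<float> dk(num_heads_k, 0.f);
    for (int hk = 0; hk < num_heads_k; ++hk)
        for (int g = 0; g < group; ++g)
            dk[hk] += dk_expanded[hk * group + g];    // sum over the group dimension
    std::printf("dk = {%.0f, %.0f}\n", dk[0], dk[1]); // {3, 7}
    return 0;
}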
1018
+ std::vector<at::Tensor>
1019
+ mha_varlen_bwd(const at::Tensor &dout, // total_q x num_heads x head_size
1020
+ const at::Tensor &q, // total_q x num_heads x head_size, total_q := \sum_{i=0}^{b} s_i
1021
+ const at::Tensor &k, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i
1022
+ const at::Tensor &v, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i
1023
+ const at::Tensor &out, // total_q x num_heads x head_size
1024
+ const at::Tensor &softmax_lse, // h x total_q, softmax logsumexp
1025
+ c10::optional<at::Tensor> &dq_, // total_q x num_heads x head_size, total_q := \sum_{i=0}^{b} s_i
1026
+ c10::optional<at::Tensor> &dk_, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i
1027
+ c10::optional<at::Tensor> &dv_, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i
1028
+ const at::Tensor &cu_seqlens_q, // b+1
1029
+ const at::Tensor &cu_seqlens_k, // b+1
1030
+ c10::optional<at::Tensor> &alibi_slopes_, // num_heads or b x num_heads
1031
+ const int max_seqlen_q,
1032
+ const int max_seqlen_k, // max sequence length to choose the kernel
1033
+ const float p_dropout, // probability to drop
1034
+ const float softmax_scale,
1035
+ const bool zero_tensors,
1036
+ const bool is_causal,
1037
+ int window_size_left,
1038
+ int window_size_right,
1039
+ const float softcap,
1040
+ const bool deterministic,
1041
+ c10::optional<at::Generator> gen_,
1042
+ c10::optional<at::Tensor> &rng_state) {
1043
+
1044
+ #ifdef FLASHATTENTION_DISABLE_BACKWARD
1045
+ TORCH_CHECK(false, "This flash attention build does not support backward.");
1046
+ #endif
1047
+
1048
+ if (is_causal) { window_size_right = 0; }
1049
+ auto dprops = at::cuda::getCurrentDeviceProperties();
1050
+ // bool is_sm75 = dprops->major == 7 && dprops->minor == 5;
1051
+ bool is_sm8x = dprops->major == 8 && dprops->minor >= 0;
1052
+ bool is_sm80 = dprops->major == 8 && dprops->minor == 0;
1053
+ bool is_sm90 = dprops->major == 9 && dprops->minor == 0;
1054
+ TORCH_CHECK(is_sm90 || is_sm8x, "FlashAttention only supports Ampere GPUs or newer.");
1055
+ // We will support Turing in the near future
1056
+ // TORCH_CHECK(is_sm90 || is_sm8x || is_sm75, "FlashAttention only supports Turing GPUs or newer.");
1057
+ bool is_dropout = p_dropout > 0.0;
1058
+ auto stream = at::cuda::getCurrentCUDAStream().stream();
1059
+
1060
+ auto q_dtype = q.dtype();
1061
+ TORCH_CHECK(q_dtype == torch::kFloat16 || q_dtype == torch::kBFloat16,
1062
+ "FlashAttention only supports fp16 and bf16 data types");
1063
+ if (q_dtype == torch::kBFloat16) {
1064
+ TORCH_CHECK(is_sm90 || is_sm8x, "bfloat16 is only supported on Ampere GPUs or newer");
1065
+ }
1066
+ TORCH_CHECK(k.dtype() == q_dtype, "query and key must have the same dtype");
1067
+ TORCH_CHECK(v.dtype() == q_dtype, "query and value must have the same dtype");
1068
+ TORCH_CHECK(out.dtype() == q_dtype, "query and out must have the same dtype");
1069
+ TORCH_CHECK(dout.dtype() == q_dtype, "query and dout must have the same dtype");
1070
+ TORCH_CHECK(cu_seqlens_q.dtype() == torch::kInt32, "cu_seqlens_q must have dtype int32");
1071
+ TORCH_CHECK(cu_seqlens_k.dtype() == torch::kInt32, "cu_seqlens_k must have dtype int32");
1072
+
1073
+ CHECK_DEVICE(q); CHECK_DEVICE(k); CHECK_DEVICE(v);
1074
+ CHECK_DEVICE(out); CHECK_DEVICE(dout); CHECK_DEVICE(softmax_lse);
1075
+ CHECK_DEVICE(cu_seqlens_q); CHECK_DEVICE(cu_seqlens_k);
1076
+
1077
+ TORCH_CHECK(q.stride(-1) == 1, "Input tensor must have contiguous last dimension");
1078
+ TORCH_CHECK(k.stride(-1) == 1, "Input tensor must have contiguous last dimension");
1079
+ TORCH_CHECK(v.stride(-1) == 1, "Input tensor must have contiguous last dimension");
1080
+ TORCH_CHECK(out.stride(-1) == 1, "out tensor must have contiguous last dimension");
1081
+ TORCH_CHECK(dout.stride(-1) == 1, "dout tensor must have contiguous last dimension");
1082
+ CHECK_CONTIGUOUS(cu_seqlens_q);
1083
+ CHECK_CONTIGUOUS(cu_seqlens_k);
1084
+
1085
+ const auto sizes = q.sizes();
1086
+
1087
+ const int total_q = sizes[0];
1088
+ const int batch_size = cu_seqlens_q.numel() - 1;
1089
+ const int num_heads = sizes[1];
1090
+ const int head_size_og = dout.size(2);
1091
+ const int head_size = sizes[2];
1092
+ const int total_k = k.size(0);
1093
+ const int num_heads_k = k.size(1);
1094
+ TORCH_CHECK(batch_size > 0, "batch size must be positive");
1095
+ TORCH_CHECK(head_size % 8 == 0, "head_size should be a multiple of 8");
1096
+ TORCH_CHECK(head_size <= 256, "FlashAttention backward only supports head dimension at most 256");
1097
+ if (head_size > 192 && (head_size <= 224 || is_dropout)) {
1098
+ TORCH_CHECK(is_sm80 || is_sm90, "FlashAttention backward for head dim 256 with dropout, or head dim 224 with/without dropout requires A100/A800 or H100/H800");
1099
+ }
1100
+ TORCH_CHECK(num_heads % num_heads_k == 0, "Number of heads in key/value must divide number of heads in query");
1101
+
1102
+ auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
1103
+ const int head_size_rounded = round_multiple(head_size, 32);
1104
+ const int seqlen_q_rounded = round_multiple(max_seqlen_q, 128);
1105
+ const int seqlen_k_rounded = round_multiple(max_seqlen_k, 128);
1106
+
1107
+ TORCH_CHECK(head_size == round_multiple(head_size_og, 8), "head_size must be head_size_og rounded to a multiple of 8");
1108
+
1109
+ if (window_size_left >= max_seqlen_k) { window_size_left = -1; }
1110
+ if (window_size_right >= max_seqlen_k) { window_size_right = -1; }
1111
+
1112
+ CHECK_SHAPE(q, total_q, num_heads, head_size);
1113
+ CHECK_SHAPE(k, total_k, num_heads_k, head_size);
1114
+ CHECK_SHAPE(v, total_k, num_heads_k, head_size);
1115
+ CHECK_SHAPE(out, total_q, num_heads, head_size);
1116
+ CHECK_SHAPE(dout, total_q, num_heads, head_size_og);
1117
+ CHECK_SHAPE(cu_seqlens_q, batch_size + 1);
1118
+ CHECK_SHAPE(cu_seqlens_k, batch_size + 1);
1119
+
1120
+ at::Tensor dq, dk, dv;
1121
+ if (dq_.has_value()) {
1122
+ dq = dq_.value();
1123
+ TORCH_CHECK(dq.dtype() == q_dtype, "dq must have the same dtype as q");
1124
+ CHECK_DEVICE(dq);
1125
+ TORCH_CHECK(dq.stride(-1) == 1, "dq must have contiguous last dimension");
1126
+ CHECK_SHAPE(dq, total_q, num_heads, head_size);
1127
+ } else {
1128
+ dq = torch::empty_like(q);
1129
+ }
1130
+ if (dk_.has_value()) {
1131
+ dk = dk_.value();
1132
+ TORCH_CHECK(dk.dtype() == q_dtype, "dk must have the same dtype as q");
1133
+ CHECK_DEVICE(dk);
1134
+ TORCH_CHECK(dk.stride(-1) == 1, "dk must have contiguous last dimension");
1135
+ CHECK_SHAPE(dk, total_k, num_heads_k, head_size);
1136
+ } else {
1137
+ dk = torch::empty_like(k);
1138
+ }
1139
+ if (dv_.has_value()) {
1140
+ dv = dv_.value();
1141
+ TORCH_CHECK(dv.dtype() == q_dtype, "dv must have the same dtype as q");
1142
+ CHECK_DEVICE(dv);
1143
+ TORCH_CHECK(dv.stride(-1) == 1, "dv must have contiguous last dimension");
1144
+ CHECK_SHAPE(dv, total_k, num_heads_k, head_size);
1145
+ } else {
1146
+ dv = torch::empty_like(v);
1147
+ }
1148
+
1149
+ at::Tensor dout_padded;
1150
+ if (head_size_og % 8 != 0) {
1151
+ dout_padded = torch::nn::functional::pad(dout, torch::nn::functional::PadFuncOptions({0, 8 - head_size_og % 8}));
1152
+ } else {
1153
+ dout_padded = dout;
1154
+ }
1155
+
1156
+ // bool loop = max_seqlen_k > blocksize_c;
1157
+ // TODO: change later, for now set to true for simplicity
1158
+ bool loop = true;
1159
+
1160
+ // Otherwise the kernel will be launched from cuda:0 device
1161
+ // Cast to char to avoid compiler warning about narrowing
1162
+ at::cuda::CUDAGuard device_guard{(char)q.get_device()};
1163
+
1164
+ auto opts = q.options();
1165
+ auto softmax_d = torch::empty({num_heads, total_q + 128 * batch_size}, opts.dtype(at::kFloat));
1166
+ at::Tensor dq_accum;
1167
+ if (loop) {
1168
+ // We don't want to allocate dq_accum of size (batch, seqlen_q_rounded, num_heads, head_size_rounded)
1169
+ // because that would be too large if there is a very long sequence and the rest of the sequences are short.
1170
+ // Instead, we allocate dq_accum of size (total_q + 128 * batch, num_heads, head_size_rounded).
1171
+ // Note that 128 is the max block size on the seqlen_q dimension.
1172
+ // For dQ, the i-th sequence is stored in indices from cu_seqlens[i] + 128 * i to
1173
+ // cu_seqlens[i + 1] + 128 * i - 1. This ensures that the i-th sequence and (i + 1)-th sequence will
1174
+ // be at least 128 apart. It's ok for us to do atomicAdds up to 128 rows beyond what we're normally
1175
+ // allowed to do. So we won't have to do any bound checking, and performance should stay the same.
1176
+ // Same holds for softmax_d, since LSE is stored in unpadded format.
1177
+ if (!deterministic) {
1178
+ dq_accum = torch::empty({total_q + 128 * batch_size, num_heads, head_size_rounded}, opts.dtype(at::kFloat));
1179
+ } else {
1180
+ const int nsplits = (dprops->multiProcessorCount + batch_size * num_heads - 1) / (batch_size * num_heads);
1181
+ dq_accum = torch::zeros({nsplits, total_q + 128 * batch_size, num_heads, head_size_rounded}, opts.dtype(at::kFloat));
1182
+ }
1183
+ }
1184
+
1185
+ at::Tensor dk_expanded, dv_expanded;
1186
+ if (num_heads_k != num_heads) { // MQA / GQA
1187
+ dk_expanded = torch::empty({total_k, num_heads, head_size}, opts);
1188
+ dv_expanded = torch::empty({total_k, num_heads, head_size}, opts);
1189
+ } else {
1190
+ dk_expanded = dk;
1191
+ dv_expanded = dv;
1192
+ }
1193
+
1194
+ if( zero_tensors ) {
1195
+ dq.zero_();
1196
+ dk_expanded.zero_();
1197
+ dv_expanded.zero_();
1198
+ softmax_d.zero_();
1199
+ }
1200
+
1201
+ Flash_bwd_params params;
1202
+
1203
+ set_params_dgrad(params,
1204
+ batch_size,
1205
+ max_seqlen_q, max_seqlen_k,
1206
+ seqlen_q_rounded, seqlen_k_rounded,
1207
+ num_heads, num_heads_k,
1208
+ head_size, head_size_rounded,
1209
+ q, k, v, out,
1210
+ dout_padded, dq, dk_expanded, dv_expanded,
1211
+ cu_seqlens_q.data_ptr(),
1212
+ cu_seqlens_k.data_ptr(),
1213
+ loop ? dq_accum.data_ptr() : nullptr,
1214
+ nullptr,
1215
+ nullptr,
1216
+ softmax_lse.data_ptr(),
1217
+ softmax_d.data_ptr(),
1218
+ p_dropout,
1219
+ softmax_scale,
1220
+ window_size_left,
1221
+ window_size_right,
1222
+ softcap,
1223
+ deterministic,
1224
+ /*unpadded_lse*/true);
1225
+ params.dq_accum_split_stride = !deterministic ? 0 : dq_accum.stride(0);
1226
+ params.total_q = total_q;
1227
+
1228
+ auto launch = &run_mha_bwd;
1229
+
1230
+ auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(
1231
+ gen_, at::cuda::detail::getDefaultCUDAGenerator());
1232
+
1233
+ // We use a custom RNG that increases the offset by batch_size * nheads * 32.
1234
+ int64_t counter_offset = params.b * params.h * 32;
1235
+
1236
+ if ( rng_state.has_value() ) {
1237
+ params.rng_state = reinterpret_cast<uint64_t*>(rng_state.value().data_ptr());
1238
+ } else if( is_dropout ) {
1239
+ // See Note [Acquire lock when using random generators]
1240
+ std::lock_guard<std::mutex> lock(gen->mutex_);
1241
+ params.philox_args = gen->philox_cuda_state(counter_offset);
1242
+ auto seeds = at::cuda::philox::unpack(params.philox_args);
1243
+ params.rng_state[0] = std::get<0>(seeds);
1244
+ params.rng_state[1] = std::get<1>(seeds);
1245
+ }
1246
+
1247
+ set_params_alibi(params, alibi_slopes_, batch_size, num_heads);
1248
+
1249
+ if (max_seqlen_q > 0) {
1250
+ launch(params, stream);
1251
+ } else {
1252
+ // If seqlen_q == 0, then we have an empty tensor. We need to set the output to 0.
1253
+ dk_expanded.zero_();
1254
+ dv_expanded.zero_();
1255
+ softmax_d.zero_();
1256
+ }
1257
+
1258
+ // For MQA/GQA we need to sum dK and dV across the groups
1259
+ if (num_heads_k != num_heads) {
1260
+ at::sum_out(dk, at::reshape(dk_expanded, {total_k, num_heads_k, num_heads / num_heads_k, head_size}), {2});
1261
+ at::sum_out(dv, at::reshape(dv_expanded, {total_k, num_heads_k, num_heads / num_heads_k, head_size}), {2});
1262
+ }
1263
+ if (head_size_og % 8 != 0) {
1264
+ dq = dq.index({"...", torch::indexing::Slice(torch::indexing::None, head_size_og)});
1265
+ dk = dk.index({"...", torch::indexing::Slice(torch::indexing::None, head_size_og)});
1266
+ dv = dv.index({"...", torch::indexing::Slice(torch::indexing::None, head_size_og)});
1267
+ }
1268
+
1269
+ return { dq, dk, dv, softmax_d };
1270
+ }
1271
+
1272
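The comment block in mha_varlen_bwd above lays out dq_accum and softmax_d in an "unpadded" format, giving each sequence a 128-row cushion so atomicAdds can spill past a sequence's rows without bounds checks. As a quick illustration of that arithmetic, here is a standalone host-side sketch with a hypothetical helper and made-up sequence lengths (not code from this extension):

#include <cstdio>
#include <vector>

// Row range occupied by sequence i inside dq_accum, which is allocated with
// total_q + 128 * batch_size rows in total (softmax_d gets the same cushion per head).
static int dq_accum_row_start(const std::vector<int> &cu_seqlens_q, int i) {
    return cu_seqlens_q[i] + 128 * i;
}

int main() {
    // Three sequences of lengths 5, 200, 17 => total_q = 222, so 222 + 3 * 128 = 606 rows allocated.
    std::vector<int> cu_seqlens_q = {0, 5, 205, 222};
    for (int i = 0; i < 3; ++i) {
        int first = dq_accum_row_start(cu_seqlens_q, i);
        int last = cu_seqlens_q[i + 1] + 128 * i - 1;  // last row belonging to sequence i
        std::printf("seq %d: rows [%d, %d]\n", i, first, last);
    }
    // Consecutive sequences end up at least 128 rows apart, matching the comment above.
    return 0;
}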
+ std::vector<at::Tensor>
1273
+ mha_fwd_kvcache(at::Tensor &q, // batch_size x seqlen_q x num_heads x head_size
1274
+ const at::Tensor &kcache, // batch_size_c x seqlen_k x num_heads_k x head_size or num_blocks x page_block_size x num_heads_k x head_size if there's a block_table.
1275
+ const at::Tensor &vcache, // batch_size_c x seqlen_k x num_heads_k x head_size or num_blocks x page_block_size x num_heads_k x head_size if there's a block_table.
1276
+ c10::optional<const at::Tensor> &k_, // batch_size x seqlen_knew x num_heads_k x head_size
1277
+ c10::optional<const at::Tensor> &v_, // batch_size x seqlen_knew x num_heads_k x head_size
1278
+ c10::optional<const at::Tensor> &seqlens_k_, // batch_size
1279
+ c10::optional<const at::Tensor> &rotary_cos_, // seqlen_ro x (rotary_dim / 2)
1280
+ c10::optional<const at::Tensor> &rotary_sin_, // seqlen_ro x (rotary_dim / 2)
1281
+ c10::optional<const at::Tensor> &cache_batch_idx_, // indices to index into the KV cache
1282
+ c10::optional<at::Tensor> &block_table_, // batch_size x max_num_blocks_per_seq
1283
+ c10::optional<at::Tensor> &alibi_slopes_, // num_heads or batch_size x num_heads
1284
+ c10::optional<at::Tensor> &out_, // batch_size x seqlen_q x num_heads x head_size
1285
+ const float softmax_scale,
1286
+ bool is_causal,
1287
+ int window_size_left,
1288
+ int window_size_right,
1289
+ const float softcap,
1290
+ bool is_rotary_interleaved, // if true, rotary combines indices 0 & 1, else indices 0 & rotary_dim / 2
1291
+ int num_splits
1292
+ ) {
1293
+
1294
+ auto dprops = at::cuda::getCurrentDeviceProperties();
1295
+ // bool is_sm75 = dprops->major == 7 && dprops->minor == 5;
1296
+ bool is_sm8x = dprops->major == 8 && dprops->minor >= 0;
1297
+ bool is_sm90 = dprops->major == 9 && dprops->minor == 0;
1298
+ TORCH_CHECK(is_sm90 || is_sm8x, "FlashAttention only supports Ampere GPUs or newer.");
1299
+ // We will support Turing in the near future
1300
+ // TORCH_CHECK(is_sm90 || is_sm8x || is_sm75, "FlashAttention only supports Turing GPUs or newer.");
1301
+
1302
+ auto q_dtype = q.dtype();
1303
+ TORCH_CHECK(q_dtype == torch::kFloat16 || q_dtype == torch::kBFloat16,
1304
+ "FlashAttention only support fp16 and bf16 data type");
1305
+ if (q_dtype == torch::kBFloat16) {
1306
+ TORCH_CHECK(is_sm90 || is_sm8x, "bfloat16 is only supported on Ampere GPUs or newer");
1307
+ }
1308
+ TORCH_CHECK(kcache.dtype() == q_dtype, "query and key must have the same dtype");
1309
+ TORCH_CHECK(vcache.dtype() == q_dtype, "query and value must have the same dtype");
1310
+
1311
+ CHECK_DEVICE(q); CHECK_DEVICE(kcache); CHECK_DEVICE(vcache);
1312
+
1313
+ TORCH_CHECK(q.stride(-1) == 1, "Input tensor must have contiguous last dimension");
1314
+ TORCH_CHECK(kcache.stride(-1) == 1, "Input tensor must have contiguous last dimension");
1315
+ TORCH_CHECK(vcache.stride(-1) == 1, "Input tensor must have contiguous last dimension");
1316
+
1317
+ at::Tensor block_table;
1318
+ const bool paged_KV = block_table_.has_value();
1319
+ if (paged_KV) {
1320
+ TORCH_CHECK(!cache_batch_idx_.has_value(), "Paged KVcache does not support cache_batch_idx");
1321
+ block_table = block_table_.value();
1322
+ CHECK_DEVICE(block_table);
1323
+ TORCH_CHECK(block_table.dtype() == torch::kInt32, "block_table must have dtype torch.int32");
1324
+ TORCH_CHECK(block_table.stride(-1) == 1, "block_table must have contiguous last dimension");
1325
+ }
1326
+
1327
+ const auto sizes = q.sizes();
1328
+
1329
+ const int batch_size = sizes[0];
1330
+ int seqlen_q = sizes[1];
1331
+ int num_heads = sizes[2];
1332
+ const int head_size_og = sizes[3];
1333
+
1334
+ const int max_num_blocks_per_seq = !paged_KV ? 0 : block_table.size(1);
1335
+ const int num_blocks = !paged_KV ? 0 : kcache.size(0);
1336
+ const int page_block_size = !paged_KV ? 1 : kcache.size(1);
1337
+ TORCH_CHECK(!paged_KV || page_block_size % 256 == 0, "Paged KV cache block size must be divisible by 256");
1338
+ const int seqlen_k = !paged_KV ? kcache.size(1) : max_num_blocks_per_seq * page_block_size;
1339
+ const int num_heads_k = kcache.size(2);
1340
+ const int batch_size_c = !paged_KV ? kcache.size(0) : batch_size;
1341
+ TORCH_CHECK(batch_size > 0, "batch size must be positive");
1342
+ TORCH_CHECK(head_size_og <= 256, "FlashAttention forward only supports head dimension at most 256");
1343
+ TORCH_CHECK(num_heads % num_heads_k == 0, "Number of heads in key/value must divide number of heads in query");
1344
+
1345
+ // causal=true is the same as causal=false in this case
1346
+ if (seqlen_q == 1 && !alibi_slopes_.has_value()) { is_causal = false; }
1347
+ if (is_causal) { window_size_right = 0; }
1348
+
1349
+ // Faster to transpose q from (b, 1, (nheads_kv * ngroups), d) to (b, ngroups, nheads_kv, d) in this case
1350
+ // H/t Daniel Haziza
1351
+ const int seqlenq_ngroups_swapped = seqlen_q == 1 && num_heads > num_heads_k && window_size_left < 0 && window_size_right < 0 && head_size_og % 8 == 0 && !alibi_slopes_.has_value();
1352
+ if (seqlenq_ngroups_swapped) {
1353
+ const int ngroups = num_heads / num_heads_k;
1354
+ q = q.reshape({batch_size, num_heads_k, ngroups, head_size_og}).transpose(1, 2);
1355
+ seqlen_q = ngroups;
1356
+ num_heads = num_heads_k;
1357
+ }
1358
+
1359
+ if (window_size_left >= seqlen_k) { window_size_left = -1; }
1360
+ if (window_size_right >= seqlen_k) { window_size_right = -1; }
1361
+
1362
+ CHECK_SHAPE(q, batch_size, seqlen_q, num_heads, head_size_og);
1363
+ if (!paged_KV) {
1364
+ CHECK_SHAPE(kcache, batch_size_c, seqlen_k, num_heads_k, head_size_og);
1365
+ CHECK_SHAPE(vcache, batch_size_c, seqlen_k, num_heads_k, head_size_og);
1366
+ } else {
1367
+ CHECK_SHAPE(kcache, num_blocks, page_block_size, num_heads_k, head_size_og);
1368
+ CHECK_SHAPE(vcache, num_blocks, page_block_size, num_heads_k, head_size_og);
1369
+ CHECK_SHAPE(block_table, batch_size, max_num_blocks_per_seq);
1370
+ }
1371
+
1372
+ at::Tensor q_padded, kcache_padded, vcache_padded;
1373
+ if (head_size_og % 8 != 0) {
1374
+ q_padded = torch::nn::functional::pad(q, torch::nn::functional::PadFuncOptions({0, 8 - head_size_og % 8}));
1375
+ kcache_padded = torch::nn::functional::pad(kcache, torch::nn::functional::PadFuncOptions({0, 8 - head_size_og % 8}));
1376
+ vcache_padded = torch::nn::functional::pad(vcache, torch::nn::functional::PadFuncOptions({0, 8 - head_size_og % 8}));
1377
+ } else {
1378
+ q_padded = q;
1379
+ kcache_padded = kcache;
1380
+ vcache_padded = vcache;
1381
+ }
1382
+
1383
+ at::Tensor out;
1384
+ if (out_.has_value()) {
1385
+ out = out_.value();
1386
+ TORCH_CHECK(out.dtype() == q_dtype, "Output must have the same dtype as inputs");
1387
+ CHECK_DEVICE(out);
1388
+ TORCH_CHECK(out.stride(-1) == 1, "Output tensor must have contiguous last dimension");
1389
+ CHECK_SHAPE(out, batch_size, seqlen_q, num_heads, head_size_og);
1390
+ if (head_size_og % 8 != 0) { out = torch::empty_like(q_padded); }
1391
+ } else {
1392
+ out = torch::empty_like(q_padded);
1393
+ }
1394
+
1395
+ auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
1396
+ const int head_size = round_multiple(head_size_og, 8);
1397
+ const int head_size_rounded = round_multiple(head_size, 32);
1398
+ const int seqlen_q_rounded = round_multiple(seqlen_q, 128);
1399
+ const int seqlen_k_rounded = round_multiple(seqlen_k, 128);
1400
+
1401
+ // Otherwise the kernel will be launched from cuda:0 device
1402
+ // Cast to char to avoid compiler warning about narrowing
1403
+ at::cuda::CUDAGuard device_guard{(char)q.get_device()};
1404
+
1405
+ auto opts = q.options();
1406
+
1407
+ auto softmax_lse = torch::empty({batch_size, num_heads, seqlen_q}, opts.dtype(at::kFloat));
1408
+
1409
+ Flash_fwd_params params;
1410
+ set_params_fprop(params,
1411
+ batch_size,
1412
+ seqlen_q, seqlen_k,
1413
+ seqlen_q_rounded, seqlen_k_rounded,
1414
+ num_heads, num_heads_k,
1415
+ head_size, head_size_rounded,
1416
+ q_padded, kcache_padded, vcache_padded, out,
1417
+ /*cu_seqlens_q_d=*/nullptr,
1418
+ /*cu_seqlens_k_d=*/nullptr,
1419
+ /*seqused_k=*/nullptr,
1420
+ /*p_ptr=*/nullptr,
1421
+ softmax_lse.data_ptr(),
1422
+ /*p_dropout=*/0.f,
1423
+ softmax_scale,
1424
+ window_size_left,
1425
+ window_size_right,
1426
+ softcap
1427
+ );
1428
+
1429
+ at::Tensor k, v, k_padded, v_padded;
1430
+ if (k_.has_value()) {
1431
+ TORCH_CHECK(v_.has_value(), "If key is supplied, value must also be passed in");
1432
+ TORCH_CHECK(seqlens_k_.has_value(), "If key is supplied, seqlens_k must also be passed in");
1433
+ TORCH_CHECK(seqlen_q <= seqlen_k, "If key is supplied, it must have seqlen <= the seqlen of the KV cache");
1434
+ k = k_.value();
1435
+ v = v_.value();
1436
+ TORCH_CHECK(k.dtype() == q_dtype, "Key must have the same dtype as query");
1437
+ TORCH_CHECK(v.dtype() == q_dtype, "Value must have the same dtype as query");
1438
+ CHECK_DEVICE(k); CHECK_DEVICE(v);
1439
+ TORCH_CHECK(k.stride(-1) == 1, "Key tensor must have contiguous last dimension");
1440
+ TORCH_CHECK(v.stride(-1) == 1, "Value tensor must have contiguous last dimension");
1441
+ int seqlen_knew = k.size(1);
1442
+ CHECK_SHAPE(k, batch_size, seqlen_knew, num_heads_k, head_size_og);
1443
+ CHECK_SHAPE(v, batch_size, seqlen_knew, num_heads_k, head_size_og);
1444
+ if (head_size_og % 8 != 0) {
1445
+ k_padded = torch::nn::functional::pad(k, torch::nn::functional::PadFuncOptions({0, 8 - head_size_og % 8}));
1446
+ v_padded = torch::nn::functional::pad(v, torch::nn::functional::PadFuncOptions({0, 8 - head_size_og % 8}));
1447
+ } else {
1448
+ k_padded = k;
1449
+ v_padded = v;
1450
+ }
1451
+ params.seqlen_knew = seqlen_knew;
1452
+ params.knew_ptr = k_padded.data_ptr();
1453
+ params.vnew_ptr = v_padded.data_ptr();
1454
+ // All strides are in elements, not bytes.
1455
+ params.knew_batch_stride = k_padded.stride(0);
1456
+ params.vnew_batch_stride = v_padded.stride(0);
1457
+ params.knew_row_stride = k_padded.stride(-3);
1458
+ params.vnew_row_stride = v_padded.stride(-3);
1459
+ params.knew_head_stride = k_padded.stride(-2);
1460
+ params.vnew_head_stride = v_padded.stride(-2);
1461
+ }
1462
+
1463
+ if (seqlens_k_.has_value()) {
1464
+ auto seqlens_k = seqlens_k_.value();
1465
+ TORCH_CHECK(seqlens_k.dtype() == torch::kInt32, "seqlens_k must have dtype int32");
1466
+ CHECK_DEVICE(seqlens_k);
1467
+ CHECK_CONTIGUOUS(seqlens_k);
1468
+ CHECK_SHAPE(seqlens_k, batch_size);
1469
+ params.cu_seqlens_k = static_cast<int *>(seqlens_k.data_ptr());
1470
+ }
1471
+ params.is_seqlens_k_cumulative = !(seqlens_k_.has_value());
1472
+
1473
+ if (rotary_cos_.has_value()) {
1474
+ TORCH_CHECK(k_.has_value(), "If rotary cos/sin are provided, new key / value to be appended to KV cache must also be provided");
1475
+ auto rotary_cos = rotary_cos_.value();
1476
+ CHECK_DEVICE(rotary_cos);
1477
+ params.rotary_dim = rotary_cos.size(1) * 2;
1478
+ TORCH_CHECK(params.rotary_dim <= head_size, "rotary_dim must be <= headdim");
1479
+ TORCH_CHECK(params.rotary_dim % 16 == 0, "Only rotary dimensions divisible by 16 are currently supported");
1480
+ const int seqlen_ro = rotary_cos.size(0);
1481
+ TORCH_CHECK(seqlen_ro >= seqlen_k, "cos/sin seqlen must be at least the seqlen of KV cache");
1482
+ CHECK_SHAPE(rotary_cos, seqlen_ro, params.rotary_dim / 2);
1483
+ CHECK_CONTIGUOUS(rotary_cos);
1484
+ TORCH_CHECK(rotary_cos.scalar_type() == q_dtype, "rotary_cos must have the same dtype as query");
1485
+
1486
+ TORCH_CHECK(rotary_sin_.has_value(), "If rotary cos is provided, rotary sin must also be provided");
1487
+ auto rotary_sin = rotary_sin_.value();
1488
+ CHECK_DEVICE(rotary_sin);
1489
+ CHECK_SHAPE(rotary_sin, seqlen_ro, params.rotary_dim / 2);
1490
+ CHECK_CONTIGUOUS(rotary_sin);
1491
+ TORCH_CHECK(rotary_sin.scalar_type() == q_dtype, "rotary_sin must have the same dtype as query");
1492
+ params.rotary_cos_ptr = rotary_cos.data_ptr();
1493
+ params.rotary_sin_ptr = rotary_sin.data_ptr();
1494
+ params.is_rotary_interleaved = is_rotary_interleaved;
1495
+ } else {
1496
+ params.rotary_dim = 0;
1497
+ }
1498
+
1499
+ if (cache_batch_idx_.has_value()) {
1500
+ auto cache_batch_idx = cache_batch_idx_.value();
1501
+ CHECK_DEVICE(cache_batch_idx);
1502
+ CHECK_CONTIGUOUS(cache_batch_idx);
1503
+ TORCH_CHECK(cache_batch_idx.scalar_type() == torch::kInt32, "cache_batch_idx must have dtype int32");
1504
+ params.cache_batch_idx = reinterpret_cast<int *>(cache_batch_idx.data_ptr());
1505
+ }
1506
+
1507
+ set_params_splitkv(params, batch_size, num_heads,
1508
+ head_size, seqlen_k, seqlen_q,
1509
+ head_size_rounded, /*dropout*/0.f, num_splits, dprops, opts);
1510
+
1511
+ if (paged_KV) {
1512
+ params.block_table = block_table.data_ptr<int>();
1513
+ params.block_table_batch_stride = block_table.stride(0);
1514
+ }
1515
+ params.page_block_size = page_block_size;
1516
+
1517
+
1518
+ set_params_alibi(params, alibi_slopes_, batch_size, num_heads);
1519
+
1520
+ auto stream = at::cuda::getCurrentCUDAStream().stream();
1521
+ // Only split kernel supports appending to KV cache, or indexing to the cache with cache_batch_idx,
1522
+ // or paged KV cache
1523
+ run_mha_fwd(params, stream, /*force_split_kernel=*/k_.has_value() || cache_batch_idx_.has_value() || paged_KV);
1524
+
1525
+ if (head_size_og % 8 != 0) {
1526
+ out = out.index({"...", torch::indexing::Slice(torch::indexing::None, head_size_og)});
1527
+ if (out_.has_value()) { out_.value().copy_(out); }
1528
+ if (k_.has_value()) {
1529
+ // It's expensive to copy the KV cache here for the case where head size not divisible by 8,
1530
+ // but we don't expect to get this case in practice. This is just so that the code works for that case.
1531
+ kcache.copy_(kcache_padded.index({"...", torch::indexing::Slice(torch::indexing::None, head_size_og)}));
1532
+ vcache.copy_(vcache_padded.index({"...", torch::indexing::Slice(torch::indexing::None, head_size_og)}));
1533
+ }
1534
+ }
1535
+
1536
+ if (seqlenq_ngroups_swapped) {
1537
+ out = out.transpose(1, 2).reshape({batch_size, 1, num_heads_k * seqlen_q, head_size_og});
1538
+ softmax_lse = softmax_lse.reshape({batch_size, num_heads_k * seqlen_q, 1});
1539
+ }
1540
+ return {out, softmax_lse};
1541
+ }
1542
+
1543
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
1544
+ m.doc() = "FlashAttention";
1545
+ m.def("fwd", &mha_fwd, "Forward pass");
1546
+ m.def("varlen_fwd", &mha_varlen_fwd, "Forward pass (variable length)");
1547
+ m.def("bwd", &mha_bwd, "Backward pass");
1548
+ m.def("varlen_bwd", &mha_varlen_bwd, "Backward pass (variable length)");
1549
+ m.def("fwd_kvcache", &mha_fwd_kvcache, "Forward pass, with KV-cache");
1550
+ }
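To make the seqlenq_ngroups_swapped path in mha_fwd_kvcache above more concrete: for single-token GQA decoding, the query is reshaped so that the group dimension plays the role of seqlen_q, and the output is mapped back afterwards. A minimal libtorch sketch of that round trip follows; the shapes are made up, and the tensor below merely stands in for the real query and attention output:

#include <torch/torch.h>
#include <iostream>

int main() {
    const int batch_size = 2, num_heads = 32, num_heads_k = 8, head_size = 128;
    const int ngroups = num_heads / num_heads_k;

    // Decode-time query: (b, seqlen_q = 1, num_heads = num_heads_k * ngroups, d).
    auto q = torch::randn({batch_size, 1, num_heads, head_size});

    // Swap: (b, 1, nheads_k * ngroups, d) -> (b, ngroups, nheads_k, d),
    // i.e. treat ngroups as the new seqlen_q and nheads_k as the new num_heads.
    auto q_swapped = q.reshape({batch_size, num_heads_k, ngroups, head_size}).transpose(1, 2);
    std::cout << "swapped q: " << q_swapped.sizes() << std::endl;      // [2, 4, 8, 128]

    // After the kernel runs, the output (same shape as q_swapped) is mapped back.
    auto out = q_swapped;  // stand-in for the attention output
    auto out_restored = out.transpose(1, 2).reshape({batch_size, 1, num_heads_k * ngroups, head_size});
    std::cout << "restored out: " << out_restored.sizes() << std::endl;  // [2, 1, 32, 128]
    return 0;
}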
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim128_bf16_causal_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 128, true>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim128_bf16_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 128, false>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim128_fp16_causal_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 128, true>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim128_fp16_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 128, false>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim160_bf16_causal_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 160, true>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim160_bf16_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 160, false>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim160_fp16_causal_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 160, true>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim160_fp16_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 160, false>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim192_bf16_causal_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 192, true>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim192_bf16_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 192, false>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim192_fp16_causal_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 192, true>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim192_fp16_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 192, false>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim224_bf16_causal_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 224, true>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim224_bf16_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 224, false>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim224_fp16_causal_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 224, true>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim224_fp16_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 224, false>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim256_bf16_causal_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 256, true>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim256_bf16_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 256, false>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim256_fp16_causal_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 256, true>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim256_fp16_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 256, false>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim32_bf16_causal_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 32, true>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim32_bf16_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 32, false>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim32_fp16_causal_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 32, true>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim32_fp16_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 32, false>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim64_bf16_causal_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 64, true>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim64_bf16_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 64, false>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim64_fp16_causal_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 64, true>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim64_fp16_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 64, false>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim96_bf16_causal_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 96, true>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim96_bf16_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::bfloat16_t, 96, false>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim96_fp16_causal_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 96, true>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/flash_fwd_split_hdim96_fp16_sm80.cu ADDED
@@ -0,0 +1,7 @@
1
+ // Copyright (c) 2023, Tri Dao.
2
+ // Splitting the different head dimensions to different files to speed up compilation.
3
+ // This file is auto-generated. See "generate_kernels.py"
4
+
5
+ #include "flash_fwd_launch_template.h"
6
+
7
+ template void run_mha_fwd_splitkv_dispatch<cutlass::half_t, 96, false>(Flash_fwd_params &params, cudaStream_t stream);
flash-attention/csrc/flash_attn/src/generate_kernels.py ADDED
@@ -0,0 +1,111 @@
1
+ # Copied from Driss Guessous's PR in PyTorch: https://github.com/pytorch/pytorch/pull/105602
2
+
3
+ # This file is run to generate the kernel instantiations for the flash_attn kernels
4
+ # They are written to several files in order to speed up compilation
5
+
6
+ import argparse
7
+ import itertools
8
+ from dataclasses import dataclass
9
+ from pathlib import Path
10
+ from typing import List, Optional
11
+
12
+ DTYPE_MAP = {
13
+ "fp16": "cutlass::half_t",
14
+ "bf16": "cutlass::bfloat16_t",
15
+ }
16
+
17
+ SM = [80] # Sm80 kernels support up to
18
+ HEAD_DIMENSIONS = [32, 64, 96, 128, 160, 192, 224, 256]
19
+ IS_CAUSAL = ["false", "true"]
20
+ KERNEL_IMPL_TEMPLATE_FWD = """#include "flash_fwd_launch_template.h"
21
+
22
+ template<>
23
+ void run_mha_fwd_<{DTYPE}, {HEAD_DIM}, {IS_CAUSAL}>(Flash_fwd_params &params, cudaStream_t stream) {{
24
+ run_mha_fwd_hdim{HEAD_DIM}<{DTYPE}, {IS_CAUSAL}>(params, stream);
25
+ }}
26
+ """
27
+
28
+ KERNEL_IMPL_TEMPLATE_FWD_SPLIT = """#include "flash_fwd_launch_template.h"
29
+
30
+ template void run_mha_fwd_splitkv_dispatch<{DTYPE}, {HEAD_DIM}, {IS_CAUSAL}>(Flash_fwd_params &params, cudaStream_t stream);
31
+ """
32
+
33
+ KERNEL_IMPL_TEMPLATE_BWD = """#include "flash_bwd_launch_template.h"
34
+
35
+ template<>
36
+ void run_mha_bwd_<{DTYPE}, {HEAD_DIM}>(Flash_bwd_params &params, cudaStream_t stream) {{
37
+ run_mha_bwd_hdim{HEAD_DIM}<{DTYPE}>(params, stream);
38
+ }}
39
+ """
40
+
41
+
42
+ @dataclass
43
+ class Kernel:
44
+ sm: int
45
+ dtype: str
46
+ head_dim: int
47
+ is_causal: bool
48
+ direction: str
49
+
50
+ @property
51
+ def template(self) -> str:
52
+ if self.direction == "fwd":
53
+ return KERNEL_IMPL_TEMPLATE_FWD.format(
54
+ DTYPE=DTYPE_MAP[self.dtype], HEAD_DIM=self.head_dim, IS_CAUSAL=self.is_causal
55
+ )
56
+ elif self.direction == "bwd":
57
+ return KERNEL_IMPL_TEMPLATE_BWD.format(
58
+ DTYPE=DTYPE_MAP[self.dtype], HEAD_DIM=self.head_dim
59
+ )
60
+ else:
61
+ return KERNEL_IMPL_TEMPLATE_FWD_SPLIT.format(
62
+ DTYPE=DTYPE_MAP[self.dtype], HEAD_DIM=self.head_dim, IS_CAUSAL=self.is_causal
63
+ )
64
+
65
+ @property
66
+ def filename(self) -> str:
67
+ return f"flash_{self.direction}_hdim{self.head_dim}_{self.dtype}_{'causal_' if self.is_causal == 'true' else ''}sm{self.sm}.cu"
68
+
69
+
70
+ def get_all_kernels() -> List[Kernel]:
71
+ for direction in ["fwd", "fwd_split"]:
72
+ for dtype, head_dim, is_causal, sm in itertools.product(DTYPE_MAP.keys(), HEAD_DIMENSIONS, IS_CAUSAL, SM):
73
+ yield Kernel(sm=sm, dtype=dtype, head_dim=head_dim, is_causal=is_causal, direction=direction)
74
+ for direction in ["bwd"]:
75
+ for dtype, head_dim, sm in itertools.product(DTYPE_MAP.keys(), HEAD_DIMENSIONS, SM):
76
+ yield Kernel(sm=sm, dtype=dtype, head_dim=head_dim, is_causal="false", direction=direction)
77
+
78
+
79
+ def write_kernel(kernel: Kernel, autogen_dir: Path) -> None:
80
+ prelude = """// Copyright (c) 2023, Tri Dao.
81
+ // Splitting the different head dimensions to different files to speed up compilation.
82
+ // This file is auto-generated. See "generate_kernels.py"\n
83
+ """
84
+ (autogen_dir / kernel.filename).write_text(prelude + kernel.template)
85
+
86
+
87
+ def main(output_dir: Optional[str]) -> None:
88
+ if output_dir is None:
89
+ output_dir = Path(__file__).parent
90
+ else:
91
+ output_dir = Path(output_dir)
92
+
93
+ for kernel in get_all_kernels():
94
+ write_kernel(kernel, output_dir)
95
+
96
+
97
+ if __name__ == "__main__":
98
+ parser = argparse.ArgumentParser(
99
+ prog="generate_kernels",
100
+ description="Generate the flash_attention kernels template instantiations",
101
+ )
102
+ # Set an optional output directory
103
+ parser.add_argument(
104
+ "-o",
105
+ "--output_dir",
106
+ required=False,
107
+ help="Where to generate the kernels "
108
+ " will default to the current directory ",
109
+ )
110
+ args = parser.parse_args()
111
+ main(args.output_dir)
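For reference, KERNEL_IMPL_TEMPLATE_FWD above expands, for the fp16 / head-dim-128 / non-causal combination, into a file named flash_fwd_hdim128_fp16_sm80.cu whose body (after the shared copyright prelude) is expected to look like this:

#include "flash_fwd_launch_template.h"

template<>
void run_mha_fwd_<cutlass::half_t, 128, false>(Flash_fwd_params &params, cudaStream_t stream) {
    run_mha_fwd_hdim128<cutlass::half_t, false>(params, stream);
}

The split-KV instantiations produced by KERNEL_IMPL_TEMPLATE_FWD_SPLIT are exactly the flash_fwd_split_hdim*.cu files shown earlier in this commit.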
flash-attention/csrc/flash_attn/src/kernel_traits.h ADDED
@@ -0,0 +1,344 @@
1
+ /******************************************************************************
2
+ * Copyright (c) 2024, Tri Dao.
3
+ ******************************************************************************/
4
+
5
+ #pragma once
6
+
7
+ #include "cute/tensor.hpp"
8
+
9
+ #include "cutlass/cutlass.h"
10
+ #include "cutlass/layout/layout.h"
11
+ #include <cutlass/numeric_types.h>
12
+
13
+ using namespace cute;
14
+
15
+ template<int kHeadDim_, int kBlockM_, int kBlockN_, int kNWarps_, typename elem_type=cutlass::half_t>
16
+ struct Flash_kernel_traits {
17
+
18
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
19
+ using Element = elem_type;
20
+ static constexpr bool Has_cp_async = true;
21
+ #else
22
+ using Element = cutlass::half_t;
23
+ static constexpr bool Has_cp_async = false;
24
+ #endif
25
+
26
+ using ElementAccum = float;
27
+ using index_t = int64_t;
28
+
29
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
30
+ using MMA_Atom_Arch = std::conditional_t<
31
+ std::is_same_v<elem_type, cutlass::half_t>,
32
+ MMA_Atom<SM80_16x8x16_F32F16F16F32_TN>,
33
+ MMA_Atom<SM80_16x8x16_F32BF16BF16F32_TN>
34
+ >;
35
+ #else
36
+ using MMA_Atom_Arch = MMA_Atom<SM75_16x8x8_F32F16F16F32_TN>;
37
+ #endif
38
+
39
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 750
40
+ using SmemCopyAtom = Copy_Atom<SM75_U32x4_LDSM_N, elem_type>;
41
+ using SmemCopyAtomTransposed = Copy_Atom<SM75_U16x8_LDSM_T, elem_type>;
42
+ #else
43
+ using SmemCopyAtom = Copy_Atom<DefaultCopy, elem_type>;
44
+ using SmemCopyAtomTransposed = Copy_Atom<DefaultCopy, elem_type>;
45
+ #endif
46
+ };
47
+
48
+ // If Share_Q_K_smem is true, that forces Is_Q_in_regs to be true
49
+ template<int kHeadDim_, int kBlockM_, int kBlockN_, int kNWarps_, bool Is_Q_in_regs_=false, bool Share_Q_K_smem_=false, typename elem_type=cutlass::half_t,
50
+ typename Base=Flash_kernel_traits<kHeadDim_, kBlockM_, kBlockN_, kNWarps_, elem_type> >
51
+ struct Flash_fwd_kernel_traits : public Base {
52
+ using Element = typename Base::Element;
53
+ using ElementAccum = typename Base::ElementAccum;
54
+ using index_t = typename Base::index_t;
55
+ static constexpr bool Has_cp_async = Base::Has_cp_async;
56
+ using SmemCopyAtom = typename Base::SmemCopyAtom;
57
+ using SmemCopyAtomTransposed = typename Base::SmemCopyAtomTransposed;
58
+
59
+ static constexpr bool Share_Q_K_smem = Share_Q_K_smem_;
60
+ static constexpr bool Is_Q_in_regs = Is_Q_in_regs_ || Share_Q_K_smem;
61
+
62
+ // The number of threads.
63
+ static constexpr int kNWarps = kNWarps_;
64
+ static constexpr int kNThreads = kNWarps * 32;
65
+
66
+ static constexpr int kBlockM = kBlockM_;
67
+ static constexpr int kBlockN = kBlockN_;
68
+ static constexpr int kHeadDim = kHeadDim_;
69
+ static_assert(kHeadDim % 32 == 0);
70
+ static constexpr int kBlockKSmem = kHeadDim % 64 == 0 ? 64 : 32;
71
+ static constexpr int kBlockKGmem = kHeadDim % 128 == 0 ? 128 : (kHeadDim % 64 == 0 ? 64 : 32);
72
+ static constexpr int kSwizzle = kBlockKSmem == 32 ? 2 : 3;
73
+
74
+ using TiledMma = TiledMMA<
75
+ typename Base::MMA_Atom_Arch,
76
+ Layout<Shape<Int<kNWarps>,_1,_1>>, // 4x1x1 or 8x1x1 thread group
77
+ Tile<Int<16 * kNWarps>, _16, _16>>;
78
+
79
+ using SmemLayoutAtomQ = decltype(
80
+ composition(Swizzle<kSwizzle, 3, 3>{},
81
+ // This has to be kBlockKSmem, using kHeadDim gives wrong results for d=128
82
+ Layout<Shape<_8, Int<kBlockKSmem>>,
83
+ Stride<Int<kBlockKSmem>, _1>>{}));
84
+ using SmemLayoutQ = decltype(tile_to_shape(
85
+ SmemLayoutAtomQ{},
86
+ Shape<Int<kBlockM>, Int<kHeadDim>>{}));
87
+
88
+ using SmemLayoutKV = decltype(tile_to_shape(
89
+ SmemLayoutAtomQ{},
90
+ Shape<Int<kBlockN>, Int<kHeadDim>>{}));
91
+
92
+ // https://github.com/ColfaxResearch/cutlass-kernels/blob/a222587e6d59b93ba704853d3946fb686d8b8892/src/fmha/fmha_forward.cu#L434
93
+ using SmemLayoutVtransposed = decltype(
94
+ composition(SmemLayoutKV{}, make_layout(Shape<Int<kHeadDim>, Int<kBlockN>>{}, GenRowMajor{})));
95
+ using SmemLayoutVtransposedNoSwizzle = decltype(get_nonswizzle_portion(SmemLayoutVtransposed{}));
96
+
97
+ using SmemLayoutAtomO = decltype(
98
+ composition(Swizzle<kSwizzle, 3, 3>{},
99
+ Layout<Shape<Int<8>, Int<kBlockKSmem>>,
100
+ Stride<Int<kBlockKSmem>, _1>>{}));
101
+ using SmemLayoutO = decltype(tile_to_shape(
102
+ SmemLayoutAtomO{},
103
+ Shape<Int<kBlockM>, Int<kHeadDim>>{}));
104
+ using SmemCopyAtomO = Copy_Atom<DefaultCopy, Element>;
105
+ using SmemCopyAtomOaccum = Copy_Atom<DefaultCopy, ElementAccum>;
106
+
107
+ static constexpr int kSmemQSize = size(SmemLayoutQ{}) * sizeof(Element);
108
+ static constexpr int kSmemKVSize = size(SmemLayoutKV{}) * 2 * sizeof(Element);
109
+ static constexpr int kSmemSize = Share_Q_K_smem ? std::max(kSmemQSize, kSmemKVSize) : kSmemQSize + kSmemKVSize;
110
+
111
+ static constexpr int kGmemElemsPerLoad = sizeof(cute::uint128_t) / sizeof(Element);
112
+ static_assert(kHeadDim % kGmemElemsPerLoad == 0, "kHeadDim must be a multiple of kGmemElemsPerLoad");
113
+ // Using kBlockKSmem here is 6-10% faster than kBlockKGmem for d=128 because of bank conflicts.
114
+ // For example, for d=128, smem is split into 2 "pages", each page takes care of columns
115
+ // 0-63 and 64-127. If we have 16 threads per row for gmem read, when we write to smem,
116
+ // thread 0 - 7 will write to the first page and thread 8 - 15 will write to the second page,
117
+ // to the same banks.
118
+ static constexpr int kGmemThreadsPerRow = kBlockKSmem / kGmemElemsPerLoad;
119
+ static_assert(kNThreads % kGmemThreadsPerRow == 0, "kNThreads must be a multiple of kGmemThreadsPerRow");
120
+ using GmemLayoutAtom = Layout<Shape <Int<kNThreads / kGmemThreadsPerRow>, Int<kGmemThreadsPerRow>>,
121
+ Stride<Int<kGmemThreadsPerRow>, _1>>;
122
+
123
+ // We use CACHEGLOBAL instead of CACHEALWAYS for both Q and K/V, since we won't be reading
124
+ // from the same address by the same threadblock. This is slightly faster.
125
+ using Gmem_copy_struct = std::conditional_t<
126
+ Has_cp_async,
127
+ SM80_CP_ASYNC_CACHEGLOBAL<cute::uint128_t>,
128
+ DefaultCopy
129
+ >;
130
+ using GmemTiledCopyQKV = decltype(
131
+ make_tiled_copy(Copy_Atom<Gmem_copy_struct, Element>{},
132
+ GmemLayoutAtom{},
133
+ Layout<Shape<_1, _8>>{})); // Val layout, 8 vals per read
134
+ using GmemTiledCopyO = decltype(
135
+ make_tiled_copy(Copy_Atom<DefaultCopy, Element>{},
136
+ GmemLayoutAtom{},
137
+ Layout<Shape<_1, _8>>{})); // Val layout, 8 vals per store
138
+
139
+ using GmemLayoutAtomOaccum = std::conditional_t<
140
+ kBlockKSmem == 32,
141
+ Layout<Shape <_16, _8>, // Thread layout, 8 threads per row
142
+ Stride< _8, _1>>,
143
+ Layout<Shape <_8, _16>, // Thread layout, 16 threads per row
144
+ Stride< _16, _1>>
145
+ >;
146
+ using GmemTiledCopyOaccum = decltype(
147
+ make_tiled_copy(Copy_Atom<DefaultCopy, ElementAccum>{},
148
+ GmemLayoutAtomOaccum{},
149
+ Layout<Shape < _1, _4>>{})); // Val layout, 4 vals per store
150
+ using GmemLayoutAtomRotcossin = GmemLayoutAtom;
151
+ using GmemTiledCopyRotcossin = decltype(
152
+ make_tiled_copy(Copy_Atom<UniversalCopy<uint64_t>, Element>{},
153
+ GmemLayoutAtomRotcossin{},
154
+ Layout<Shape < _1, _4>>{})); // Val layout, 4 vals per load
155
+ using GmemTiledCopyRotcossinCont = decltype(
156
+ make_tiled_copy(Copy_Atom<DefaultCopy, Element>{},
157
+ GmemLayoutAtomRotcossin{},
158
+ Layout<Shape < _1, _8>>{})); // Val layout, 8 vals per load
159
+ };
160
+
161
+ // Is_V_in_regs is an option to reduce smem usage, but will increase register pressue.
162
+ // No_double_buffer is another option to reduce smem usage, but will slow things down.
163
+ template<int kHeadDim_, int kBlockM_, int kBlockN_, int kNWarps_,
164
+ int AtomLayoutMSdP_=1, int AtomLayoutNdKV=2, int AtomLayoutMdQ=2,
165
+ bool Is_V_in_regs_=false, bool No_double_buffer_=false, typename elem_type=cutlass::half_t,
166
+ typename Base=Flash_kernel_traits<kHeadDim_, kBlockM_, kBlockN_, kNWarps_, elem_type> >
167
+ struct Flash_bwd_kernel_traits : public Base {
168
+ using Element = typename Base::Element;
169
+ using ElementAccum = typename Base::ElementAccum;
170
+ using index_t = typename Base::index_t;
171
+ static constexpr bool Has_cp_async = Base::Has_cp_async;
172
+ using SmemCopyAtom = typename Base::SmemCopyAtom;
173
+ using SmemCopyAtomTransposed = typename Base::SmemCopyAtomTransposed;
174
+
175
+ static constexpr bool Is_V_in_regs = Is_V_in_regs_;
176
+ static constexpr bool No_double_buffer = No_double_buffer_;
177
+
178
+ // The number of threads.
179
+ static constexpr int kNWarps = kNWarps_;
180
+ static constexpr int kNThreads = kNWarps * 32;
181
+
182
+ static constexpr int kBlockM = kBlockM_;
183
+ static constexpr int kBlockN = kBlockN_;
184
+ static constexpr int kHeadDim = kHeadDim_;
185
+ static_assert(kHeadDim % 32 == 0);
186
+ static constexpr int kBlockKSmem = kHeadDim % 64 == 0 ? 64 : 32;
187
+ static constexpr int kBlockKGmem = kHeadDim % 128 == 0 ? 128 : (kHeadDim % 64 == 0 ? 64 : 32);
188
+ static constexpr int kSwizzle = kBlockKSmem == 32 ? 2 : 3;
189
+
190
+ static constexpr int AtomLayoutMSdP = AtomLayoutMSdP_;
191
+ static_assert(kNWarps % AtomLayoutMSdP == 0);
192
+ static_assert(kNWarps % AtomLayoutNdKV == 0);
193
+ static_assert(kNWarps % AtomLayoutMdQ == 0);
194
+
195
+ using TiledMmaSdP = TiledMMA<
196
+ typename Base::MMA_Atom_Arch,
197
+ Layout<Shape<Int<AtomLayoutMSdP>, Int<kNWarps / AtomLayoutMSdP>, _1>>,
198
+ Tile<Int<16 * AtomLayoutMSdP>, Int<16 * kNWarps / AtomLayoutMSdP>, _16>>;
199
+
200
+ using TiledMmadKV = TiledMMA<
201
+ typename Base::MMA_Atom_Arch,
202
+ Layout<Shape<Int<AtomLayoutNdKV>, Int<kNWarps / AtomLayoutNdKV>, _1>>,
203
+ Tile<Int<16 * AtomLayoutNdKV>, Int<16 * kNWarps / AtomLayoutNdKV>, _16>>;
204
+
205
+ using TiledMmadQ = TiledMMA<
206
+ typename Base::MMA_Atom_Arch,
207
+ Layout<Shape<Int<AtomLayoutMdQ>, Int<kNWarps / AtomLayoutMdQ>, _1>>, // 2x4x1 or 4x2x1 thread group
208
+ Tile<Int<16 * AtomLayoutMdQ>, Int<16 * kNWarps / AtomLayoutMdQ>, _16>>;
209
+
210
+ using SmemLayoutAtomQdO = decltype(
211
+ composition(Swizzle<kSwizzle, 3, 3>{},
212
+ Layout<Shape<_8, Int<kBlockKSmem>>,
213
+ Stride<Int<kBlockKSmem>, _1>>{}));
214
+ using SmemLayoutQdO = decltype(tile_to_shape(
215
+ SmemLayoutAtomQdO{},
216
+ make_shape(Int<kBlockM>{}, Int<kHeadDim>{})));
217
+
218
+ using SmemLayoutAtomKV = decltype(
219
+ composition(Swizzle<kSwizzle, 3, 3>{},
220
+ Layout<Shape<Int<kBlockM / kNWarps>, Int<kBlockKSmem>>,
221
+ Stride<Int<kBlockKSmem>, _1>>{}));
222
+ using SmemLayoutKV = decltype(tile_to_shape(
223
+ // SmemLayoutAtomQdO{},
224
+ SmemLayoutAtomKV{},
225
+ make_shape(Int<kBlockN>{}, Int<kHeadDim>{})));
226
+
227
+ using SmemLayoutKtransposed = decltype(
228
+ composition(SmemLayoutKV{}, make_layout(Shape<Int<kHeadDim>, Int<kBlockN>>{}, GenRowMajor{})));
229
+ using SmemLayoutKtransposedNoSwizzle = decltype(get_nonswizzle_portion(SmemLayoutKtransposed{}));
230
+
231
+ // TODO: generalize to other values of kBlockN
232
+ // TODO: what should be the Swizzle here? 3 is faster than 1, and 1 is faster than 2
233
+ // static constexpr int kPBlockN = kBlockN;
234
+ // Temporarily disabling this for hdim 256 on sm86 and sm89
235
+ // static_assert(kBlockN >= 64);
236
+ static_assert(kBlockN >= 32);
237
+ // TD [2023-03-19]: Idk why kPBlockN = 16 and kSwizzlePdS=3 is the fastest.
238
+ static constexpr int kPBlockN = kBlockN >= 64 ? 64 : 32;
239
+ static_assert(kPBlockN == 16 || kPBlockN == 32 || kPBlockN == 64);
240
+ // static constexpr int kSwizzlePdS = kPBlockN == 16 ? 1 : (kPBlockN == 32 ? 2 : 3);
241
+ static constexpr int kSwizzlePdS = 3;
242
+ using SmemLayoutAtomPdS = decltype(
243
+ composition(Swizzle<kSwizzlePdS, 3, 3>{},
244
+ Layout<Shape<Int<kBlockM>, Int<kPBlockN>>,
245
+ Stride<Int<kPBlockN>, _1>>{}));
246
+ using SmemLayoutPdS = decltype(tile_to_shape(
247
+ SmemLayoutAtomPdS{},
248
+ make_shape(Int<kBlockM>{}, Int<kBlockN>{})));
249
+ using SmemLayoutPdStransposed = decltype(
250
+ composition(SmemLayoutPdS{}, make_layout(Shape<Int<kBlockN>, Int<kBlockM>>{}, GenRowMajor{})));
251
+ using SmemLayoutPdStransposedNoSwizzle = decltype(get_nonswizzle_portion(SmemLayoutPdStransposed{}));
252
+
253
+ using SmemCopyAtomPdS = Copy_Atom<DefaultCopy, elem_type>;
254
+
255
+ using SmemLayoutQdOtransposed = decltype(
256
+ composition(SmemLayoutQdO{}, make_layout(Shape<Int<kHeadDim>, Int<kBlockM>>{}, GenRowMajor{})));
257
+ using SmemLayoutQdOtransposedNoSwizzle = decltype(get_nonswizzle_portion(SmemLayoutQdOtransposed{}));
258
+
259
+ using SmemLayoutAtomdKV = decltype(
260
+ composition(Swizzle<kSwizzle, 3, 3>{},
261
+ Layout<Shape<_8, Int<kBlockKSmem>>,
262
+ Stride<Int<kBlockKSmem>, _1>>{}));
263
+ using SmemLayoutdKV = decltype(tile_to_shape(
264
+ SmemLayoutAtomdKV{},
265
+ make_shape(Int<kBlockN>{}, Int<kHeadDim>{})));
266
+ using SmemCopyAtomdKV = Copy_Atom<DefaultCopy, elem_type>;
267
+
268
+ using SmemLayoutAtomdQ = decltype(
269
+ composition(Swizzle<kSwizzle, 3, 3>{},
270
+ Layout<Shape<_8, Int<kBlockKSmem>>,
271
+ Stride<Int<kBlockKSmem>, _1>>{}));
272
+ using SmemLayoutdQ = decltype(tile_to_shape(
273
+ SmemLayoutAtomdQ{},
274
+ make_shape(Int<kBlockM>{}, Int<kHeadDim>{})));
275
+ using SmemCopyAtomdQ = Copy_Atom<DefaultCopy, elem_type>;
276
+
277
+ // Double buffer for sQ
278
+ static constexpr int kSmemQdOSize = size(SmemLayoutQdO{}) * (No_double_buffer ? 2 : 3) * sizeof(Element);
279
+ static constexpr int kSmemKVSize = size(SmemLayoutKV{}) * 2 * sizeof(Element);
280
+ static constexpr int kSmemdSSize = size(SmemLayoutPdS{}) * sizeof(Element);
281
+ static constexpr int kSmemPSize = size(SmemLayoutPdS{}) * sizeof(Element);
282
+ static constexpr int kSmemdQSize = size(SmemLayoutdQ{}) * sizeof(Element);
283
+ static constexpr int kSmemSize = kSmemQdOSize
284
+ + (!Is_V_in_regs
285
+ ? kSmemKVSize + kSmemdSSize + std::max(kSmemPSize, kSmemdQSize)
286
+ : std::max(kSmemKVSize, kSmemKVSize / 2 + kSmemdSSize + std::max(kSmemPSize, kSmemdQSize)));
287
+ static constexpr int kSmemSize1colblock = kSmemQdOSize
288
+ + (!Is_V_in_regs
289
+ ? kSmemKVSize + kSmemdSSize + kSmemPSize
290
+ : std::max(kSmemKVSize, kSmemKVSize / 2 + kSmemdSSize + kSmemPSize));
291
+
292
+ static constexpr int kGmemElemsPerLoad = sizeof(cute::uint128_t) / sizeof(Element);
293
+ static_assert(kHeadDim % kGmemElemsPerLoad == 0, "kHeadDim must be a multiple of kGmemElemsPerLoad");
294
+ // Using kBlockKSmem instead of kHeadDim here to avoid bank conflicts, but doesn't seem
295
+ // to affect speed in practice.
296
+ static constexpr int kGmemThreadsPerRow = kBlockKSmem / kGmemElemsPerLoad;
297
+ static_assert(kNThreads % kGmemThreadsPerRow == 0, "kNThreads must be a multiple of kGmemThreadsPerRow");
298
+ using GmemLayoutAtom = Layout<Shape <Int<kNThreads / kGmemThreadsPerRow>, Int<kGmemThreadsPerRow>>,
299
+ Stride<Int<kGmemThreadsPerRow>, _1>>;
300
+
301
+ // We use CACHEGLOBAL instead of CACHEALWAYS for both Q and K/V, since we won't be reading
302
+ // from the same address by the same threadblock. This is slightly faster.
303
+ using Gmem_copy_struct = std::conditional_t<
304
+ Has_cp_async,
305
+ SM80_CP_ASYNC_CACHEGLOBAL<cute::uint128_t>,
306
+ DefaultCopy
307
+ >;
308
+ using GmemTiledCopyQKV = decltype(
309
+ make_tiled_copy(Copy_Atom<Gmem_copy_struct, elem_type>{},
310
+ GmemLayoutAtom{},
311
+ Layout<Shape<_1, _8>>{})); // Val layout, 8 vals per read
312
+ using GmemTiledCopydO = decltype(
313
+ make_tiled_copy(Copy_Atom<DefaultCopy, elem_type>{},
314
+ GmemLayoutAtom{},
315
+ Layout<Shape < _1, _8>>{})); // Val layout, 8 vals per store
316
+ using GmemTiledCopydKV = decltype(
317
+ make_tiled_copy(Copy_Atom<DefaultCopy, elem_type>{},
318
+ GmemLayoutAtom{},
319
+ Layout<Shape < _1, _8>>{})); // Val layout, 8 vals per store
320
+ using GmemTiledCopydQ = decltype(
321
+ make_tiled_copy(Copy_Atom<DefaultCopy, elem_type>{},
322
+ GmemLayoutAtom{},
323
+ Layout<Shape < _1, _8>>{})); // Val layout, 8 vals per store
324
+ using GmemLayoutAtomdQaccum = std::conditional_t<
325
+ kBlockKSmem == 32,
326
+ Layout<Shape <_32, _8>, // Thread layout, 8 threads per row
327
+ Stride< _8, _1>>,
328
+ Layout<Shape <_16, _16>, // Thread layout, 16 threads per row
329
+ Stride< _16, _1>>
330
+ >;
331
+ using GmemTiledCopydQaccum = decltype(
332
+ make_tiled_copy(Copy_Atom<DefaultCopy, ElementAccum>{},
333
+ GmemLayoutAtomdQaccum{},
334
+ Layout<Shape < _1, _4>>{})); // Val layout, 4 vals per store
335
+
336
+ using GmemTiledCopydQaccumAtomicAdd = decltype(
337
+ make_tiled_copy(Copy_Atom<DefaultCopy, ElementAccum>{},
338
+ Layout<Shape <_8, _32>, // Thread layout, 8 threads per row
339
+ Stride<_32, _1>>{},
340
+ Layout<Shape < _1, _1>>{})); // Val layout, 1 val per store
341
+
342
+ };
343
+
344
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
flash-attention/csrc/flash_attn/src/mask.h ADDED
@@ -0,0 +1,213 @@
1
+ /******************************************************************************
2
+ * Copyright (c) 2024, Tri Dao.
3
+ ******************************************************************************/
4
+
5
+ #pragma once
6
+
7
+ #include <cute/tensor.hpp>
8
+
9
+ namespace flash {
10
+
11
+ using namespace cute;
12
+
13
+ template <typename Engine, typename Layout>
14
+ __forceinline__ __device__ void apply_mask(Tensor<Engine, Layout> &tensor, const int max_seqlen_k,
15
+ const int col_idx_offset_ = 0) {
16
+ // tensor has shape (nrow=(2, MMA_M), ncol=(2, MMA_N))
17
+ static_assert(Layout::rank == 2, "Only support 2D Tensor");
18
+ const int lane_id = threadIdx.x % 32;
19
+ const int col_idx_offset = col_idx_offset_ + (lane_id % 4) * 2;
20
+ #pragma unroll
21
+ for (int nj = 0; nj < size<1, 1>(tensor); ++nj) {
22
+ const int col_idx_base = col_idx_offset + nj * 8;
23
+ #pragma unroll
24
+ for (int j = 0; j < size<1, 0>(tensor); ++j) {
25
+ const int col_idx = col_idx_base + j;
26
+ if (col_idx >= max_seqlen_k) {
27
+ // Without the "make_coord" we get wrong results
28
+ #pragma unroll
29
+ for (int mi = 0; mi < size<0>(tensor); ++mi) {
30
+ tensor(mi, make_coord(j, nj)) = -INFINITY;
31
+ }
32
+ }
33
+ }
34
+ }
35
+ }
36
+
37
+ template <bool HasWSLeft=true, typename Engine, typename Layout>
38
+ __forceinline__ __device__ void apply_mask_local(Tensor<Engine, Layout> &tensor, const int col_idx_offset_,
39
+ const int max_seqlen_k, const int row_idx_offset,
40
+ const int max_seqlen_q, const int warp_row_stride,
41
+ const int window_size_left, const int window_size_right) {
42
+ // tensor has shape (nrow=(2, MMA_M), ncol=(2, MMA_N))
43
+ static_assert(Layout::rank == 2, "Only support 2D Tensor");
44
+ const int lane_id = threadIdx.x % 32;
45
+ const int col_idx_offset = col_idx_offset_ + (lane_id % 4) * 2;
46
+ #pragma unroll
47
+ for (int mi = 0; mi < size<0, 1>(tensor); ++mi) {
48
+ const int row_idx_base = row_idx_offset + mi * warp_row_stride;
49
+ #pragma unroll
50
+ for (int i = 0; i < size<0, 0>(tensor); ++i) {
51
+ const int row_idx = row_idx_base + i * 8;
52
+ const int col_idx_limit_left = std::max(0, row_idx + max_seqlen_k - max_seqlen_q - window_size_left);
53
+ const int col_idx_limit_right = std::min(max_seqlen_k, row_idx + 1 + max_seqlen_k - max_seqlen_q + window_size_right);
54
+ #pragma unroll
55
+ for (int nj = 0; nj < size<1, 1>(tensor); ++nj) {
56
+ const int col_idx_base = col_idx_offset + nj * 8;
57
+ #pragma unroll
58
+ for (int j = 0; j < size<1, 0>(tensor); ++j) {
59
+ const int col_idx = col_idx_base + j;
60
+ if (col_idx >= col_idx_limit_right || (HasWSLeft && col_idx < col_idx_limit_left)) {
61
+ tensor(make_coord(i, mi), make_coord(j, nj)) = -INFINITY;
62
+ }
63
+ }
64
+ }
65
+ // if (cute::thread0()) {
66
+ // printf("mi = %d, i = %d, row_idx = %d, max_seqlen_k = %d\n", mi, i, row_idx, max_seqlen_k);
67
+ // print(tensor(make_coord(i, mi), _));
68
+ // // print(tensor(_, j + nj * size<1, 0>(tensor)));
69
+ // }
70
+ }
71
+ }
72
+ }
73
+
74
+ template <typename Engine, typename Layout>
75
+ __forceinline__ __device__ void apply_mask_causal(Tensor<Engine, Layout> &tensor, const int col_idx_offset_,
76
+ const int max_seqlen_k, const int row_idx_offset,
77
+ const int max_seqlen_q, const int warp_row_stride) {
78
+ // Causal masking is equivalent to local masking with window_size_left = infinity and window_size_right = 0
79
+ apply_mask_local</*HasWSLeft=*/false>(tensor, col_idx_offset_, max_seqlen_k, row_idx_offset,
80
+ max_seqlen_q, warp_row_stride, -1, 0);
81
+ }
82
+
83
+ template <typename Engine0, typename Layout0, typename Engine1, typename Layout1>
84
+ __forceinline__ __device__ void apply_mask_causal_w_idx(
85
+ Tensor<Engine0, Layout0> &tensor, Tensor<Engine1, Layout1> const &idx_rowcol,
86
+ const int col_idx_offset_, const int max_seqlen_k, const int row_idx_offset)
87
+ {
88
+ // tensor has shape (nrow=(2, MMA_M), ncol=(2, MMA_N))
89
+ static_assert(Layout0::rank == 2, "Only support 2D Tensor");
90
+ static_assert(Layout1::rank == 2, "Only support 2D Tensor");
91
+ CUTE_STATIC_ASSERT_V(size<0>(tensor) == size<0>(idx_rowcol));
92
+ CUTE_STATIC_ASSERT_V(size<1>(tensor) == size<1>(idx_rowcol));
93
+ #pragma unroll
94
+ for (int mi = 0; mi < size<0>(tensor); ++mi) {
95
+ const int col_idx_limit = std::min(max_seqlen_k, 1 + row_idx_offset + get<0>(idx_rowcol(mi, 0)));
96
+ #pragma unroll
97
+ for (int ni = 0; ni < size<1, 1>(tensor); ++ni) {
98
+ if (col_idx_offset_ + get<1>(idx_rowcol(0, ni)) >= col_idx_limit) {
99
+ tensor(mi, ni) = -INFINITY;
100
+ }
101
+ }
102
+ // if (cute::thread0()) {
103
+ // printf("ni = %d, j = %d, col_idx = %d, max_seqlen_k = %d\n", ni, j, col_idx, max_seqlen_k);
104
+ // print(tensor(_, make_coord(j, ni)));
105
+ // // print(tensor(_, j + ni * size<1, 0>(tensor)));
106
+ // }
107
+ }
108
+ }
109
+
110
+ template <bool Is_causal, bool Is_local, bool Has_alibi>
111
+ struct Mask {
112
+
113
+ const int max_seqlen_k, max_seqlen_q;
114
+ const int window_size_left, window_size_right;
115
+ const float alibi_slope;
116
+
117
+ __forceinline__ __device__ Mask(const int max_seqlen_k, const int max_seqlen_q,
118
+ const int window_size_left, const int window_size_right,
119
+ const float alibi_slope=0.f)
120
+ : max_seqlen_k(max_seqlen_k)
121
+ , max_seqlen_q(max_seqlen_q)
122
+ , window_size_left(window_size_left)
123
+ , window_size_right(window_size_right)
124
+ , alibi_slope(!Has_alibi ? 0.0 : alibi_slope) {
125
+ };
126
+
127
+ // Causal_mask: whether this particular iteration needs causal masking
128
+ template <bool Causal_mask=false, bool Is_even_MN=true, typename Engine, typename Layout>
129
+ __forceinline__ __device__ void apply_mask(Tensor<Engine, Layout> &tensor_,
130
+ const int col_idx_offset_,
131
+ const int row_idx_offset,
132
+ const int warp_row_stride) {
133
+ static_assert(!(Causal_mask && Is_local), "Cannot be both causal and local");
134
+ static_assert(Layout::rank == 3, "Only support 3D Tensor");
135
+ static_assert(decltype(size<0>(tensor_))::value == 4, "First dimension must be 4");
136
+ static constexpr bool Need_masking = Has_alibi || Causal_mask || Is_local || !Is_even_MN;
137
+ // if (cute::thread0()) { printf("Has_alibi = %d, Causal_mask=%d, Is_local=%d, Is_even_MN = %d, Need_masking = %d\n", Has_alibi, Causal_mask, Is_local, Is_even_MN, Need_masking); }
138
+ if constexpr (Need_masking) {
139
+ // Reshape tensor_ from (MMA=4, MMA_M, MMA_N) to (nrow=(2, MMA_M), ncol=(2, MMA_N))
140
+ Tensor tensor = make_tensor(tensor_.data(), flash::convert_layout_acc_rowcol(tensor_.layout()));
141
+ // Do we need both row and column indices, or just column indices?
142
+ static constexpr bool Col_idx_only = !(Has_alibi && !Is_causal) && !Is_local && !Causal_mask;
143
+ const int lane_id = threadIdx.x % 32;
144
+ const int col_idx_offset = col_idx_offset_ + (lane_id % 4) * 2;
145
+ if constexpr (Col_idx_only) {
146
+ #pragma unroll
147
+ for (int nj = 0; nj < size<1, 1>(tensor); ++nj) {
148
+ const int col_idx_base = col_idx_offset + nj * 8;
149
+ #pragma unroll
150
+ for (int j = 0; j < size<1, 0>(tensor); ++j) {
151
+ const int col_idx = col_idx_base + j;
152
+ #pragma unroll
153
+ for (int mi = 0; mi < size<0>(tensor); ++mi) {
154
+ // No causal, no local
155
+ if constexpr (Has_alibi) {
156
+ tensor(mi, make_coord(j, nj)) += alibi_slope * col_idx;
157
+ }
158
+ if constexpr (!Is_even_MN) {
159
+ if (col_idx >= max_seqlen_k) { tensor(mi, make_coord(j, nj)) = -INFINITY; }
160
+ }
161
+ }
162
+ }
163
+ }
164
+ } else {
165
+ #pragma unroll
166
+ for (int mi = 0; mi < size<0, 1>(tensor); ++mi) {
167
+ const int row_idx_base = row_idx_offset + mi * warp_row_stride;
168
+ #pragma unroll
169
+ for (int i = 0; i < size<0, 0>(tensor); ++i) {
170
+ const int row_idx = row_idx_base + i * 8;
171
+ const int col_idx_limit_left = std::max(0, row_idx + max_seqlen_k - max_seqlen_q - window_size_left);
172
+ const int col_idx_limit_right = std::min(max_seqlen_k, row_idx + 1 + max_seqlen_k - max_seqlen_q + window_size_right);
173
+ #pragma unroll
174
+ for (int nj = 0; nj < size<1, 1>(tensor); ++nj) {
175
+ const int col_idx_base = col_idx_offset + nj * 8;
176
+ #pragma unroll
177
+ for (int j = 0; j < size<1, 0>(tensor); ++j) {
178
+ const int col_idx = col_idx_base + j;
179
+ if constexpr (Has_alibi) {
180
+ if constexpr (Is_causal) {
181
+ tensor(make_coord(i, mi), make_coord(j, nj)) += alibi_slope * col_idx;
182
+ } else {
183
+ tensor(make_coord(i, mi), make_coord(j, nj)) -= alibi_slope * abs(row_idx + max_seqlen_k - max_seqlen_q - col_idx);
184
+
185
+ }
186
+ }
187
+ if constexpr (Causal_mask) {
188
+ if (col_idx >= col_idx_limit_right) {
189
+ tensor(make_coord(i, mi), make_coord(j, nj)) = -INFINITY;
190
+ }
191
+ }
192
+ if constexpr (Is_local) {
193
+ if (col_idx >= col_idx_limit_right || col_idx < col_idx_limit_left) {
194
+ tensor(make_coord(i, mi), make_coord(j, nj)) = -INFINITY;
195
+ }
196
+ }
197
+ if constexpr (!Causal_mask && !Is_local && !Is_even_MN) {
198
+ // Causal and Local already handle MN masking
199
+ if (col_idx >= max_seqlen_k) {
200
+ tensor(make_coord(i, mi), make_coord(j, nj)) = -INFINITY;
201
+ }
202
+ }
203
+ }
204
+ }
205
+ }
206
+ }
207
+ }
208
+ }
209
+ };
210
+
211
+ };
212
+
213
+ } // namespace flash
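For reference, the column limits computed in the local/causal branches of `mask.h` above amount to the sliding-window condition below (this is just a restatement of the code, with $r$ the query row index, $c$ the key column index, $S_q$ = `max_seqlen_q`, $S_k$ = `max_seqlen_k`):

$$
\max\!\big(0,\; r + S_k - S_q - W_{\text{left}}\big) \;\le\; c \;<\; \min\!\big(S_k,\; r + 1 + S_k - S_q + W_{\text{right}}\big),
$$

where columns outside this range are set to $-\infty$ before the softmax. Causal masking is the special case $W_{\text{left}} = \infty$, $W_{\text{right}} = 0$, which is exactly how `apply_mask_causal` forwards to `apply_mask_local`.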
flash-attention/csrc/flash_attn/src/philox.cuh ADDED
@@ -0,0 +1,51 @@
1
+ // Pytorch also has an implementation of Philox RNG: https://github.com/pytorch/pytorch/blob/8ca3c881db3e3510fcb7725389f6a0633c9b992c/torch/csrc/jit/tensorexpr/cuda_random.h
2
+ #pragma once
3
+ // Philox CUDA.
4
+
5
+ namespace flash {
6
+
7
+ struct ull2 {
8
+ unsigned long long x;
9
+ unsigned long long y;
10
+ };
11
+
12
+ __forceinline__ __device__ uint2 mulhilo32(const unsigned int a, const unsigned int b) {
13
+ uint2 *res;
14
+ unsigned long long tmp;
15
+ asm ("mul.wide.u32 %0, %1, %2;\n\t"
16
+ : "=l"(tmp)
17
+ : "r"(a), "r"(b));
18
+ res = (uint2*)(&tmp);
19
+ return *res;
20
+ }
21
+
22
+ __forceinline__ __device__ uint4 philox_single_round(const uint4 ctr, const uint2 key) {
23
+ constexpr unsigned long kPhiloxSA = 0xD2511F53;
24
+ constexpr unsigned long kPhiloxSB = 0xCD9E8D57;
25
+ uint2 res0 = mulhilo32(kPhiloxSA, ctr.x);
26
+ uint2 res1 = mulhilo32(kPhiloxSB, ctr.z);
27
+ uint4 ret = {res1.y ^ ctr.y ^ key.x, res1.x, res0.y ^ ctr.w ^ key.y, res0.x};
28
+ return ret;
29
+ }
30
+
31
+ __forceinline__ __device__ uint4 philox(unsigned long long seed,
32
+ unsigned long long subsequence,
33
+ unsigned long long offset) {
34
+ constexpr unsigned long kPhilox10A = 0x9E3779B9;
35
+ constexpr unsigned long kPhilox10B = 0xBB67AE85;
36
+ uint2 key = reinterpret_cast<uint2&>(seed);
37
+ uint4 counter;
38
+ ull2 *tmp = reinterpret_cast<ull2*>(&counter);
39
+ tmp->x = offset;
40
+ tmp->y = subsequence;
41
+ #pragma unroll
42
+ for (int i = 0; i < 6; i++) {
43
+ counter = philox_single_round(counter, key);
44
+ key.x += (kPhilox10A);
45
+ key.y += (kPhilox10B);
46
+ }
47
+ uint4 output = philox_single_round(counter, key);
48
+ return output;
49
+ }
50
+
51
+ } // namespace flash
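As a sanity check, the counter/key schedule in `philox.cuh` above can be mirrored on the host with plain 64-bit arithmetic. The sketch below is illustrative only (the helper names `philox_round_host` and `philox_host` are made up here, not part of the commit); it assumes the same layout as the device code: the key comes from `seed`, the counter packs `offset` into x/y and `subsequence` into z/w, and seven rounds are applied in total (six in the loop plus the final one).

```cpp
#include <array>
#include <cstdint>

// One Philox-4x32 round: two 32x32 -> 64-bit multiplies, then xor with the key.
static std::array<uint32_t, 4> philox_round_host(std::array<uint32_t, 4> ctr,
                                                 std::array<uint32_t, 2> key) {
    constexpr uint64_t kPhiloxSA = 0xD2511F53, kPhiloxSB = 0xCD9E8D57;
    const uint64_t p0 = kPhiloxSA * uint64_t(ctr[0]);  // mul.wide.u32 equivalent
    const uint64_t p1 = kPhiloxSB * uint64_t(ctr[2]);
    const uint32_t lo0 = uint32_t(p0), hi0 = uint32_t(p0 >> 32);
    const uint32_t lo1 = uint32_t(p1), hi1 = uint32_t(p1 >> 32);
    return {hi1 ^ ctr[1] ^ key[0], lo1, hi0 ^ ctr[3] ^ key[1], lo0};
}

// Host mirror of flash::philox: six keyed rounds with the Weyl key bump, plus a final round.
static std::array<uint32_t, 4> philox_host(uint64_t seed, uint64_t subsequence, uint64_t offset) {
    constexpr uint32_t kPhilox10A = 0x9E3779B9, kPhilox10B = 0xBB67AE85;
    std::array<uint32_t, 2> key = {uint32_t(seed), uint32_t(seed >> 32)};
    std::array<uint32_t, 4> ctr = {uint32_t(offset), uint32_t(offset >> 32),
                                   uint32_t(subsequence), uint32_t(subsequence >> 32)};
    for (int i = 0; i < 6; ++i) {
        ctr = philox_round_host(ctr, key);
        key[0] += kPhilox10A;
        key[1] += kPhilox10B;
    }
    return philox_round_host(ctr, key);
}
```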
flash-attention/csrc/flash_attn/src/rotary.h ADDED
@@ -0,0 +1,152 @@
1
+ /******************************************************************************
2
+ * Copyright (c) 2024, Tri Dao.
3
+ ******************************************************************************/
4
+
5
+ #pragma once
6
+
7
+ #include <cute/tensor.hpp>
8
+
9
+ #include "utils.h"
10
+
11
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
12
+
13
+ namespace flash {
14
+
15
+ using namespace cute;
16
+
17
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
18
+
19
+ template <bool Is_even_K=true, bool Clear_OOB_K=true,
20
+ typename Engine0, typename Layout0, typename Engine1, typename Layout1,
21
+ typename Engine2, typename Layout2, typename Engine3, typename Layout3>
22
+ __forceinline__ __device__ void copy_rotary_interleaved(Tensor<Engine0, Layout0> const &S,
23
+ Tensor<Engine1, Layout1> &D,
24
+ Tensor<Engine2, Layout2> const &Cos,
25
+ Tensor<Engine2, Layout2> const &Sin,
26
+ Tensor<Engine3, Layout3> const &identity_MN,
27
+ const int max_MN, const int min_MN,
28
+ const int dim, const int rotary_dim) {
29
+ CUTE_STATIC_ASSERT_V(rank(S) == Int<3>{});
30
+ CUTE_STATIC_ASSERT_V(rank(D) == Int<3>{});
31
+ CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(D)); // MMA
32
+ CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(D)); // MMA_M
33
+ CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(D)); // MMA_K
34
+ CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Cos)); // MMA_M
35
+ CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Cos)); // MMA_K
36
+ CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Sin)); // MMA_M
37
+ CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Sin)); // MMA_K
38
+ CUTE_STATIC_ASSERT_V(size<0>(Cos) == size<0>(Sin)); // MMA_K
39
+ static_assert(decltype(size<0>(S))::value == decltype(size<0>(Cos))::value * 2);
40
+ static_assert(decltype(size<0>(Cos))::value % 2 == 0); // Since we do fast conversion from fp16/bf16 to fp32
41
+ Tensor rCos = make_fragment_like(Cos);
42
+ Tensor rSin = make_fragment_like(Sin);
43
+ Tensor rS = make_fragment_like(S);
44
+ #pragma unroll
45
+ for (int m = 0; m < size<1>(S); ++m) {
46
+ if (get<0>(identity_MN(0, m, 0)) >= min_MN && get<0>(identity_MN(0, m, 0)) < max_MN) {
47
+ #pragma unroll
48
+ for (int k = 0; k < size<2>(S); ++k) {
49
+ if (Is_even_K || get<1>(identity_MN(0, 0, k)) < dim) {
50
+ cute::copy(S(_, m, k), rS(_, m, k));
51
+ if (get<1>(identity_MN(0, 0, k)) < rotary_dim) {
52
+ cute::copy(Cos(_, m, k), rCos(_, m, k));
53
+ cute::copy(Sin(_, m, k), rSin(_, m, k));
54
+ Tensor S_fp32 = convert_type<float>(rS(_, m, k));
55
+ Tensor cos_fp32 = convert_type<float>(rCos(_, m, k));
56
+ Tensor sin_fp32 = convert_type<float>(rSin(_, m, k));
57
+ #pragma unroll
58
+ for (int i = 0; i < size<0>(rS) / 2; ++i) {
59
+ float real = S_fp32(2 * i) * cos_fp32(i) - S_fp32(2 * i + 1) * sin_fp32(i);
60
+ float imag = S_fp32(2 * i) * sin_fp32(i) + S_fp32(2 * i + 1) * cos_fp32(i);
61
+ S_fp32(2 * i) = real;
62
+ S_fp32(2 * i + 1) = imag;
63
+ }
64
+ // Idk but I need to copy for the convert_type to work
65
+ Tensor S_fp32_copy = make_fragment_like(S_fp32);
66
+ cute::copy(S_fp32, S_fp32_copy);
67
+ using T = typename Engine0::value_type;
68
+ Tensor S_og_type = convert_type<T>(S_fp32_copy);
69
+ cute::copy(S_og_type, rS(_, m, k));
70
+ }
71
+ cute::copy(rS(_, m, k), D(_, m, k));
72
+ } else if (Clear_OOB_K) {
73
+ cute::clear(D(_, m, k));
74
+ }
75
+ }
76
+ }
77
+ }
78
+ }
79
+
80
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
81
+
82
+ template <bool Is_even_K=true, bool Clear_OOB_K=true,
83
+ typename Engine0, typename Layout0, typename Engine1, typename Layout1,
84
+ typename Engine2, typename Layout2, typename Engine3, typename Layout3>
85
+ __forceinline__ __device__ void copy_rotary_contiguous(Tensor<Engine0, Layout0> const &S,
86
+ Tensor<Engine1, Layout1> &D,
87
+ Tensor<Engine2, Layout2> const &Cos,
88
+ Tensor<Engine2, Layout2> const &Sin,
89
+ Tensor<Engine3, Layout3> const &identity_MN,
90
+ const int max_MN, const int min_MN,
91
+ const int dim, const int rotary_dim) {
92
+ CUTE_STATIC_ASSERT_V(rank(S) == Int<3>{});
93
+ CUTE_STATIC_ASSERT_V(rank(D) == Int<3>{});
94
+ CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(D)); // MMA
95
+ CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(D)); // MMA_M
96
+ CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(D)); // MMA_K
97
+ CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Cos)); // MMA_M
98
+ CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Cos)); // MMA_K
99
+ CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Sin)); // MMA_M
100
+ CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Sin)); // MMA_K
101
+ CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(Cos)); // MMA
102
+ CUTE_STATIC_ASSERT_V(size<0>(Cos) == size<0>(Sin));
103
+ static_assert(decltype(size<0>(Cos))::value % 2 == 0); // Since we do fast conversion from fp16/bf16 to fp32
104
+ Tensor rCos = make_fragment_like(Cos);
105
+ Tensor rSin = make_fragment_like(Sin);
106
+ Tensor rS = make_fragment_like(S);
107
+ Tensor rS_other = make_fragment_like(rS(_, 0, 0));
108
+ #pragma unroll
109
+ for (int m = 0; m < size<1>(S); ++m) {
110
+ if (get<0>(identity_MN(0, m, 0)) >= min_MN && get<0>(identity_MN(0, m, 0)) < max_MN) {
111
+ #pragma unroll
112
+ for (int k = 0; k < size<2>(S); ++k) {
113
+ if (Is_even_K || get<1>(identity_MN(0, 0, k)) < dim) {
114
+ cute::copy(S(_, m, k), rS(_, m, k));
115
+ if (get<1>(identity_MN(0, 0, k)) < rotary_dim) {
116
+ const bool is_left = get<1>(identity_MN(0, 0, k)) < rotary_dim / 2;
117
+ Tensor gS_other = make_tensor(S(_, m, k).data() + (is_left ? rotary_dim / 2 : -rotary_dim / 2), S(_, m, k).layout());
118
+ cute::copy(gS_other, rS_other);
119
+ // if (cute::thread0()) { print_tensor(rS(_, m, k)); print_tensor(rS_other); }
120
+ Tensor gCos = make_tensor(Cos(_, m, k).data() + (is_left ? 0 : -rotary_dim / 2), Cos(_, m, k).layout());
121
+ Tensor gSin = make_tensor(Sin(_, m, k).data() + (is_left ? 0 : -rotary_dim / 2), Sin(_, m, k).layout());
122
+ cute::copy(gCos, rCos(_, m, k));
123
+ cute::copy(gSin, rSin(_, m, k));
124
+ // if (cute::thread0()) { print_tensor(rCos(_, m, k)); print_tensor(rSin(_, m, k)); }
125
+ Tensor S_fp32 = convert_type<float>(rS(_, m, k));
126
+ Tensor S_other_fp32 = convert_type<float>(rS_other);
127
+ Tensor cos_fp32 = convert_type<float>(rCos(_, m, k));
128
+ Tensor sin_fp32 = convert_type<float>(rSin(_, m, k));
129
+ #pragma unroll
130
+ for (int i = 0; i < size<0>(rS); ++i) {
131
+ S_fp32(i) = S_fp32(i) * cos_fp32(i) + S_other_fp32(i) * (is_left ? -sin_fp32(i) : sin_fp32(i));
132
+ }
133
+ // Idk but I need to copy for the convert_type to work
134
+ Tensor S_fp32_copy = make_fragment_like(S_fp32);
135
+ cute::copy(S_fp32, S_fp32_copy);
136
+ using T = typename Engine0::value_type;
137
+ Tensor S_og_type = convert_type<T>(S_fp32_copy);
138
+ cute::copy(S_og_type, rS(_, m, k));
139
+ // if (cute::thread0()) { print_tensor(rS(_, m, k)); }
140
+ }
141
+ cute::copy(rS(_, m, k), D(_, m, k));
142
+ } else if (Clear_OOB_K) {
143
+ cute::clear(D(_, m, k));
144
+ }
145
+ }
146
+ }
147
+ }
148
+ }
149
+
150
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
151
+
152
+ } // namespace flash
flash-attention/csrc/flash_attn/src/softmax.h ADDED
@@ -0,0 +1,188 @@
1
+ /******************************************************************************
2
+ * Copyright (c) 2024, Tri Dao.
3
+ ******************************************************************************/
4
+
5
+ #pragma once
6
+
7
+ #include <cmath>
8
+
9
+ #include <cute/tensor.hpp>
10
+
11
+ #include <cutlass/numeric_types.h>
12
+
13
+ #include "philox.cuh"
14
+ #include "utils.h"
15
+
16
+ namespace flash {
17
+
18
+ using namespace cute;
19
+
20
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
21
+
22
+ template<bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Operator>
23
+ __device__ __forceinline__ void thread_reduce_(Tensor<Engine0, Layout0> const &tensor, Tensor<Engine1, Layout1> &summary, Operator &op) {
24
+ static_assert(Layout0::rank == 2, "Only support 2D Tensor");
25
+ static_assert(Layout1::rank == 1, "Only support 1D Tensor");
26
+ CUTE_STATIC_ASSERT_V(size<0>(summary) == size<0>(tensor));
27
+ #pragma unroll
28
+ for (int mi = 0; mi < size<0>(tensor); mi++) {
29
+ summary(mi) = zero_init ? tensor(mi, 0) : op(summary(mi), tensor(mi, 0));
30
+ #pragma unroll
31
+ for (int ni = 1; ni < size<1>(tensor); ni++) {
32
+ summary(mi) = op(summary(mi), tensor(mi, ni));
33
+ }
34
+ }
35
+ }
36
+
37
+ template<typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Operator>
38
+ __device__ __forceinline__ void quad_allreduce_(Tensor<Engine0, Layout0> &dst, Tensor<Engine1, Layout1> &src, Operator &op) {
39
+ CUTE_STATIC_ASSERT_V(size(dst) == size(src));
40
+ #pragma unroll
41
+ for (int i = 0; i < size(dst); i++){
42
+ dst(i) = Allreduce<4>::run(src(i), op);
43
+ }
44
+ }
45
+
46
+ template<bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Operator>
47
+ __device__ __forceinline__ void reduce_(Tensor<Engine0, Layout0> const& tensor, Tensor<Engine1, Layout1> &summary, Operator &op) {
48
+ thread_reduce_<zero_init>(tensor, summary, op);
49
+ quad_allreduce_(summary, summary, op);
50
+ }
51
+
52
+ template<bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1>
53
+ __device__ __forceinline__ void reduce_max(Tensor<Engine0, Layout0> const& tensor, Tensor<Engine1, Layout1> &max){
54
+ MaxOp<float> max_op;
55
+ reduce_<zero_init>(tensor, max, max_op);
56
+ }
57
+
58
+ template<bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1>
59
+ __device__ __forceinline__ void reduce_sum(Tensor<Engine0, Layout0> const& tensor, Tensor<Engine1, Layout1> &sum){
60
+ SumOp<float> sum_op;
61
+ thread_reduce_<zero_init>(tensor, sum, sum_op);
62
+ }
63
+
64
+ // Apply the exp to all the elements.
65
+ template <bool Scale_max=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1>
66
+ __forceinline__ __device__ void scale_apply_exp2(Tensor<Engine0, Layout0> &tensor, Tensor<Engine1, Layout1> const &max, const float scale) {
67
+ static_assert(Layout0::rank == 2, "Only support 2D Tensor");
68
+ static_assert(Layout1::rank == 1, "Only support 1D Tensor");
69
+ CUTE_STATIC_ASSERT_V(size<0>(max) == size<0>(tensor));
70
+ #pragma unroll
71
+ for (int mi = 0; mi < size<0>(tensor); ++mi) {
72
+ // If max is -inf, then all elements must have been -inf (possibly due to masking).
73
+ // We don't want (-inf - (-inf)) since that would give NaN.
74
+ // If we don't have float around M_LOG2E the multiplication is done in fp64.
75
+ const float max_scaled = max(mi) == -INFINITY ? 0.f : max(mi) * (Scale_max ? scale : float(M_LOG2E));
76
+ #pragma unroll
77
+ for (int ni = 0; ni < size<1>(tensor); ++ni) {
78
+ // Instead of computing exp(x - max), we compute exp2(x * log_2(e) -
79
+ // max * log_2(e)) This allows the compiler to use the ffma
80
+ // instruction instead of fadd and fmul separately.
81
+ // The following macro will disable the use of fma.
82
+ // See: https://github.com/pytorch/pytorch/issues/121558 for more details
83
+ // This macro is set in PyTorch and not FlashAttention
84
+ #ifdef UNFUSE_FMA
85
+ tensor(mi, ni) = exp2f(__fmul_rn(tensor(mi, ni), scale) - max_scaled);
86
+ #else
87
+ tensor(mi, ni) = exp2f(tensor(mi, ni) * scale - max_scaled);
88
+ #endif
89
+ }
90
+ }
91
+ }
92
+
93
+ // Apply the exp to all the elements.
94
+ template <bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1>
95
+ __forceinline__ __device__ void max_scale_exp2_sum(Tensor<Engine0, Layout0> &tensor, Tensor<Engine1, Layout1> &max, Tensor<Engine1, Layout1> &sum, const float scale) {
96
+ static_assert(Layout0::rank == 2, "Only support 2D Tensor");
97
+ static_assert(Layout1::rank == 1, "Only support 1D Tensor");
98
+ CUTE_STATIC_ASSERT_V(size<0>(max) == size<0>(tensor));
99
+ #pragma unroll
100
+ for (int mi = 0; mi < size<0>(tensor); ++mi) {
101
+ MaxOp<float> max_op;
102
+ max(mi) = zero_init ? tensor(mi, 0) : max_op(max(mi), tensor(mi, 0));
103
+ #pragma unroll
104
+ for (int ni = 1; ni < size<1>(tensor); ni++) {
105
+ max(mi) = max_op(max(mi), tensor(mi, ni));
106
+ }
107
+ max(mi) = Allreduce<4>::run(max(mi), max_op);
108
+ // If max is -inf, then all elements must have been -inf (possibly due to masking).
109
+ // We don't want (-inf - (-inf)) since that would give NaN.
110
+ const float max_scaled = max(mi) == -INFINITY ? 0.f : max(mi) * scale;
111
+ sum(mi) = 0;
112
+ #pragma unroll
113
+ for (int ni = 0; ni < size<1>(tensor); ++ni) {
114
+ // Instead of computing exp(x - max), we compute exp2(x * log_2(e) -
115
+ // max * log_2(e)) This allows the compiler to use the ffma
116
+ // instruction instead of fadd and fmul separately.
117
+ tensor(mi, ni) = exp2f(tensor(mi, ni) * scale - max_scaled);
118
+ sum(mi) += tensor(mi, ni);
119
+ }
120
+ SumOp<float> sum_op;
121
+ sum(mi) = Allreduce<4>::run(sum(mi), sum_op);
122
+ }
123
+ }
124
+
125
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
126
+
127
+ template <int kNRows>
128
+ struct Softmax {
129
+
130
+ using TensorT = decltype(make_tensor<float>(Shape<Int<kNRows>>{}));
131
+ TensorT row_max, row_sum;
132
+
133
+ __forceinline__ __device__ Softmax() {};
134
+
135
+ template<bool Is_first, bool Check_inf=false, typename Tensor0, typename Tensor1>
136
+ __forceinline__ __device__ void softmax_rescale_o(Tensor0 &acc_s, Tensor1 &acc_o, float softmax_scale_log2) {
137
+ // Reshape acc_s from (MMA=4, MMA_M, MMA_N) to (nrow=(2, MMA_M), ncol=(2, MMA_N))
138
+ Tensor scores = make_tensor(acc_s.data(), flash::convert_layout_acc_rowcol(acc_s.layout()));
139
+ static_assert(decltype(size<0>(scores))::value == kNRows);
140
+ if (Is_first) {
141
+ flash::template reduce_max</*zero_init=*/true>(scores, row_max);
142
+ flash::scale_apply_exp2(scores, row_max, softmax_scale_log2);
143
+ flash::reduce_sum</*zero_init=*/true>(scores, row_sum);
144
+ } else {
145
+ Tensor scores_max_prev = make_fragment_like(row_max);
146
+ cute::copy(row_max, scores_max_prev);
147
+ flash::template reduce_max</*zero_init=*/false>(scores, row_max);
148
+ // Reshape acc_o from (MMA=4, MMA_M, MMA_K) to (nrow=(2, MMA_M), ncol=(2, MMA_K))
149
+ Tensor acc_o_rowcol = make_tensor(acc_o.data(), flash::convert_layout_acc_rowcol(acc_o.layout()));
150
+ static_assert(decltype(size<0>(acc_o_rowcol))::value == kNRows);
151
+ #pragma unroll
152
+ for (int mi = 0; mi < size(row_max); ++mi) {
153
+ float scores_max_cur = !Check_inf
154
+ ? row_max(mi)
155
+ : (row_max(mi) == -INFINITY ? 0.0f : row_max(mi));
156
+ float scores_scale = exp2f((scores_max_prev(mi) - scores_max_cur) * softmax_scale_log2);
157
+ row_sum(mi) *= scores_scale;
158
+ #pragma unroll
159
+ for (int ni = 0; ni < size<1>(acc_o_rowcol); ++ni) { acc_o_rowcol(mi, ni) *= scores_scale; }
160
+ }
161
+ flash::scale_apply_exp2(scores, row_max, softmax_scale_log2);
162
+ // We don't do the reduce across threads here since we don't need to use the row_sum.
163
+ // We do that reduce at the end when we need to normalize the softmax.
164
+ flash::reduce_sum</*zero_init=*/false>(scores, row_sum);
165
+ }
166
+ };
167
+
168
+ template<bool Is_dropout=false, bool Split=false, typename Tensor0>
169
+ __forceinline__ __device__ TensorT normalize_softmax_lse(Tensor0 &acc_o, float softmax_scale, float rp_dropout=1.0) {
170
+ SumOp<float> sum_op;
171
+ quad_allreduce_(row_sum, row_sum, sum_op);
172
+ TensorT lse = make_fragment_like(row_sum);
173
+ Tensor acc_o_rowcol = make_tensor(acc_o.data(), flash::convert_layout_acc_rowcol(acc_o.layout()));
174
+ static_assert(decltype(size<0>(acc_o_rowcol))::value == kNRows);
175
+ #pragma unroll
176
+ for (int mi = 0; mi < size<0>(acc_o_rowcol); ++mi) {
177
+ float sum = row_sum(mi);
178
+ float inv_sum = (sum == 0.f || sum != sum) ? 1.f : 1.f / sum;
179
+ lse(mi) = (sum == 0.f || sum != sum) ? (Split ? -INFINITY : INFINITY) : row_max(mi) * softmax_scale + __logf(sum);
180
+ float scale = !Is_dropout ? inv_sum : inv_sum * rp_dropout;
181
+ #pragma unroll
182
+ for (int ni = 0; ni < size<1>(acc_o_rowcol); ++ni) { acc_o_rowcol(mi, ni) *= scale; }
183
+ }
184
+ return lse;
185
+ };
186
+ };
187
+
188
+ } // namespace flash
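For reference, `softmax_rescale_o` above is the standard online-softmax update written in base 2. With the previous running max $m_{\text{old}}$, the new running max $m_{\text{new}}$, the running denominator $\ell$, and the running output accumulator $o$, each new block of scores $x_j$ is folded in as

$$
\ell \leftarrow \ell \, e^{\,m_{\text{old}} - m_{\text{new}}} + \sum_j e^{\,x_j - m_{\text{new}}},
\qquad
o \leftarrow o \, e^{\,m_{\text{old}} - m_{\text{new}}},
$$

where the code evaluates every exponential with `exp2f` and the $\log_2 e$ factor folded into `softmax_scale_log2`. `normalize_softmax_lse` later divides the accumulator by $\ell$ and returns $m \cdot \text{softmax\_scale} + \log \ell$ as the log-sum-exp.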
flash-attention/csrc/flash_attn/src/static_switch.h ADDED
@@ -0,0 +1,117 @@
1
+ // Inspired by
2
+ // https://github.com/NVIDIA/DALI/blob/main/include/dali/core/static_switch.h
3
+ // and https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Dispatch.h
4
+
5
+ #pragma once
6
+
7
+ /// @param COND - a boolean expression to switch by
8
+ /// @param CONST_NAME - a name given for the constexpr bool variable.
9
+ /// @param ... - code to execute for true and false
10
+ ///
11
+ /// Usage:
12
+ /// ```
13
+ /// BOOL_SWITCH(flag, BoolConst, [&] {
14
+ /// some_function<BoolConst>(...);
15
+ /// });
16
+ /// ```
17
+
18
+ #define BOOL_SWITCH(COND, CONST_NAME, ...) \
19
+ [&] { \
20
+ if (COND) { \
21
+ constexpr static bool CONST_NAME = true; \
22
+ return __VA_ARGS__(); \
23
+ } else { \
24
+ constexpr static bool CONST_NAME = false; \
25
+ return __VA_ARGS__(); \
26
+ } \
27
+ }()
28
+
29
+ #ifdef FLASHATTENTION_DISABLE_DROPOUT
30
+ #define DROPOUT_SWITCH(COND, CONST_NAME, ...) \
31
+ [&] { \
32
+ constexpr static bool CONST_NAME = false; \
33
+ return __VA_ARGS__(); \
34
+ }()
35
+ #else
36
+ #define DROPOUT_SWITCH BOOL_SWITCH
37
+ #endif
38
+
39
+ #ifdef FLASHATTENTION_DISABLE_ALIBI
40
+ #define ALIBI_SWITCH(COND, CONST_NAME, ...) \
41
+ [&] { \
42
+ constexpr static bool CONST_NAME = false; \
43
+ return __VA_ARGS__(); \
44
+ }()
45
+ #else
46
+ #define ALIBI_SWITCH BOOL_SWITCH
47
+ #endif
48
+
49
+ #ifdef FLASHATTENTION_DISABLE_UNEVEN_K
50
+ #define EVENK_SWITCH(COND, CONST_NAME, ...) \
51
+ [&] { \
52
+ constexpr static bool CONST_NAME = true; \
53
+ return __VA_ARGS__(); \
54
+ }()
55
+ #else
56
+ #define EVENK_SWITCH BOOL_SWITCH
57
+ #endif
58
+
59
+ #ifdef FLASHATTENTION_DISABLE_SOFTCAP
60
+ #define SOFTCAP_SWITCH(COND, CONST_NAME, ...) \
61
+ [&] { \
62
+ constexpr static bool CONST_NAME = false; \
63
+ return __VA_ARGS__(); \
64
+ }()
65
+ #else
66
+ #define SOFTCAP_SWITCH BOOL_SWITCH
67
+ #endif
68
+
69
+ #ifdef FLASHATTENTION_DISABLE_LOCAL
70
+ #define LOCAL_SWITCH(COND, CONST_NAME, ...) \
71
+ [&] { \
72
+ constexpr static bool CONST_NAME = false; \
73
+ return __VA_ARGS__(); \
74
+ }()
75
+ #else
76
+ #define LOCAL_SWITCH BOOL_SWITCH
77
+ #endif
78
+
79
+ #define FP16_SWITCH(COND, ...) \
80
+ [&] { \
81
+ if (COND) { \
82
+ using elem_type = cutlass::half_t; \
83
+ return __VA_ARGS__(); \
84
+ } else { \
85
+ using elem_type = cutlass::bfloat16_t; \
86
+ return __VA_ARGS__(); \
87
+ } \
88
+ }()
89
+
90
+ #define HEADDIM_SWITCH(HEADDIM, ...) \
91
+ [&] { \
92
+ if (HEADDIM <= 32) { \
93
+ constexpr static int kHeadDim = 32; \
94
+ return __VA_ARGS__(); \
95
+ } else if (HEADDIM <= 64) { \
96
+ constexpr static int kHeadDim = 64; \
97
+ return __VA_ARGS__(); \
98
+ } else if (HEADDIM <= 96) { \
99
+ constexpr static int kHeadDim = 96; \
100
+ return __VA_ARGS__(); \
101
+ } else if (HEADDIM <= 128) { \
102
+ constexpr static int kHeadDim = 128; \
103
+ return __VA_ARGS__(); \
104
+ } else if (HEADDIM <= 160) { \
105
+ constexpr static int kHeadDim = 160; \
106
+ return __VA_ARGS__(); \
107
+ } else if (HEADDIM <= 192) { \
108
+ constexpr static int kHeadDim = 192; \
109
+ return __VA_ARGS__(); \
110
+ } else if (HEADDIM <= 224) { \
111
+ constexpr static int kHeadDim = 224; \
112
+ return __VA_ARGS__(); \
113
+ } else if (HEADDIM <= 256) { \
114
+ constexpr static int kHeadDim = 256; \
115
+ return __VA_ARGS__(); \
116
+ } \
117
+ }()
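The switches above are used by nesting them around a templated launch, as in the usage note at the top of the file. A minimal host-side sketch (the `run_kernel` and `dispatch` names here are placeholders for illustration, not functions from this commit):

```cpp
#include <cstdio>

#include "static_switch.h"

// Placeholder for a real templated kernel launcher.
template <bool Is_causal, int kHeadDim>
void run_kernel() {
    std::printf("Is_causal=%d kHeadDim=%d\n", int(Is_causal), kHeadDim);
}

// Turns runtime flags into constexpr template parameters via the switch macros.
void dispatch(bool is_causal, int head_dim) {
    BOOL_SWITCH(is_causal, Is_causal, [&] {
        HEADDIM_SWITCH(head_dim, [&] {
            run_kernel<Is_causal, kHeadDim>();
        });
    });
}
```

Because each macro expands to an immediately-invoked lambda that defines a `constexpr` name, the head dimension and boolean flags only ever reach the kernel as compile-time constants.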
flash-attention/csrc/flash_attn/src/utils.h ADDED
@@ -0,0 +1,393 @@
1
+ /******************************************************************************
2
+ * Copyright (c) 2023, Tri Dao.
3
+ ******************************************************************************/
4
+
5
+ #pragma once
6
+
7
+ #include <assert.h>
8
+ #include <stdint.h>
9
+ #include <stdlib.h>
10
+
11
+ #include <cuda_fp16.h>
12
+
13
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
14
+ #include <cuda_bf16.h>
15
+ #endif
16
+
17
+ #include <cute/tensor.hpp>
18
+
19
+ #include <cutlass/array.h>
20
+ #include <cutlass/cutlass.h>
21
+ #include <cutlass/numeric_conversion.h>
22
+ #include <cutlass/numeric_types.h>
23
+
24
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
25
+
26
+ namespace flash {
27
+
28
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
29
+
30
+ template<typename T>
31
+ __forceinline__ __device__ uint32_t relu2(const uint32_t x);
32
+
33
+ template<>
34
+ __forceinline__ __device__ uint32_t relu2<cutlass::half_t>(const uint32_t x) {
35
+ uint32_t res;
36
+ const uint32_t zero = 0u;
37
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
38
+ asm volatile("max.f16x2 %0, %1, %2;\n" : "=r"(res) : "r"(x), "r"(zero));
39
+ #else
40
+ asm volatile( \
41
+ "{\n" \
42
+ "\t .reg .f16x2 sela;\n" \
43
+ "\t set.gtu.u32.f16x2 sela, %1, %2;\n" \
44
+ "\t and.b32 %0, sela, %1;\n"
45
+ "}\n" : "=r"(res) : "r"(x), "r"(zero));
46
+ #endif
47
+ return res;
48
+ }
49
+
50
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
51
+ template<>
52
+ __forceinline__ __device__ uint32_t relu2<cutlass::bfloat16_t>(const uint32_t x) {
53
+ uint32_t res;
54
+ const uint32_t zero = 0u;
55
+ asm volatile("max.bf16x2 %0, %1, %2;\n" : "=r"(res) : "r"(x), "r"(zero));
56
+ return res;
57
+ }
58
+ #endif
59
+
60
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
61
+
62
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
63
+
64
+ template<typename T>
65
+ __forceinline__ __device__ uint32_t convert_relu2(const float2 x);
66
+
67
+ template<>
68
+ __forceinline__ __device__ uint32_t convert_relu2<cutlass::half_t>(const float2 x) {
69
+ uint32_t res;
70
+ const uint32_t a = reinterpret_cast<const uint32_t&>(x.x);
71
+ const uint32_t b = reinterpret_cast<const uint32_t&>(x.y);
72
+ asm volatile("cvt.rn.relu.f16x2.f32 %0, %1, %2;\n" : "=r"(res) : "r"(b), "r"(a));
73
+ return res;
74
+ }
75
+
76
+ template<>
77
+ __forceinline__ __device__ uint32_t convert_relu2<cutlass::bfloat16_t>(const float2 x) {
78
+ uint32_t res;
79
+ const uint32_t a = reinterpret_cast<const uint32_t&>(x.x);
80
+ const uint32_t b = reinterpret_cast<const uint32_t&>(x.y);
81
+ asm volatile("cvt.rn.relu.bf16x2.f32 %0, %1, %2;\n" : "=r"(res) : "r"(b), "r"(a));
82
+ return res;
83
+ }
84
+
85
+ #endif
86
+
87
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
88
+
89
+ template<typename T>
90
+ struct MaxOp {
91
+ __device__ __forceinline__ T operator()(T const & x, T const & y) { return x > y ? x : y; }
92
+ };
93
+
94
+ template <>
95
+ struct MaxOp<float> {
96
+ // This is slightly faster
97
+ __device__ __forceinline__ float operator()(float const &x, float const &y) { return max(x, y); }
98
+ };
99
+
100
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
101
+
102
+ template<typename T>
103
+ struct SumOp {
104
+ __device__ __forceinline__ T operator()(T const & x, T const & y) { return x + y; }
105
+ };
106
+
107
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
108
+
109
+ template<int THREADS>
110
+ struct Allreduce {
111
+ static_assert(THREADS == 32 || THREADS == 16 || THREADS == 8 || THREADS == 4);
112
+ template<typename T, typename Operator>
113
+ static __device__ __forceinline__ T run(T x, Operator &op) {
114
+ constexpr int OFFSET = THREADS / 2;
115
+ x = op(x, __shfl_xor_sync(uint32_t(-1), x, OFFSET));
116
+ return Allreduce<OFFSET>::run(x, op);
117
+ }
118
+ };
119
+
120
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
121
+
122
+ template<>
123
+ struct Allreduce<2> {
124
+ template<typename T, typename Operator>
125
+ static __device__ __forceinline__ T run(T x, Operator &op) {
126
+ x = op(x, __shfl_xor_sync(uint32_t(-1), x, 1));
127
+ return x;
128
+ }
129
+ };
130
+
131
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
132
+
133
+ template<bool A_in_regs=false, bool B_in_regs=false, typename Tensor0, typename Tensor1,
134
+ typename Tensor2, typename Tensor3, typename Tensor4,
135
+ typename TiledMma, typename TiledCopyA, typename TiledCopyB,
136
+ typename ThrCopyA, typename ThrCopyB>
137
+ __forceinline__ __device__ void gemm(Tensor0 &acc, Tensor1 &tCrA, Tensor2 &tCrB, Tensor3 const& tCsA,
138
+ Tensor4 const& tCsB, TiledMma tiled_mma,
139
+ TiledCopyA smem_tiled_copy_A, TiledCopyB smem_tiled_copy_B,
140
+ ThrCopyA smem_thr_copy_A, ThrCopyB smem_thr_copy_B) {
141
+ CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(acc)); // MMA_M
142
+ CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(acc)); // MMA_N
143
+ CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K
144
+ Tensor tCrA_copy_view = smem_thr_copy_A.retile_D(tCrA);
145
+ CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // M
146
+ Tensor tCrB_copy_view = smem_thr_copy_B.retile_D(tCrB);
147
+ CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // N
148
+ if (!A_in_regs) { cute::copy(smem_tiled_copy_A, tCsA(_, _, _0{}), tCrA_copy_view(_, _, _0{})); }
149
+ if (!B_in_regs) { cute::copy(smem_tiled_copy_B, tCsB(_, _, _0{}), tCrB_copy_view(_, _, _0{})); }
150
+ #pragma unroll
151
+ for (int i = 0; i < size<2>(tCrA); ++i) {
152
+ if (i < size<2>(tCrA) - 1) {
153
+ if (!A_in_regs) { cute::copy(smem_tiled_copy_A, tCsA(_, _, i + 1), tCrA_copy_view(_, _, i + 1)); }
154
+ if (!B_in_regs) { cute::copy(smem_tiled_copy_B, tCsB(_, _, i + 1), tCrB_copy_view(_, _, i + 1)); }
155
+ }
156
+ cute::gemm(tiled_mma, tCrA(_, _, i), tCrB(_, _, i), acc);
157
+ }
158
+ }
159
+
160
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
161
+
162
+ template<typename Tensor0, typename Tensor1, typename Tensor2, typename Tensor3,
163
+ typename TiledMma, typename TiledCopy, typename ThrCopy>
164
+ __forceinline__ __device__ void gemm_rs(Tensor0 &acc, Tensor1 &tCrA, Tensor2 &tCrB, Tensor3 const& tCsB,
165
+ TiledMma tiled_mma, TiledCopy smem_tiled_copy_B,
166
+ ThrCopy smem_thr_copy_B) {
167
+ CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(acc)); // MMA_M
168
+ CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(acc)); // MMA_N
169
+ CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K
170
+ Tensor tCrB_copy_view = smem_thr_copy_B.retile_D(tCrB);
171
+ CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // N
172
+ cute::copy(smem_tiled_copy_B, tCsB(_, _, _0{}), tCrB_copy_view(_, _, _0{}));
173
+ #pragma unroll
174
+ for (int i = 0; i < size<2>(tCrA); ++i) {
175
+ if (i < size<2>(tCrA) - 1) {
176
+ cute::copy(smem_tiled_copy_B, tCsB(_, _, i + 1), tCrB_copy_view(_, _, i + 1));
177
+ }
178
+ cute::gemm(tiled_mma, tCrA(_, _, i), tCrB(_, _, i), acc);
179
+ }
180
+ }
181
+
182
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
183
+
184
+ // Convert acc_layout from (MMA=4, MMA_M, MMA_N) to (nrow=(2, MMA_M), ncol=(2, MMA_N))
185
+ template<typename Layout>
186
+ __forceinline__ __device__ auto convert_layout_acc_rowcol(Layout acc_layout) {
187
+ static_assert(decltype(size<0>(acc_layout))::value == 4);
188
+ static_assert(decltype(rank(acc_layout))::value == 3);
189
+ auto l = logical_divide(acc_layout, Shape<_2>{}); // ((2, 2), MMA_M, MMA_N)
190
+ return make_layout(make_layout(get<0, 1>(l), get<1>(l)), make_layout(get<0, 0>(l), get<2>(l)));
191
+ };
192
+
193
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
194
+
195
+ // Convert acc_layout from (MMA=4, MMA_M, MMA_N) to ((4, 2), MMA_M, MMA_N / 2)
196
+ // if using m16n8k16, or to (4, MMA_M, MMA_N) if using m16n8k8.
197
+ template<typename MMA_traits, typename Layout>
198
+ __forceinline__ __device__ auto convert_layout_acc_Aregs(Layout acc_layout) {
199
+ using X = Underscore;
200
+ static_assert(decltype(size<0>(acc_layout))::value == 4);
201
+ static_assert(decltype(rank(acc_layout))::value == 3);
202
+ constexpr int mma_shape_K = get<2>(typename MMA_traits::Shape_MNK{});
203
+ static_assert(mma_shape_K == 8 || mma_shape_K == 16);
204
+ if constexpr (mma_shape_K == 8) {
205
+ return acc_layout;
206
+ } else {
207
+ auto l = logical_divide(acc_layout, Shape<X, X, _2>{}); // (4, MMA_M, (2, MMA_N / 2)))
208
+ return make_layout(make_layout(get<0>(l), get<2, 0>(l)), get<1>(l), get<2, 1>(l));
209
+ }
210
+ };
211
+
212
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
213
+
214
+ // Convert acc_layout from (MMA=4, MMA_M, MMA_N) to ((4, 2), MMA_M, MMA_N / 2)
215
+ template<typename Layout>
216
+ __forceinline__ __device__ auto convert_layout_acc_dropout(Layout acc_layout) {
217
+ using X = Underscore;
218
+ static_assert(decltype(size<0>(acc_layout))::value == 4);
219
+ static_assert(decltype(rank(acc_layout))::value == 3);
220
+ auto l = logical_divide(acc_layout, Shape<X, X, _2>{}); // (4, MMA_M, (2, MMA_N / 2)))
221
+ return make_layout(make_layout(get<0>(l), get<2, 0>(l)), get<1>(l), get<2, 1>(l));
222
+ };
223
+
224
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
225
+
226
+ template <typename To_type, typename Engine, typename Layout>
227
+ __forceinline__ __device__ auto convert_type(Tensor<Engine, Layout> const &tensor) {
228
+ using From_type = typename Engine::value_type;
229
+ constexpr int numel = decltype(size(tensor))::value;
230
+ cutlass::NumericArrayConverter<To_type, From_type, numel> convert_op;
231
+ // HACK: this requires tensor to be "contiguous"
232
+ auto frag = convert_op(*reinterpret_cast<const cutlass::Array<From_type, numel> *>(tensor.data()));
233
+ return make_tensor(make_rmem_ptr<To_type>(&frag), tensor.layout());
234
+ }
235
+
236
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
237
+
238
+ template <typename Engine, typename Layout>
239
+ __forceinline__ __device__ void relu_(Tensor<Engine, Layout> &tensor) {
240
+ constexpr int numel = decltype(size(tensor))::value;
241
+ static_assert(numel % 2 == 0);
242
+ using value_t = typename Engine::value_type;
243
+ // HACK: this requires tensor to be "contiguous"
244
+ Tensor tensor_uint32 = recast<uint32_t>(tensor);
245
+ #pragma unroll
246
+ for (int i = 0; i < size(tensor_uint32); ++i) {
247
+ tensor_uint32(i) = relu2<value_t>(tensor_uint32(i));
248
+ }
249
+ }
250
+
251
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
252
+
253
+ // On SM80 and above, we can fuse fp32 -> fp16/bf16 conversion and relu into 1 instruction
254
+ template <typename To_type, typename Engine, typename Layout>
255
+ __forceinline__ __device__ auto convert_type_relu(Tensor<Engine, Layout> const &tensor) {
256
+ using From_type = typename Engine::value_type;
257
+ static_assert(std::is_same_v<To_type, cutlass::half_t> || std::is_same_v<To_type, cutlass::bfloat16_t>);
258
+ static_assert(std::is_same_v<float, From_type>);
259
+ constexpr int numel = decltype(size(tensor))::value;
260
+ static_assert(numel % 2 == 0);
261
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
262
+ // HACK: this requires tensor to be "contiguous"
263
+ Tensor tensor_float2 = recast<float2>(tensor);
264
+ Tensor out_uint32 = make_tensor<uint32_t>(tensor_float2.layout());
265
+ #pragma unroll
266
+ for (int i = 0; i < size(out_uint32); ++i) {
267
+ out_uint32(i) = convert_relu2<To_type>(tensor_float2(i));
268
+ }
269
+ Tensor out = make_tensor(make_rmem_ptr<To_type>(out_uint32.data()), tensor.layout());
270
+ #else
271
+ Tensor out = flash::convert_type<To_type>(tensor);
272
+ flash::relu_(out);
273
+ #endif
274
+ return out;
275
+ }
276
+
277
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
278
+
279
+ // Blocks until all but N previous cp.async.commit_group operations have committed.
280
+ // This differs from cute::cp_async_wait in that when N = 0 we don't call cp.async.wait_all
281
+ // (which is equivalent to commit_group then wait_group 0).
282
+ // Instead we just call cp.async.wait_group 0, which is slightly faster.
283
+ // https://github.com/NVIDIA/cutlass/blob/master/include/cute/arch/copy_sm80.hpp#L113
284
+ template <int N>
285
+ CUTE_HOST_DEVICE
286
+ void cp_async_wait() {
287
+ #if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED)
288
+ asm volatile("cp.async.wait_group %0;\n" :: "n"(N));
289
+ #endif
290
+ }
291
+
292
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
293
+
294
+ template <bool Is_even_MN=true, bool Is_even_K=true, bool Clear_OOB_MN=false, bool Clear_OOB_K=true,
295
+ typename TiledCopy, typename Engine0, typename Layout0, typename Engine1, typename Layout1,
296
+ typename Engine2, typename Layout2, typename Engine3, typename Layout3>
297
+ __forceinline__ __device__ void copy(TiledCopy tiled_copy, Tensor<Engine0, Layout0> const &S,
298
+ Tensor<Engine1, Layout1> &D, Tensor<Engine2, Layout2> const &identity_MN,
299
+ Tensor<Engine3, Layout3> const &predicate_K, const int max_MN=0) {
300
+ CUTE_STATIC_ASSERT_V(rank(S) == Int<3>{});
301
+ CUTE_STATIC_ASSERT_V(rank(D) == Int<3>{});
302
+ CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(D)); // MMA
303
+ CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(D)); // MMA_M
304
+ CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(D)); // MMA_K
305
+ // There's no case where !Clear_OOB_K && Clear_OOB_MN
306
+ static_assert(!(Clear_OOB_MN && !Clear_OOB_K));
307
+ #pragma unroll
308
+ for (int m = 0; m < size<1>(S); ++m) {
309
+ if (Is_even_MN || get<0>(identity_MN(0, m, 0)) < max_MN) {
310
+ #pragma unroll
311
+ for (int k = 0; k < size<2>(S); ++k) {
312
+ if (Is_even_K || predicate_K(k)) {
313
+ cute::copy(tiled_copy, S(_, m, k), D(_, m, k));
314
+ } else if (Clear_OOB_K) {
315
+ cute::clear(D(_, m, k));
316
+ }
317
+ }
318
+ } else if (Clear_OOB_MN) {
319
+ cute::clear(D(_, m, _));
320
+ }
321
+ }
322
+ // TD [2023-04-13]: Strange that the code below can cause race condition.
323
+ // I think it's because the copies are under an if statement.
324
+ // if (Is_even_K) {
325
+ // #pragma unroll
326
+ // for (int m = 0; m < size<1>(S); ++m) {
327
+ // if (Is_even_MN || get<0>(identity_MN(0, m, 0)) < max_MN) {
328
+ // copy(tiled_copy, S(_, m, _), D(_, m, _));
329
+ // } else if (Clear_OOB_MN) {
330
+ // clear(D(_, m, _));
331
+ // }
332
+ // }
333
+ // } else { // It's slightly faster in this case if iterate over K first
334
+ // #pragma unroll
335
+ // for (int k = 0; k < size<2>(S); ++k) {
336
+ // if (predicate_K(k)) {
337
+ // #pragma unroll
338
+ // for (int m = 0; m < size<1>(S); ++m) {
339
+ // if (Is_even_MN || get<0>(identity_MN(0, m, 0)) < max_MN) {
340
+ // copy(tiled_copy, S(_, m, k), D(_, m, k));
341
+ // } else if (Clear_OOB_MN) {
342
+ // clear(D(_, m, k));
343
+ // }
344
+ // }
345
+ // } else if (Clear_OOB_K) { // There's no case where !Clear_OOB_K && Clear_OOB_MN
346
+ // if (Clear_OOB_MN || Is_even_MN) {
347
+ // clear(D(_, _, k));
348
+ // } else {
349
+ // #pragma unroll
350
+ // for (int m = 0; m < size<1>(S); ++m) {
351
+ // if (!(Is_even_MN || get<0>(identity_MN(0, m, 0)) < max_MN)) {
352
+ // clear(D(_, m, k));
353
+ // }
354
+ // }
355
+ // }
356
+ // }
357
+ // }
358
+ // }
359
+ }
360
+
361
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
362
+
363
+ template <bool Is_even_K=true,
364
+ typename Engine0, typename Layout0, typename Engine1, typename Layout1,
365
+ typename Engine2, typename Layout2, typename Engine3, typename Layout3>
366
+ __forceinline__ __device__ void copy_w_min_idx(Tensor<Engine0, Layout0> const &S,
367
+ Tensor<Engine1, Layout1> &D, Tensor<Engine2, Layout2> const &identity_MN,
368
+ Tensor<Engine3, Layout3> const &predicate_K,
369
+ const int max_MN=0, const int min_MN=0) {
370
+ CUTE_STATIC_ASSERT_V(rank(S) == Int<3>{});
371
+ CUTE_STATIC_ASSERT_V(rank(D) == Int<3>{});
372
+ CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(D)); // MMA
373
+ CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(D)); // MMA_M
374
+ CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(D)); // MMA_K
375
+ // if (threadIdx.x == 0 && blockIdx.z == 0) { printf("blockIdx.y = %d, max_MN = %d, min_MN = %d\n", blockIdx.y, max_MN, min_MN); }
376
+ #pragma unroll
377
+ for (int m = 0; m < size<1>(S); ++m) {
378
+ // if (threadIdx.x == 0 && blockIdx.z == 0) { printf("blockIdx.y = %d, m = %d\n", blockIdx.y, get<0>(identity_MN(0, m, 0))); }
379
+ if (get<0>(identity_MN(0, m, 0)) >= min_MN && get<0>(identity_MN(0, m, 0)) < max_MN) {
380
+ // if (threadIdx.x == 0 && blockIdx.z == 0) { printf("Inner loop, blockIdx.y = %d, m = %d\n", blockIdx.y, get<0>(identity_MN(0, m, 0))); }
381
+ #pragma unroll
382
+ for (int k = 0; k < size<2>(S); ++k) {
383
+ if (Is_even_K || predicate_K(k)) {
384
+ cute::copy(S(_, m, k), D(_, m, k));
385
+ }
386
+ }
387
+ }
388
+ }
389
+ }
390
+
391
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
392
+
393
+ } // namespace flash
flash-attention/csrc/ft_attention/README.md ADDED
@@ -0,0 +1,14 @@
1
+ # Attention kernel from FasterTransformer
2
+
3
+ This CUDA extension wraps the single-query attention [kernel](https://github.com/NVIDIA/FasterTransformer/blob/release/v5.2.1_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention/decoder_masked_multihead_attention_template.hpp) from
4
+ FasterTransformer v5.2.1 for benchmarking purposes.
5
+
6
+ ```sh
7
+ cd csrc/ft_attention && pip install .
8
+ ```
9
+
10
+ As of 2023-09-17, this extension is no longer used in the FlashAttention repo.
11
+ FlashAttention now implements
12
+ [`flash_attn_with_kvcache`](https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/flash_attention_interface.py)
13
+ with all the features of this `ft_attention` kernel (and more).
14
+
flash-attention/csrc/ft_attention/cuda_bf16_fallbacks.cuh ADDED
@@ -0,0 +1,257 @@
1
+ // Downloaded from from FasterTransformer v5.2.1
2
+ // https://github.com/NVIDIA/FasterTransformer/blob/release/v5.2.1_tag/src/fastertransformer/utils/cuda_bf16_fallbacks.cuh
3
+ /*
4
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
5
+ *
6
+ * Licensed under the Apache License, Version 2.0 (the "License");
7
+ * you may not use this file except in compliance with the License.
8
+ * You may obtain a copy of the License at
9
+ *
10
+ * http://www.apache.org/licenses/LICENSE-2.0
11
+ *
12
+ * Unless required by applicable law or agreed to in writing, software
13
+ * distributed under the License is distributed on an "AS IS" BASIS,
14
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ * See the License for the specific language governing permissions and
16
+ * limitations under the License.
17
+ */
18
+
19
+ #pragma once
20
+
21
+ #include "cuda_bf16_wrapper.h"
22
+ #include <cuda_fp16.h>
23
+
24
+ namespace fastertransformer {
25
+
26
+ #ifdef ENABLE_BF16
27
+ inline __device__ float2 bf1622float2(const __nv_bfloat162 val) {
28
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
29
+ float2 f_val;
30
+ f_val.x = __low2float(val);
31
+ f_val.y = __high2float(val);
32
+ return f_val;
33
+ #else
34
+ return __bfloat1622float2(val);
35
+ #endif
36
+ }
37
+
38
+ inline __device__ int16_t bf1622int16(__nv_bfloat162 val) {
39
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
40
+ float2 f_val;
41
+ f_val.x = max(min(__low2float(val), 127.f), -128.f);
42
+ f_val.y = max(min(__high2float(val), 127.f), -128.f);
43
+ union { int8_t int8[2]; int16_t int16; };
44
+ int8[0] = static_cast<int8_t>(static_cast<short>(f_val.x));
45
+ int8[1] = static_cast<int8_t>(static_cast<short>(f_val.y));
46
+ return int16;
47
+ #else
48
+ val = __hmin2(val, make_bfloat162(127., 127.));
49
+ val = __hmax2(val, make_bfloat162(-128., -128.));
50
+ union { int8_t int8[2]; int16_t int16; };
51
+ int8[0] = static_cast<int8_t>(static_cast<short>(val.x));
52
+ int8[1] = static_cast<int8_t>(static_cast<short>(val.y));
53
+ return int16;
54
+ #endif
55
+ }
56
+
57
+ inline __device__ __nv_bfloat162 float22bf162(const float2 val) {
58
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
59
+ return __floats2bfloat162_rn(val.x, val.y);
60
+ #else
61
+ return __float22bfloat162_rn(val);
62
+ #endif
63
+ }
64
+
65
+ inline __device__ __nv_bfloat162 bf162bf162(const __nv_bfloat16 val) {
66
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
67
+ __nv_bfloat162 val2;
68
+ val2.x = val;
69
+ val2.y = val;
70
+ return val2;
71
+ #else
72
+ return __bfloat162bfloat162(val);
73
+ #endif
74
+ }
75
+
76
+ inline __device__ __nv_bfloat162 bf16hadd2(const __nv_bfloat162 x, const __nv_bfloat162 y) {
77
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
78
+ float fxl, fxh, fyl, fyh;
79
+ fxl = __low2float(x);
80
+ fxh = __high2float(x);
81
+ fyl = __low2float(y);
82
+ fyh = __high2float(y);
83
+ return __floats2bfloat162_rn(fxl + fyl, fxh + fyh);
84
+ #else
85
+ return __hadd2(x, y);
86
+ #endif
87
+ }
88
+
89
+ inline __device__ __nv_bfloat16 bf16hadd(const __nv_bfloat16 x, const __nv_bfloat16 y) {
90
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
91
+ return __float2bfloat16( __bfloat162float(x) + __bfloat162float(y) );
92
+ #else
93
+ return __hadd(x, y);
94
+ #endif
95
+ }
96
+
97
+ inline __device__ __nv_bfloat162 bf16hsub2(const __nv_bfloat162 x, const __nv_bfloat162 y) {
98
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
99
+ float fxl, fxh, fyl, fyh;
100
+ fxl = __low2float(x);
101
+ fxh = __high2float(x);
102
+ fyl = __low2float(y);
103
+ fyh = __high2float(y);
104
+ return __floats2bfloat162_rn(fxl - fyl, fxh - fyh);
105
+ #else
106
+ return __hsub2(x, y);
107
+ #endif
108
+ }
109
+
110
+ inline __device__ __nv_bfloat16 bf16hsub(const __nv_bfloat16 x, const __nv_bfloat16 y) {
111
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
112
+ return __float2bfloat16( __bfloat162float(x) - __bfloat162float(y) );
113
+ #else
114
+ return __hsub(x, y);
115
+ #endif
116
+ }
117
+
118
+ inline __device__ __nv_bfloat162 bf16hmul2(const __nv_bfloat162 x, const __nv_bfloat162 y) {
119
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
120
+ float fxl, fxh, fyl, fyh;
121
+ fxl = __low2float(x);
122
+ fxh = __high2float(x);
123
+ fyl = __low2float(y);
124
+ fyh = __high2float(y);
125
+ return __floats2bfloat162_rn(fxl * fyl, fxh * fyh);
126
+ #else
127
+ return __hmul2(x, y);
128
+ #endif
129
+ }
130
+
131
+ inline __device__ __nv_bfloat16 bf16hmul(const __nv_bfloat16 x, const __nv_bfloat16 y) {
132
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
133
+ return __float2bfloat16( __bfloat162float(x) * __bfloat162float(y) );
134
+ #else
135
+ return __hmul(x, y);
136
+ #endif
137
+ }
138
+
139
+ inline __device__ __nv_bfloat162 bf16hfma2(const __nv_bfloat162 x, const __nv_bfloat162 y, const __nv_bfloat162 z) {
140
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
141
+ float fxl, fxh, fyl, fyh, fzl, fzh;
142
+ fxl = __low2float(x);
143
+ fxh = __high2float(x);
144
+ fyl = __low2float(y);
145
+ fyh = __high2float(y);
146
+ fzl = __low2float(z);
147
+ fzh = __high2float(z);
148
+ return __floats2bfloat162_rn(fxl * fyl + fzl, fxh * fyh + fzh);
149
+ #else
150
+ return __hfma2(x, y, z);
151
+ #endif
152
+ }
153
+
154
+ inline __device__ __nv_bfloat16 bf16hfma(const __nv_bfloat16 x, const __nv_bfloat16 y, const __nv_bfloat16 z) {
155
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
156
+ return __float2bfloat16( __bfloat162float(x) * __bfloat162float(y) + __bfloat162float(z));
157
+ #else
158
+ return __hfma(x, y, z);
159
+ #endif
160
+ }
161
+
162
+ inline __device__ __nv_bfloat162 bf16exp2(const __nv_bfloat162 x) {
163
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
164
+ float fxl, fxh;
165
+ fxl = __low2float(x);
166
+ fxh = __high2float(x);
167
+ return __floats2bfloat162_rn(expf(fxl), expf(fxh));
168
+ #else
169
+ return h2exp(x);
170
+ #endif
171
+ }
172
+
173
+ #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800)
174
+ inline __device__ __nv_bfloat162 operator*(const __nv_bfloat162 x, const __nv_bfloat162 y) { return bf16hmul2(x, y); };
175
+ inline __device__ __nv_bfloat162 operator+(const __nv_bfloat162 x, const __nv_bfloat162 y) { return bf16hadd2(x, y); };
176
+
177
+ inline __device__ __nv_bfloat162 make_bfloat162(const __nv_bfloat16 x, const __nv_bfloat16 y)
178
+ {
179
+ __nv_bfloat162 t; t.x = x; t.y = y; return t;
180
+ }
181
+
182
+ #endif
183
+
184
+ inline __device__ __nv_bfloat16 bf16hadd(__nv_bfloat16 a, __nv_bfloat16 b, __nv_bfloat16 c) {
185
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
186
+ return __float2bfloat16(__bfloat162float(a) + __bfloat162float(b) + __bfloat162float(c));
187
+ #else
188
+ return a + b + c;
189
+ #endif
190
+ }
191
+
192
+ inline __device__ __nv_bfloat16 bf16hadd(__nv_bfloat16 a, __nv_bfloat16 b, __nv_bfloat16 c, __nv_bfloat16 d) {
193
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
194
+ return __float2bfloat16(__bfloat162float(a) + __bfloat162float(b) + __bfloat162float(c) + __bfloat162float(d));
195
+ #else
196
+ return (__nv_bfloat16)((float)a + (float)b + (float)c + (float)d);
197
+ #endif
198
+ }
199
+
200
+ inline __device__ __nv_bfloat162 bf16hadd2(__nv_bfloat162 a, __nv_bfloat162 b, __nv_bfloat162 c) {
201
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
202
+ float fal, fah, fbl, fbh, fcl, fch;
203
+ fal = __low2float(a);
204
+ fah = __high2float(a);
205
+ fbl = __low2float(b);
206
+ fbh = __high2float(b);
207
+ fcl = __low2float(c);
208
+ fch = __high2float(c);
209
+ return __floats2bfloat162_rn(fal + fbl + fcl, fah + fbh + fch);
210
+ #else
211
+ return a + b + c;
212
+ #endif
213
+ }
214
+
215
+ inline __device__ __nv_bfloat16 bf16hmul(__nv_bfloat16 a, __nv_bfloat16 b, __nv_bfloat16 c) {
216
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
217
+ return __float2bfloat16(__bfloat162float(a) * __bfloat162float(b) * __bfloat162float(c));
218
+ #else
219
+ return a * b * c;
220
+ #endif
221
+ }
222
+
223
+ inline __device__ __nv_bfloat162 bf16hmul2(__nv_bfloat162 a, __nv_bfloat162 b, __nv_bfloat162 c) {
224
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
225
+ float fal, fah, fbl, fbh, fcl, fch;
226
+ fal = __low2float(a);
227
+ fah = __high2float(a);
228
+ fbl = __low2float(b);
229
+ fbh = __high2float(b);
230
+ fcl = __low2float(c);
231
+ fch = __high2float(c);
232
+ return __floats2bfloat162_rn(fal * fbl * fcl, fah * fbh * fch);
233
+ #else
234
+ return a * b * c;
235
+ #endif
236
+ }
237
+
238
+ inline __device__ __nv_bfloat162 bf16hfma2(__nv_bfloat162 a, __nv_bfloat162 b, __nv_bfloat162 c, __nv_bfloat162 d) {
239
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
240
+ float fal, fah, fbl, fbh, fcl, fch, fdl, fdh;
241
+ fal = __low2float(a);
242
+ fah = __high2float(a);
243
+ fbl = __low2float(b);
244
+ fbh = __high2float(b);
245
+ fcl = __low2float(c);
246
+ fch = __high2float(c);
247
+ fdl = __low2float(d);
248
+ fdh = __high2float(d);
249
+ return __floats2bfloat162_rn(fal * fbl * fcl + fdl, fah * fbh * fch + fdh);
250
+ #else
251
+ return a * b * c + d;
252
+ #endif
253
+ }
254
+
255
+ #endif // ENABLE_BF16
256
+
257
+ } // namespace fastertransformer
flash-attention/csrc/ft_attention/cuda_bf16_wrapper.h ADDED
@@ -0,0 +1,23 @@
+ // Downloaded from FasterTransformer v5.2.1
+ // https://github.com/NVIDIA/FasterTransformer/blob/release/v5.2.1_tag/src/fastertransformer/utils/cuda_bf16_wrapper.h
+ /*
+ * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ #pragma once
+
+ #ifdef ENABLE_BF16
+ #include <cuda_bf16.h>
+ #endif
flash-attention/csrc/ft_attention/decoder_masked_multihead_attention.cu ADDED
@@ -0,0 +1,149 @@
1
+ // Adapted from FasterTransformer v5.2.1
2
+ // https://github.com/NVIDIA/FasterTransformer/blob/release/v5.2.1_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention/decoder_masked_multihead_attention_128.cu
3
+ /*
4
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
5
+ *
6
+ * Licensed under the Apache License, Version 2.0 (the "License");
7
+ * you may not use this file except in compliance with the License.
8
+ * You may obtain a copy of the License at
9
+ *
10
+ * http://www.apache.org/licenses/LICENSE-2.0
11
+ *
12
+ * Unless required by applicable law or agreed to in writing, software
13
+ * distributed under the License is distributed on an "AS IS" BASIS,
14
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ * See the License for the specific language governing permissions and
16
+ * limitations under the License.
17
+ */
18
+
19
+ #include "decoder_masked_multihead_attention.h"
20
+ #include "decoder_masked_multihead_attention_utils.h"
21
+ #include "cuda_bf16_wrapper.h"
22
+ #include <assert.h>
23
+ #include <float.h>
24
+ #include <type_traits>
25
+
26
+ #include "decoder_masked_multihead_attention_template.hpp"
27
+
28
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
29
+
30
+ #define MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, THDS_PER_KEY, THDS_PER_VALUE, THDS_PER_BLOCK, DO_CROSS_ATTENTION, stream) \
31
+ size_t smem_sz = mmha::smem_size_in_bytes<T, DO_CROSS_ATTENTION>(params, THDS_PER_VALUE, THDS_PER_BLOCK); \
32
+ auto kernel = mmha::masked_multihead_attention_kernel<T, Dh, Dh_MAX, THDS_PER_KEY, THDS_PER_VALUE, \
33
+ THDS_PER_BLOCK, DO_CROSS_ATTENTION>; \
34
+ cudaFuncSetAttribute(kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_sz); \
35
+ dim3 grid(params.nnz_head_idx == nullptr ? params.num_heads : params.nnz_heads, params.batch_size); \
36
+ kernel<<<grid, THDS_PER_BLOCK, smem_sz, stream>>>(params)
37
+
38
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
39
+
40
+ // !!! Specialize the launcher for Cross attention
41
+ template<typename T, int Dh, int Dh_MAX, typename KERNEL_PARAMS_TYPE>
42
+ void mmha_launch_kernel(const KERNEL_PARAMS_TYPE& params, const cudaStream_t& stream)
43
+ {
44
+ constexpr int THREADS_PER_VALUE = Dh_MAX * sizeof(T) / 16;
45
+ constexpr bool DO_CROSS_ATTENTION = std::is_same<KERNEL_PARAMS_TYPE, Cross_multihead_attention_params<T>>::value;
46
+ int tlength = (DO_CROSS_ATTENTION) ? params.memory_max_len : params.timestep;
47
+ // printf("tlength, CROSS_ATTENTION = %d, %d\n", tlength, DO_CROSS_ATTENTION);
48
+ if (tlength < 32) {
49
+ MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, DO_CROSS_ATTENTION, stream);
50
+ }
51
+ else if (tlength < 2048) {
52
+ MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, DO_CROSS_ATTENTION, stream);
53
+ }
54
+ else {
55
+ MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, DO_CROSS_ATTENTION, stream);
56
+ }
57
+ }
58
+
59
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
60
+
61
+ #undef MMHA_LAUNCH_KERNEL
62
+
63
+ template<typename T, typename KERNEL_PARAMS_TYPE>
64
+ void multihead_attention_(const KERNEL_PARAMS_TYPE& params, const cudaStream_t& stream)
65
+ {
66
+ switch (params.hidden_size_per_head) {
67
+ case 32:
68
+ mmha_launch_kernel<T, 32, 32, KERNEL_PARAMS_TYPE>(params, stream);
69
+ break;
70
+ case 48:
71
+ mmha_launch_kernel<T, 48, 64, KERNEL_PARAMS_TYPE>(params, stream);
72
+ break;
73
+ case 64:
74
+ mmha_launch_kernel<T, 64, 64, KERNEL_PARAMS_TYPE>(params, stream);
75
+ break;
76
+ case 80:
77
+ mmha_launch_kernel<T, 80, 128, KERNEL_PARAMS_TYPE>(params, stream);
78
+ break;
79
+ case 96:
80
+ mmha_launch_kernel<T, 96, 128, KERNEL_PARAMS_TYPE>(params, stream);
81
+ break;
82
+ case 128:
83
+ mmha_launch_kernel<T, 128, 128, KERNEL_PARAMS_TYPE>(params, stream);
84
+ break;
85
+ case 160:
86
+ mmha_launch_kernel<T, 160, 256, KERNEL_PARAMS_TYPE>(params, stream);
87
+ break;
88
+ case 192:
89
+ mmha_launch_kernel<T, 192, 256, KERNEL_PARAMS_TYPE>(params, stream);
90
+ break;
91
+ case 224:
92
+ mmha_launch_kernel<T, 224, 256, KERNEL_PARAMS_TYPE>(params, stream);
93
+ break;
94
+ case 256:
95
+ mmha_launch_kernel<T, 256, 256, KERNEL_PARAMS_TYPE>(params, stream);
96
+ break;
97
+ default:
98
+ assert(false);
99
+ }
100
+ }
101
+
102
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
103
+
104
+ void masked_multihead_attention(const Masked_multihead_attention_params<float>& params, const cudaStream_t& stream)
105
+ {
106
+ multihead_attention_<float, Masked_multihead_attention_params<float>>(params, stream);
107
+ }
108
+
109
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
110
+
111
+ void masked_multihead_attention(const Masked_multihead_attention_params<uint16_t>& params, const cudaStream_t& stream)
112
+ {
113
+ multihead_attention_<uint16_t, Masked_multihead_attention_params<uint16_t>>(params, stream);
114
+ }
115
+
116
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
117
+
118
+ #ifdef ENABLE_BF16
119
+ void masked_multihead_attention(const Masked_multihead_attention_params<__nv_bfloat16>& params,
120
+ const cudaStream_t& stream)
121
+ {
122
+ multihead_attention_<__nv_bfloat16, Masked_multihead_attention_params<__nv_bfloat16>>(params, stream);
123
+ }
124
+ #endif
125
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
126
+
127
+ void cross_multihead_attention(const Cross_multihead_attention_params<float>& params, const cudaStream_t& stream)
128
+ {
129
+ multihead_attention_<float, Cross_multihead_attention_params<float>>(params, stream);
130
+ }
131
+
132
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
133
+
134
+ void cross_multihead_attention(const Cross_multihead_attention_params<uint16_t>& params, const cudaStream_t& stream)
135
+ {
136
+ multihead_attention_<uint16_t, Cross_multihead_attention_params<uint16_t>>(params, stream);
137
+ }
138
+
139
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
140
+
141
+ #ifdef ENABLE_BF16
142
+ void cross_multihead_attention(const Cross_multihead_attention_params<__nv_bfloat16>& params,
143
+ const cudaStream_t& stream)
144
+ {
145
+ multihead_attention_<__nv_bfloat16, Cross_multihead_attention_params<__nv_bfloat16>>(params, stream);
146
+ }
147
+ #endif
148
+
149
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
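The launcher above selects THREADS_PER_KEY and the block size from the sequence length (`tlength`), while THREADS_PER_VALUE always follows the rule that one thread handles 16 bytes of a value row: `Dh_MAX * sizeof(T) / 16`. A small standalone sketch of that arithmetic; `threads_per_value()` is an illustrative helper, not part of the extension's API:

```cpp
// Sketch of the 16-bytes-per-thread rule used for THREADS_PER_VALUE in the launcher above.
#include <cstdint>
#include <cstdio>

template <typename T>
constexpr int threads_per_value(int dh_max) {
    // A value row of dh_max elements is split into 16-byte chunks, one chunk per thread.
    return dh_max * static_cast<int>(sizeof(T)) / 16;
}

int main() {
    // fp16/bf16 storage (2 bytes): head dim 128 -> 16 threads cooperate on one value row.
    static_assert(threads_per_value<uint16_t>(128) == 16, "fp16, Dh_MAX=128");
    // fp32 storage (4 bytes): head dim 256 -> 64 threads per value row.
    static_assert(threads_per_value<float>(256) == 64, "fp32, Dh_MAX=256");
    std::printf("fp16 Dh=128 -> %d threads per value\n", threads_per_value<uint16_t>(128));
    return 0;
}
```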
flash-attention/csrc/ft_attention/decoder_masked_multihead_attention.h ADDED
@@ -0,0 +1,192 @@
1
+ // Downloaded from FasterTransformer v5.2.1
2
+ // https://github.com/NVIDIA/FasterTransformer/blob/release/v5.2.1_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention.h
3
+ /*
4
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
5
+ *
6
+ * Licensed under the Apache License, Version 2.0 (the "License");
7
+ * you may not use this file except in compliance with the License.
8
+ * You may obtain a copy of the License at
9
+ *
10
+ * http://www.apache.org/licenses/LICENSE-2.0
11
+ *
12
+ * Unless required by applicable law or agreed to in writing, software
13
+ * distributed under the License is distributed on an "AS IS" BASIS,
14
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ * See the License for the specific language governing permissions and
16
+ * limitations under the License.
17
+ */
18
+
19
+ #pragma once
20
+
21
+ #include "cuda_bf16_wrapper.h"
22
+ #include <cuda_fp16.h>
23
+ #include <cuda_runtime_api.h>
24
+ #include <stdint.h>
25
+ #include <stdio.h>
26
+ #include <stdlib.h>
27
+
28
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
29
+
30
+ #define CHECK_CUDA(call) \
31
+ do { \
32
+ cudaError_t status_ = call; \
33
+ if (status_ != cudaSuccess) { \
34
+ fprintf(stderr, "CUDA error (%s:%d): %s\n", __FILE__, __LINE__, cudaGetErrorString(status_)); \
35
+ exit(1); \
36
+ } \
37
+ } while (0)
38
+
39
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
40
+
41
+ // The structure of parameters for the masked multihead attention kernel.
42
+ //
43
+ // We use the following terminology to describe the different dimensions.
44
+ //
45
+ // B: Batch size (number of sequences),
46
+ // L: Sequence length,
47
+ // D: Hidden dimension,
48
+ // H: Number of heads,
49
+ // Dh: Hidden dimension per head - Dh = D / H.
50
+
51
+ template<typename T>
52
+ struct Multihead_attention_params_base {
53
+
54
+ // The output buffer. Dimensions B x D.
55
+ T* out = nullptr;
56
+
57
+ // The input Qs and the associated bias. Dimensions B x D and D, resp.
58
+ const T *q = nullptr, *q_bias = nullptr;
59
+ // The input Ks and the associated bias. Dimensions B x D and D, resp.
60
+ const T *k = nullptr, *k_bias = nullptr;
61
+ // The input Vs and the associated bias. Dimensions B x D and D, resp.
62
+ const T *v = nullptr, *v_bias = nullptr;
63
+
64
+ // The cache for the Ks. The size must be at least B x L x D.
65
+ T* k_cache = nullptr;
66
+ // The cache for the Vs. The size must be at least B x L x D.
67
+ T* v_cache = nullptr;
68
+ // The indirections to use for cache when beam sampling.
69
+ const int* cache_indir = nullptr;
70
+
71
+ // Stride to handle the case when KQV is a single buffer
72
+ int stride_q = 0;
73
+ int stride_k = 0;
74
+ int stride_v = 0;
75
+
76
+ // The batch size.
77
+ int batch_size = 0;
78
+ // The beam width
79
+ int beam_width = 0;
80
+ // The sequence length.
81
+ int memory_max_len = 0;
82
+ // The number of heads (H).
83
+ int num_heads = 0;
84
+ int num_heads_kv = 0;
85
+ int num_heads_q_kv_ratio = 0;
86
+ // The hidden dimension per head (Dh).
87
+ int hidden_size_per_head = 0;
88
+ // The per-head latent space reserved for rotary embeddings.
89
+ int rotary_embedding_dim = 0;
90
+ bool neox_rotary_style = false;
91
+ float rotary_base = 0.0f;
92
+ // The maximum length of input sentences.
93
+ int max_input_length = 0;
94
+ // The current timestep. TODO(bhsueh) Check whether we only use this param in cross attention.
95
+ int timestep = 0;
96
+ // The current timestep of each sentence (supports a different timestep per sentence)
97
+
98
+ // The 1.f / sqrt(Dh). Computed on the host.
99
+ float inv_sqrt_dh = 0.0f;
100
+
101
+ // Used when we have some input context like gpt
102
+ const int* total_padding_tokens = nullptr;
103
+
104
+ const bool* masked_tokens = nullptr;
105
+ const int* prefix_prompt_lengths = nullptr;
106
+ int max_prefix_prompt_length = 0;
107
+
108
+ const T* relative_attention_bias = nullptr;
109
+ int relative_attention_bias_stride = 0;
110
+ // The slope per head of linear position bias to attention score (H).
111
+ const T* linear_bias_slopes = nullptr;
112
+
113
+ const T* ia3_key_weights = nullptr;
114
+ const T* ia3_value_weights = nullptr;
115
+ const int* ia3_tasks = nullptr;
116
+
117
+ const float* qkv_scale_out = nullptr;
118
+ const float* attention_out_scale = nullptr;
119
+ int int8_mode = 0;
120
+
121
+ const T *rotary_cos = nullptr;
122
+ const T *rotary_sin = nullptr;
123
+
124
+ const int *nnz_head_idx = nullptr;
125
+ int nnz_heads = 0;
126
+ };
127
+
128
+ template<typename T, bool CROSS_ATTENTION>
129
+ struct Multihead_attention_params: public Multihead_attention_params_base<T> {
130
+ // output cross attentions
131
+ float* cross_attention_out = nullptr;
132
+ int max_decoder_seq_len = 0;
133
+ bool is_return_cross_attentions = false;
134
+
135
+ // allows attention to exit early
136
+ bool* finished = nullptr;
137
+
138
+ // required in case of cross attention
139
+ // will need it here until we can use if constexpr (C++17)
140
+ int* memory_length_per_sample = nullptr;
141
+
142
+ // required in case of masked attention with different length
143
+ const int* length_per_sample = nullptr;
144
+ };
145
+
146
+ template<typename T>
147
+ struct Multihead_attention_params<T, true>: public Multihead_attention_params_base<T> {
148
+ // output cross attentions
149
+ float* cross_attention_out = nullptr;
150
+ int max_decoder_seq_len = 0;
151
+ bool is_return_cross_attentions = false;
152
+
153
+ // allows attention to exit early
154
+ bool* finished = nullptr;
155
+
156
+ // required in case of cross attention
157
+ int* memory_length_per_sample = nullptr;
158
+
159
+ // required in case of masked attention with different length
160
+ const int* length_per_sample = nullptr;
161
+ };
162
+
163
+ template<class T>
164
+ using Masked_multihead_attention_params = Multihead_attention_params<T, false>;
165
+
166
+ template<class T>
167
+ using Cross_multihead_attention_params = Multihead_attention_params<T, true>;
168
+
169
+ template<typename T>
170
+ struct outputCrossAttentionParam {
171
+ // max decoder output length
172
+ int max_decoder_seq_len = 0;
173
+ T* cross_attention_out = nullptr;
174
+ bool is_return_cross_attentions = false;
175
+ };
176
+
177
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
178
+
179
+ void masked_multihead_attention(const Masked_multihead_attention_params<float>& params, const cudaStream_t& stream);
180
+ void masked_multihead_attention(const Masked_multihead_attention_params<uint16_t>& params, const cudaStream_t& stream);
181
+ #ifdef ENABLE_BF16
182
+ void masked_multihead_attention(const Masked_multihead_attention_params<__nv_bfloat16>& params,
183
+ const cudaStream_t& stream);
184
+ #endif
185
+ void cross_multihead_attention(const Cross_multihead_attention_params<float>& params, const cudaStream_t& stream);
186
+ void cross_multihead_attention(const Cross_multihead_attention_params<uint16_t>& params, const cudaStream_t& stream);
187
+ #ifdef ENABLE_BF16
188
+ void cross_multihead_attention(const Cross_multihead_attention_params<__nv_bfloat16>& params,
189
+ const cudaStream_t& stream);
190
+ #endif
191
+
192
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
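For orientation, the params struct above is typically filled on the host and handed to one of the entry points declared at the bottom of the header. Below is a hedged sketch of such a caller: buffer allocation, the KV-cache layout transform, and error handling are omitted; `run_masked_mha_fp32`, the placeholder pointers, and choices such as `beam_width = 1` are assumptions, not the extension's actual wrapper code.

```cpp
// Hedged sketch of a host-side caller for the header above. It only sets fields
// shown in Multihead_attention_params_base; everything else keeps its in-class
// default (nullptr / 0). Illustrative, not the extension's real wrapper.
#include <cmath>
#include <cuda_runtime_api.h>
#include "decoder_masked_multihead_attention.h"

void run_masked_mha_fp32(float* out, const float* q, const float* k, const float* v,
                         float* k_cache, float* v_cache,
                         int batch_size, int num_heads, int head_dim,
                         int memory_max_len, int timestep, cudaStream_t stream) {
    Masked_multihead_attention_params<float> params;
    params.out = out;                        // B x D
    params.q = q;                            // B x D
    params.k = k;                            // B x D
    params.v = v;                            // B x D
    params.k_cache = k_cache;                // at least B x L x D, laid out as [B, H, Dh/x, L, x]
    params.v_cache = v_cache;                // at least B x L x D, laid out as [B, H, L, Dh]
    params.batch_size = batch_size;
    params.beam_width = 1;                   // no beam search in this sketch
    params.memory_max_len = memory_max_len;
    params.num_heads = num_heads;
    params.num_heads_kv = num_heads;         // plain MHA; for GQA/MQA these would differ
    params.num_heads_q_kv_ratio = 1;
    params.hidden_size_per_head = head_dim;
    params.timestep = timestep;
    params.inv_sqrt_dh = 1.f / std::sqrt(static_cast<float>(head_dim));

    masked_multihead_attention(params, stream);
}
```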
flash-attention/csrc/ft_attention/decoder_masked_multihead_attention_template.hpp ADDED
@@ -0,0 +1,1619 @@
1
+ // Downloaded from FasterTransformer v5.2.1
2
+ // https://github.com/NVIDIA/FasterTransformer/blob/release/v5.2.1_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention/decoder_masked_multihead_attention_template.hpp
3
+ /*
4
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
5
+ *
6
+ * Licensed under the Apache License, Version 2.0 (the "License");
7
+ * you may not use this file except in compliance with the License.
8
+ * You may obtain a copy of the License at
9
+ *
10
+ * http://www.apache.org/licenses/LICENSE-2.0
11
+ *
12
+ * Unless required by applicable law or agreed to in writing, software
13
+ * distributed under the License is distributed on an "AS IS" BASIS,
14
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ * See the License for the specific language governing permissions and
16
+ * limitations under the License.
17
+ */
18
+ #pragma once
19
+
20
+ #include "decoder_masked_multihead_attention.h"
21
+ #include "decoder_masked_multihead_attention_utils.h"
22
+ #include "cuda_bf16_wrapper.h"
23
+ #include "cuda_bf16_fallbacks.cuh"
24
+ #include <assert.h>
25
+ #include <float.h>
26
+ #include <type_traits>
27
+
28
+ // #define MMHA_USE_HMMA_FOR_REDUCTION
29
+
30
+ // Below are knobs to extend FP32 accumulation for higher FP16 accuracy
31
+
32
+ // Does not seem to affect the accuracy that much
33
+ #define MMHA_USE_FP32_ACUM_FOR_FMA
34
+
35
+ // Seems to slightly improve the accuracy
36
+ #define MMHA_USE_FP32_ACUM_FOR_OUT
37
+
38
+ #if 0 && defined(MMHA_USE_FP32_ACUM_FOR_OUT)
39
+ // Does not seem to improve the accuracy
40
+ //#define MMHA_USE_FP32_ACUM_FOR_LOGITS
41
+ #endif
42
+
43
+ namespace mmha {
44
+
45
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
46
+
47
+ //
48
+ // We use the following terminology to describe the different dimensions.
49
+ //
50
+ // B: Batch size (number of sequences),
51
+ // L: Sequence length,
52
+ // D: Hidden dimension,
53
+ // H: Number of heads,
54
+ // Dh: Hidden dimension per head - Dh = D / H.
55
+ //
56
+ // The different kernels assign a threadblock for B x H pair. The grid has size (1, B, H). We use
57
+ // 64, 128 and 256 threads per block.
58
+ //
59
+ // Each threadblock loads Dh values from Q and its associated bias. The kernels run a loop to
60
+ // compute Q * K^T where K is loaded from a cache buffer -- except for the current timestep. The
61
+ // cache buffer helps with memory accesses and contains keys with bias.
62
+ //
63
+ // The layout of the cache buffer for the keys is [B, H, Dh/x, L, x] where x == 8 for FP16 and
64
+ // x == 4 for FP32 where the fastest moving dimension (contiguous data) is the rightmost one. The
65
+ // values for x are chosen to create chunks of 16 bytes.
66
+ //
67
+ // The different kernels use 1, 2 or 4 threads per key (THREADS_PER_KEY). The size of the LDGs
68
+ // depends on the number of threads per key. Each thread sums Dh / THREADS_PER_KEY elements. At
69
+ // the end of each iteration of the Q * K^T loop, we perform a reduction between lanes using an
70
+ // HMMA instruction (Tensor Core). Each Q * K^T value is stored in shared memory in FP32.
71
+ //
72
+ // After that loop, a parallel softmax is computed across the different Q * K^T values stored in
73
+ // shared memory.
74
+ //
75
+ // The kernel ends with a loop over the values in V. We use THREADS_PER_VALUE to control how many
76
+ // timesteps are computed per loop iteration. As with the keys, the values are read from a cache
77
+ // except for the current timestep. The layout of the cache buffer for the values is much simpler
78
+ // as it is [B, H, L, Dh].
79
+ //
80
+
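+
+ // Illustrative addition (not in the upstream FasterTransformer file): the linear
+ // offset implied by the [B, H, Dh/x, L, x] key-cache layout described above, with
+ // x = 16 / sizeof(T), written out once so the index math used later in the kernel
+ // is easier to follow. k_cache_offset_demo is a hypothetical helper name.
+ template<typename T>
+ inline __host__ __device__ constexpr size_t k_cache_offset_demo(int b, int h, int d, int l,
+                                                                 int num_heads, int Dh, int L)
+ {
+     constexpr int x = 16 / sizeof(T);
+     // Row-major flattening of (b, h, d / x, l, d % x): consecutive timesteps of the
+     // same 16-byte chunk are contiguous in memory.
+     return (((static_cast<size_t>(b) * num_heads + h) * (Dh / x) + d / x) * L + l) * x + d % x;
+ }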
81
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
82
+
83
+ template<typename T, int Dh>
84
+ struct Qk_vec_ {
85
+ };
86
+
87
+ template<>
88
+ struct Qk_vec_<float, 32> {
89
+ using Type = float;
90
+ };
91
+ template<>
92
+ struct Qk_vec_<float, 64> {
93
+ using Type = float2;
94
+ };
95
+ template<>
96
+ struct Qk_vec_<float, 128> {
97
+ using Type = float4;
98
+ };
99
+ template<>
100
+ struct Qk_vec_<float, 256> {
101
+ using Type = float4;
102
+ };
103
+ template<>
104
+ struct Qk_vec_<uint16_t, 32> {
105
+ using Type = uint32_t;
106
+ };
107
+ template<>
108
+ struct Qk_vec_<uint16_t, 64> {
109
+ using Type = uint32_t;
110
+ };
111
+ template<>
112
+ struct Qk_vec_<uint16_t, 128> {
113
+ using Type = uint2;
114
+ };
115
+ template<>
116
+ struct Qk_vec_<uint16_t, 256> {
117
+ using Type = uint4;
118
+ };
119
+ #ifdef ENABLE_BF16
120
+ template<>
121
+ struct Qk_vec_<__nv_bfloat16, 32> {
122
+ using Type = __nv_bfloat162;
123
+ };
124
+ template<>
125
+ struct Qk_vec_<__nv_bfloat16, 64> {
126
+ using Type = __nv_bfloat162;
127
+ };
128
+ template<>
129
+ struct Qk_vec_<__nv_bfloat16, 128> {
130
+ using Type = bf16_4_t;
131
+ };
132
+ template<>
133
+ struct Qk_vec_<__nv_bfloat16, 256> {
134
+ using Type = bf16_8_t;
135
+ };
136
+ #endif // ENABLE_BF16
137
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
138
+
139
+ template<typename T, int THREADS_PER_KEY>
140
+ struct K_vec_ {
141
+ };
142
+
143
+ template<>
144
+ struct K_vec_<float, 4> {
145
+ using Type = float;
146
+ };
147
+ template<>
148
+ struct K_vec_<float, 2> {
149
+ using Type = float2;
150
+ };
151
+ template<>
152
+ struct K_vec_<float, 1> {
153
+ using Type = float4;
154
+ };
155
+ template<>
156
+ struct K_vec_<uint16_t, 4> {
157
+ using Type = uint32_t;
158
+ };
159
+ template<>
160
+ struct K_vec_<uint16_t, 2> {
161
+ using Type = uint2;
162
+ };
163
+ template<>
164
+ struct K_vec_<uint16_t, 1> {
165
+ using Type = uint4;
166
+ };
167
+ #ifdef ENABLE_BF16
168
+ template<>
169
+ struct K_vec_<__nv_bfloat16, 4> {
170
+ using Type = __nv_bfloat162;
171
+ };
172
+ template<>
173
+ struct K_vec_<__nv_bfloat16, 2> {
174
+ using Type = bf16_4_t;
175
+ };
176
+ template<>
177
+ struct K_vec_<__nv_bfloat16, 1> {
178
+ using Type = bf16_8_t;
179
+ };
180
+ #endif // ENABLE_BF16
181
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
182
+
183
+ template<typename T, int V_VEC_SIZE>
184
+ struct V_vec_ {
185
+ };
186
+
187
+ template<>
188
+ struct V_vec_<float, 1> {
189
+ using Type = float;
190
+ };
191
+ template<>
192
+ struct V_vec_<float, 2> {
193
+ using Type = float2;
194
+ };
195
+ template<>
196
+ struct V_vec_<float, 4> {
197
+ using Type = float4;
198
+ };
199
+ template<>
200
+ struct V_vec_<uint16_t, 2> {
201
+ using Type = uint32_t;
202
+ };
203
+ template<>
204
+ struct V_vec_<uint16_t, 4> {
205
+ using Type = uint2;
206
+ };
207
+ template<>
208
+ struct V_vec_<uint16_t, 8> {
209
+ using Type = uint4;
210
+ };
211
+ #ifdef ENABLE_BF16
212
+ template<>
213
+ struct V_vec_<__nv_bfloat16, 2> {
214
+ using Type = __nv_bfloat162;
215
+ };
216
+ template<>
217
+ struct V_vec_<__nv_bfloat16, 4> {
218
+ using Type = bf16_4_t;
219
+ };
220
+ template<>
221
+ struct V_vec_<__nv_bfloat16, 8> {
222
+ using Type = bf16_8_t;
223
+ };
224
+ #endif // ENABLE_BF16
225
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
226
+
227
+ #ifdef MMHA_USE_FP32_ACUM_FOR_FMA
228
+ template<typename T>
229
+ struct Qk_vec_acum_fp32_ {
230
+ };
231
+
232
+ template<>
233
+ struct Qk_vec_acum_fp32_<float> {
234
+ using Type = float;
235
+ };
236
+ template<>
237
+ struct Qk_vec_acum_fp32_<float2> {
238
+ using Type = float2;
239
+ };
240
+ template<>
241
+ struct Qk_vec_acum_fp32_<float4> {
242
+ using Type = float4;
243
+ };
244
+ // template<> struct Qk_vec_acum_fp32_<uint16_t> { using Type = float; };
245
+ template<>
246
+ struct Qk_vec_acum_fp32_<uint32_t> {
247
+ using Type = float2;
248
+ };
249
+ template<>
250
+ struct Qk_vec_acum_fp32_<uint2> {
251
+ using Type = Float4_;
252
+ };
253
+ template<>
254
+ struct Qk_vec_acum_fp32_<uint4> {
255
+ using Type = Float8_;
256
+ };
257
+ template<>
258
+ struct Qk_vec_acum_fp32_<__nv_bfloat16> {
259
+ using Type = float;
260
+ };
261
+ template<>
262
+ struct Qk_vec_acum_fp32_<__nv_bfloat162> {
263
+ using Type = float2;
264
+ };
265
+ template<>
266
+ struct Qk_vec_acum_fp32_<bf16_4_t> {
267
+ using Type = Float4_;
268
+ };
269
+ template<>
270
+ struct Qk_vec_acum_fp32_<bf16_8_t> {
271
+ using Type = Float8_;
272
+ };
273
+
274
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
275
+
276
+ template<typename T>
277
+ struct K_vec_acum_fp32_ {
278
+ };
279
+
280
+ template<>
281
+ struct K_vec_acum_fp32_<float> {
282
+ using Type = float;
283
+ };
284
+ template<>
285
+ struct K_vec_acum_fp32_<float2> {
286
+ using Type = float2;
287
+ };
288
+ template<>
289
+ struct K_vec_acum_fp32_<float4> {
290
+ using Type = float4;
291
+ };
292
+ template<>
293
+ struct K_vec_acum_fp32_<uint32_t> {
294
+ using Type = float2;
295
+ };
296
+ template<>
297
+ struct K_vec_acum_fp32_<uint2> {
298
+ using Type = Float4_;
299
+ };
300
+ template<>
301
+ struct K_vec_acum_fp32_<uint4> {
302
+ using Type = Float8_;
303
+ };
304
+ template<>
305
+ struct K_vec_acum_fp32_<__nv_bfloat16> {
306
+ using Type = float;
307
+ };
308
+ template<>
309
+ struct K_vec_acum_fp32_<__nv_bfloat162> {
310
+ using Type = float2;
311
+ };
312
+ template<>
313
+ struct K_vec_acum_fp32_<bf16_4_t> {
314
+ using Type = Float4_;
315
+ };
316
+ template<>
317
+ struct K_vec_acum_fp32_<bf16_8_t> {
318
+ using Type = Float8_;
319
+ };
320
+ #endif
321
+
322
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
323
+
324
+ #ifdef MMHA_USE_FP32_ACUM_FOR_OUT
325
+ template<typename T>
326
+ struct V_vec_acum_fp32_ {
327
+ };
328
+
329
+ template<>
330
+ struct V_vec_acum_fp32_<float> {
331
+ using Type = float;
332
+ };
333
+ template<>
334
+ struct V_vec_acum_fp32_<float2> {
335
+ using Type = float2;
336
+ };
337
+ template<>
338
+ struct V_vec_acum_fp32_<float4> {
339
+ using Type = float4;
340
+ };
341
+ template<>
342
+ struct V_vec_acum_fp32_<uint32_t> {
343
+ using Type = float2;
344
+ };
345
+ template<>
346
+ struct V_vec_acum_fp32_<uint2> {
347
+ using Type = Float4_;
348
+ };
349
+ template<>
350
+ struct V_vec_acum_fp32_<uint4> {
351
+ using Type = Float8_;
352
+ };
353
+ #ifdef ENABLE_BF16
354
+ template<>
355
+ struct V_vec_acum_fp32_<__nv_bfloat162> {
356
+ using Type = float2;
357
+ };
358
+ template<>
359
+ struct V_vec_acum_fp32_<bf16_4_t> {
360
+ using Type = Float4_;
361
+ };
362
+ template<>
363
+ struct V_vec_acum_fp32_<bf16_8_t> {
364
+ using Type = Float8_;
365
+ };
366
+ #endif // ENABLE_BF16
367
+ #endif
368
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
369
+
370
+ template<int THREADS_PER_KEY, typename K_vec, int N>
371
+ inline __device__ float qk_dot_(const K_vec (&q)[N], const K_vec (&k)[N])
372
+ {
373
+ #ifdef MMHA_USE_FP32_ACUM_FOR_FMA
374
+ using K_vec_acum = typename K_vec_acum_fp32_<K_vec>::Type;
375
+ #else
376
+ using K_vec_acum = K_vec;
377
+ #endif
378
+ // Compute the parallel products for Q*K^T (treat vector lanes separately).
379
+ K_vec_acum qk_vec = mul<K_vec_acum, K_vec, K_vec>(q[0], k[0]);
380
+ #pragma unroll
381
+ for (int ii = 1; ii < N; ++ii) {
382
+ qk_vec = fma(q[ii], k[ii], qk_vec);
383
+ }
384
+
385
+ // Finalize the reduction across lanes.
386
+ float qk = sum(qk_vec);
387
+ #pragma unroll
388
+ for (int mask = THREADS_PER_KEY / 2; mask >= 1; mask /= 2) {
389
+ qk += __shfl_xor_sync(uint32_t(-1), qk, mask);
390
+ }
391
+ return qk;
392
+ }
393
+
394
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
395
+
396
+ template<typename T, int THREADS_PER_KEY>
397
+ struct Qk_dot {
398
+ template<typename K_vec, int N>
399
+ static inline __device__ float dot(const K_vec (&q)[N], const K_vec (&k)[N])
400
+ {
401
+ return qk_dot_<THREADS_PER_KEY>(q, k);
402
+ }
403
+ };
404
+
405
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
406
+
407
+ inline __device__ float4 hmma_fp32(const uint2& a, uint32_t b)
408
+ {
409
+ float4 c;
410
+ float zero = 0.f;
411
+ asm volatile("mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 \n"
412
+ " {%0, %1, %2, %3}, \n"
413
+ " {%4, %5}, \n"
414
+ " {%6}, \n"
415
+ " {%7, %7, %7, %7}; \n"
416
+
417
+ : "=f"(c.x), "=f"(c.y), "=f"(c.z), "=f"(c.w)
418
+ : "r"(a.x) "r"(a.y), "r"(b), "f"(zero));
419
+ return c;
420
+ }
421
+
422
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
423
+
424
+ template<int N>
425
+ inline __device__ float qk_hmma_dot_(const uint32_t (&q)[N], const uint32_t (&k)[N])
426
+ {
427
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 750
428
+ #ifdef MMHA_USE_FP32_ACUM_FOR_FMA
429
+ using K_vec_acum = typename K_vec_acum_fp32_<uint32_t>::Type;
430
+ #else
431
+ using K_vec_acum = uint32_t;
432
+ #endif
433
+ K_vec_acum qk_vec = mul<K_vec_acum, uint32_t, uint32_t>(q[0], k[0]);
434
+ #pragma unroll
435
+ for (int ii = 1; ii < N; ++ii) {
436
+ qk_vec = fma(q[ii], k[ii], qk_vec);
437
+ }
438
+ #ifdef MMHA_USE_FP32_ACUM_FOR_FMA
439
+ uint32_t qk_vec_ = float2_to_half2(qk_vec);
440
+ return hmma_fp32(make_uint2(qk_vec_, 0u), 0x3c003c00u).x;
441
+ #else
442
+ return hmma_fp32(make_uint2(qk_vec, 0u), 0x3c003c00u).x;
443
+ #endif
444
+ #else
445
+ return 0.f;
446
+ #endif
447
+ }
448
+
449
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
450
+
451
+ template<>
452
+ struct Qk_dot<uint16_t, 4> {
453
+ template<int N>
454
+ static inline __device__ float dot(const uint32_t (&q)[N], const uint32_t (&k)[N])
455
+ {
456
+ #if __CUDA_ARCH__ >= 750 && defined(MMHA_USE_HMMA_FOR_REDUCTION)
457
+ return qk_hmma_dot_(q, k);
458
+ #else
459
+ return qk_dot_<4>(q, k);
460
+ #endif // defined MMHA_USE_HMMA_FOR_REDUCTION
461
+ }
462
+ };
463
+
464
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
465
+
466
+ template<int WARPS_PER_BLOCK, int WARP_SIZE = 32>
467
+ inline __device__ float block_sum(float* red_smem, float sum)
468
+ {
469
+
470
+ // Decompose the thread index into warp / lane.
471
+ int warp = threadIdx.x / WARP_SIZE;
472
+ int lane = threadIdx.x % WARP_SIZE;
473
+
474
+ // Compute the sum per warp.
475
+ #pragma unroll
476
+ for (int mask = WARP_SIZE / 2; mask >= 1; mask /= 2) {
477
+ sum += __shfl_xor_sync(uint32_t(-1), sum, mask);
478
+ }
479
+
480
+ // Warp leaders store the data to shared memory.
481
+ if (lane == 0) {
482
+ red_smem[warp] = sum;
483
+ }
484
+
485
+ // Make sure the data is in shared memory.
486
+ __syncthreads();
487
+
488
+ // The warps compute the final sums.
489
+ if (lane < WARPS_PER_BLOCK) {
490
+ sum = red_smem[lane];
491
+ }
492
+
493
+ // Parallel reduction inside the warp.
494
+ #pragma unroll
495
+ for (int mask = WARPS_PER_BLOCK / 2; mask >= 1; mask /= 2) {
496
+ sum += __shfl_xor_sync(uint32_t(-1), sum, mask);
497
+ }
498
+
499
+ // Broadcast to other threads.
500
+ return __shfl_sync(uint32_t(-1), sum, 0);
501
+ }
502
+
503
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
504
+
505
+ inline __device__ void convert_from_float(float& dst, float src)
506
+ {
507
+ dst = src;
508
+ }
509
+
510
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
511
+
512
+ inline __device__ void convert_from_float(uint16_t& dst, float src)
513
+ {
514
+ dst = float_to_half(src);
515
+ }
516
+
517
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
518
+
519
+ inline __device__ void convert_from_float(uint32_t& dst, float2 src)
520
+ {
521
+ dst = float2_to_half2(src);
522
+ }
523
+
524
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
525
+ #ifdef ENABLE_BF16
526
+ inline __device__ void convert_from_float(__nv_bfloat16& dst, float src)
527
+ {
528
+ dst = __float2bfloat16(src);
529
+ }
530
+
531
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
532
+
533
+ inline __device__ void convert_from_float(__nv_bfloat162& dst, float2 src)
534
+ {
535
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
536
+ dst = __float22bfloat162_rn(src);
537
+ #else
538
+ dst = __floats2bfloat162_rn(src.x, src.y);
539
+ #endif
540
+ }
541
+ #endif // ENABLE_BF16
542
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
543
+
544
+ inline __device__ void convert_from_float(uint2& dst, Float4_ src)
545
+ {
546
+ dst.x = float2_to_half2(src.x);
547
+ dst.y = float2_to_half2(src.y);
548
+ }
549
+
550
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
551
+
552
+ inline __device__ void convert_from_float(uint2& dst, float4 src)
553
+ {
554
+ convert_from_float(dst, Float4_{make_float2(src.x, src.y), make_float2(src.z, src.w)});
555
+ }
556
+
557
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
558
+
559
+ inline __device__ void convert_from_float(uint4& dst, Float8_ src)
560
+ {
561
+ dst.x = float2_to_half2(src.x);
562
+ dst.y = float2_to_half2(src.y);
563
+ dst.z = float2_to_half2(src.z);
564
+ dst.w = float2_to_half2(src.w);
565
+ }
566
+
567
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
568
+
569
+ #ifdef ENABLE_BF16
570
+ inline __device__ void convert_from_float(bf16_4_t& dst, Float4_ src)
571
+ {
572
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
573
+ dst.x = __float22bfloat162_rn(src.x);
574
+ dst.y = __float22bfloat162_rn(src.y);
575
+ #else
576
+ dst.x = __floats2bfloat162_rn(src.x.x, src.x.y);
577
+ dst.y = __floats2bfloat162_rn(src.y.x, src.y.y);
578
+ #endif
579
+ }
580
+
581
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
582
+
583
+ inline __device__ void convert_from_float(bf16_4_t& dst, float4 src)
584
+ {
585
+ convert_from_float(dst, Float4_{make_float2(src.x, src.y), make_float2(src.z, src.w)});
586
+ }
587
+
588
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
589
+
590
+ inline __device__ void convert_from_float(bf16_8_t& dst, Float8_ src)
591
+ {
592
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
593
+ dst.x = __float22bfloat162_rn(src.x);
594
+ dst.y = __float22bfloat162_rn(src.y);
595
+ dst.z = __float22bfloat162_rn(src.z);
596
+ dst.w = __float22bfloat162_rn(src.w);
597
+ #else
598
+ dst.x = __floats2bfloat162_rn(src.x.x, src.x.y);
599
+ dst.y = __floats2bfloat162_rn(src.y.x, src.y.y);
600
+ dst.z = __floats2bfloat162_rn(src.z.x, src.z.y);
601
+ dst.w = __floats2bfloat162_rn(src.w.x, src.w.y);
602
+ #endif
603
+ }
604
+ #endif // ENABLE_BF16
605
+
606
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
607
+
608
+ inline __device__ void convert_from_float(float2& dst, float2 src)
609
+ {
610
+ dst = src;
611
+ }
612
+
613
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
614
+
615
+ inline __device__ void convert_from_float(float4& dst, float4 src)
616
+ {
617
+ dst = src;
618
+ }
619
+
620
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
621
+
622
+ inline __device__ float convert_to_float(float4 u)
623
+ {
624
+ return u.x;
625
+ }
626
+
627
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
628
+
629
+ inline __device__ float convert_to_float(uint4 u)
630
+ {
631
+ float2 tmp = half2_to_float2(u.x);
632
+ return tmp.x;
633
+ }
634
+
635
+ #if defined(MMHA_USE_FP32_ACUM_FOR_LOGITS)
636
+
637
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
638
+
639
+ inline __device__ float cast_to_float(float u)
640
+ {
641
+ return u;
642
+ }
643
+
644
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
645
+
646
+ inline __device__ float2 cast_to_float(float2 u)
647
+ {
648
+ return u;
649
+ }
650
+
651
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
652
+
653
+ inline __device__ float4 cast_to_float(float4 u)
654
+ {
655
+ return u;
656
+ }
657
+
658
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
659
+
660
+ inline __device__ Float4_ cast_to_float(Float4_ u)
661
+ {
662
+ return u;
663
+ }
664
+
665
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
666
+
667
+ inline __device__ Float8_ cast_to_float(Float8_ u)
668
+ {
669
+ return u;
670
+ }
671
+
672
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
673
+
674
+ inline __device__ float2 cast_to_float(uint32_t u)
675
+ {
676
+ return half2_to_float2(u);
677
+ }
678
+
679
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
680
+
681
+ inline __device__ Float4_ cast_to_float(uint2 u)
682
+ {
683
+ Float4_ tmp;
684
+ tmp.x = half2_to_float2(u.x);
685
+ tmp.y = half2_to_float2(u.y);
686
+ return tmp;
687
+ }
688
+
689
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
690
+
691
+ inline __device__ Float8_ cast_to_float(uint4 u)
692
+ {
693
+ Float8_ tmp;
694
+ tmp.x = half2_to_float2(u.x);
695
+ tmp.y = half2_to_float2(u.y);
696
+ tmp.z = half2_to_float2(u.z);
697
+ tmp.w = half2_to_float2(u.w);
698
+ return tmp;
699
+ }
700
+
701
+ #endif
702
+
703
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
704
+
705
+ inline __device__ float float_from_int8(int8_t u)
706
+ {
707
+ return u;
708
+ }
709
+
710
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
711
+
712
+ inline __device__ float2 float_from_int8(int16_t u)
713
+ {
714
+ union {
715
+ int16_t int16;
716
+ int8_t int8[2];
717
+ };
718
+ int16 = u;
719
+ return make_float2(int8[0], int8[1]);
720
+ }
721
+
722
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
723
+
724
+ inline __device__ float4 float_from_int8(int32_t u)
725
+ {
726
+ union {
727
+ int32_t int32;
728
+ int8_t int8[4];
729
+ };
730
+ int32 = u;
731
+ return make_float4(int8[0], int8[1], int8[2], int8[3]);
732
+ }
733
+
734
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
735
+
736
+ // clang-format off
737
+ inline __device__ Float8_ float_from_int8(int64_t u)
738
+ {
739
+ union {
740
+ int64_t int64;
741
+ int16_t int16[4];
742
+ };
743
+ int64 = u;
744
+ return Float8_ {float_from_int8(int16[0]),
745
+ float_from_int8(int16[1]),
746
+ float_from_int8(int16[2]),
747
+ float_from_int8(int16[3])};
748
+ }
749
+ // clang-format on
750
+
751
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
752
+
753
+ inline __device__ int8_t cast_to_int8(float val)
754
+ {
755
+ union {
756
+ int8_t int8[2];
757
+ int16_t int16;
758
+ };
759
+ asm volatile("cvt.rni.sat.s8.f32 %0, %1;" : "=h"(int16) : "f"(val));
760
+ return int8[0];
761
+ }
762
+
763
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
764
+
765
+ inline __device__ int32_t cast_to_int8(float4 val)
766
+ {
767
+ union {
768
+ int8_t int8[4];
769
+ int32_t int32;
770
+ };
771
+ int8[0] = cast_to_int8(val.x);
772
+ int8[1] = cast_to_int8(val.y);
773
+ int8[2] = cast_to_int8(val.z);
774
+ int8[3] = cast_to_int8(val.w);
775
+ return int32;
776
+ }
777
+
778
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
779
+
780
+ inline __device__ int64_t cast_to_int8(Float8_ val)
781
+ {
782
+ union {
783
+ int8_t int8[8];
784
+ int64_t int64;
785
+ };
786
+ int8[0] = cast_to_int8(val.x.x);
787
+ int8[1] = cast_to_int8(val.x.y);
788
+ int8[2] = cast_to_int8(val.y.x);
789
+ int8[3] = cast_to_int8(val.y.y);
790
+ int8[4] = cast_to_int8(val.z.x);
791
+ int8[5] = cast_to_int8(val.z.y);
792
+ int8[6] = cast_to_int8(val.w.x);
793
+ int8[7] = cast_to_int8(val.w.y);
794
+ return int64;
795
+ }
796
+
797
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
798
+
799
+ template<typename T>
800
+ inline __device__ __host__ T div_up(T m, T n)
801
+ {
802
+ return (m + n - 1) / n;
803
+ }
804
+
805
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
806
+
807
+ template<typename T, bool DO_CROSS_ATTENTION>
808
+ inline size_t smem_size_in_bytes(const Multihead_attention_params<T, DO_CROSS_ATTENTION>& params,
809
+ int threads_per_value,
810
+ int threads_per_block)
811
+ {
812
+ // The amount of shared memory needed to store the Q*K^T values in float.
813
+ const int max_timesteps = min(params.timestep, params.memory_max_len);
814
+ size_t qk_sz = (DO_CROSS_ATTENTION) ? div_up(params.memory_max_len + 1, 4) * 16 : div_up(max_timesteps + 1, 4) * 16;
815
+
816
+ // The extra memory needed if we are not using floats for the final logits.
817
+ size_t logits_sz = 0;
818
+ #ifndef MMHA_USE_FP32_ACUM_FOR_LOGITS
819
+ if (sizeof(T) != 4) {
820
+ // TODO
821
+ logits_sz = (DO_CROSS_ATTENTION) ? div_up(params.memory_max_len + 1, 4) * 4 * sizeof(T) :
822
+ div_up(max_timesteps + 1, 4) * 4 * sizeof(T);
823
+ }
824
+ #endif
825
+
826
+ // The total size needed during softmax.
827
+ size_t softmax_sz = qk_sz + logits_sz;
828
+
829
+ // The number of partial rows to reduce in the final reduction.
830
+ int rows_per_red = threads_per_block / threads_per_value;
831
+ // The amount of storage needed to finalize the outputs.
832
+ size_t red_sz = rows_per_red * params.hidden_size_per_head * sizeof(T) / 2;
833
+
834
+ size_t transpose_rotary_size = 0;
835
+ if (params.rotary_embedding_dim > 0 && params.neox_rotary_style) {
836
+ transpose_rotary_size = 2 * params.rotary_embedding_dim * sizeof(T);
837
+ }
838
+
839
+ // The max.
840
+ return max(max(softmax_sz, red_sz), transpose_rotary_size);
841
+ }
842
+
843
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
844
+
845
+ inline __device__ constexpr uint32_t shfl_mask(int threads)
846
+ {
847
+ return threads == 32 ? uint32_t(-1) : (1u << threads) - 1u;
848
+ }
849
+
850
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
851
+
852
+ template<
853
+ // The type of the inputs. Supported types: float and half.
854
+ typename T,
855
+ // The hidden dimension per head.
856
+ int Dh,
857
+ int Dh_MAX,
858
+ // The number of threads per key.
859
+ int THREADS_PER_KEY,
860
+ // The number of threads per value.
861
+ int THREADS_PER_VALUE,
862
+ // The number of threads in a threadblock.
863
+ int THREADS_PER_BLOCK,
864
+ bool DO_CROSS_ATTENTION>
865
+ __global__ void masked_multihead_attention_kernel(Multihead_attention_params<T, DO_CROSS_ATTENTION> params)
866
+ {
867
+
868
+ // Make sure the hidden dimension per head is a multiple of the number of threads per key.
869
+ static_assert(Dh_MAX % THREADS_PER_KEY == 0, "");
870
+ // Make sure the hidden dimension per head is a multiple of the number of threads per value.
871
+ static_assert(Dh_MAX % THREADS_PER_VALUE == 0, "");
872
+
873
+ // The size of a warp.
874
+ constexpr int WARP_SIZE = 32;
875
+ // The number of warps in a threadblock.
876
+ constexpr int WARPS_PER_BLOCK = THREADS_PER_BLOCK / WARP_SIZE;
877
+
878
+ // Use smem_size_in_bytes (above) to determine the amount of shared memory.
879
+ extern __shared__ char smem_[];
880
+
881
+ // The shared memory for the Q*K^T values and partial logits in softmax.
882
+ float* qk_smem = reinterpret_cast<float*>(smem_);
883
+
884
+ // The shared memory for the logits. For FP32, that's the same buffer as qk_smem.
885
+ char* logits_smem_ = smem_;
886
+ #ifndef MMHA_USE_FP32_ACUM_FOR_LOGITS
887
+ if (sizeof(T) != 4) {
888
+ // TODO - change to tlength
889
+ const int max_timesteps = min(params.timestep, params.memory_max_len);
890
+ logits_smem_ +=
891
+ (DO_CROSS_ATTENTION) ? div_up(params.memory_max_len + 1, 4) * 16 : div_up(max_timesteps + 1, 4) * 16;
892
+ }
893
+ T* logits_smem = reinterpret_cast<T*>(logits_smem_);
894
+ #else
895
+ float* logits_smem = reinterpret_cast<float*>(logits_smem_);
896
+ #endif
897
+
898
+ // The shared memory to do the final reduction for the output values. Reuse qk_smem.
899
+ T* out_smem = reinterpret_cast<T*>(smem_);
900
+
901
+ // The shared memory buffers for the block-wide reductions. One for max, one for sum.
902
+ __shared__ float red_smem[WARPS_PER_BLOCK * 2];
903
+
904
+ // A vector of Q or K elements for the current timestep.
905
+ using Qk_vec = typename Qk_vec_<T, Dh_MAX>::Type;
906
+
907
+ // Use alignment for safely casting the shared buffers as Qk_vec.
908
+ // Shared memory to store Q inputs.
909
+ __shared__ __align__(sizeof(Qk_vec)) T q_smem[Dh_MAX];
910
+
911
+ // This is one of the reasons we should have a separate kernel for cross attention
912
+ __shared__ __align__(sizeof(Qk_vec)) T bias_smem[DO_CROSS_ATTENTION ? Dh_MAX : 1];
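+ // (Sized 1 when unused so the declaration stays valid for self-attention.)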
913
+
914
+ // A vector of Q or K elements for the current timestep.
915
+ using Qk_vec = typename Qk_vec_<T, Dh_MAX>::Type;
916
+ // The number of elements per vector.
917
+ constexpr int QK_VEC_SIZE = sizeof(Qk_vec) / sizeof(T);
918
+ // Make sure the hidden size per head is a multiple of the vector size.
919
+ static_assert(Dh_MAX % QK_VEC_SIZE == 0, "");
920
+ // We will use block wide reduction if needed
921
+ // static_assert(Dh_MAX / QK_VEC_SIZE <= WARP_SIZE, "");
922
+ // The number of vectors per warp.
923
+ constexpr int QK_VECS_PER_WARP = Dh_MAX / QK_VEC_SIZE;
924
+
925
+ // The layout of the cache is [B, H, Dh/x, L, x] with x == 4/8 for FP32/FP16. Since each thread
926
+ // owns x elements, we have to decompose the linear index into chunks of x values and the posi-
927
+ // tion of the thread in that chunk.
928
+
929
+ // The number of elements in a chunk of 16B (that's the x in the above formula).
930
+ constexpr int QK_ELTS_IN_16B = 16 / sizeof(T);
931
+ // The number of K vectors in 16B.
932
+ constexpr int QK_VECS_IN_16B = 16 / sizeof(Qk_vec);
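+ // Example with FP16 (x == 8): within one (batch, head) slice of the cache, element d of timestep t
+ // lives at ((d / 8) * memory_max_len + t) * 8 + (d % 8).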
933
+
934
+ // The batch/beam idx
935
+ const int bi = blockIdx.y;
936
+ if (params.finished != nullptr && params.finished[bi] == true) {
937
+ return;
938
+ }
939
+ // The beam idx
940
+ const int beami = bi % params.beam_width;
941
+ // The "beam-aware" batch idx
942
+ const int bbi = bi / params.beam_width;
943
+ // The head.
944
+ // const int hi = blockIdx.x;
945
+ const int hi = params.nnz_head_idx == nullptr ? blockIdx.x : params.nnz_head_idx[blockIdx.x];
946
+ const int hi_kv = hi / params.num_heads_q_kv_ratio;
947
+ // Combine the batch and the head indices.
948
+ const int bhi = bi * params.num_heads + hi;
949
+ const int bhi_kv = bi * params.num_heads_kv + hi_kv;
950
+ // Combine the "beam-aware" batch idx and the head indices.
951
+ const int bbhi = bbi * params.beam_width * params.num_heads_kv + hi_kv;
952
+ // The thread in the block.
953
+ const int tidx = threadIdx.x;
954
+
955
+ const bool handle_kv = !DO_CROSS_ATTENTION || (DO_CROSS_ATTENTION && params.timestep == 0);
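+ // For self-attention we always write the new K/V into the cache; for cross-attention the cache is only populated once, at timestep 0.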
956
+
957
+ // While doing the product Q*K^T for the different keys we track the max.
958
+ float qk_max = -FLT_MAX;
959
+
960
+ float qk = 0.0F;
961
+
962
+ int q_base_offset = (params.stride_q == 0) ? bhi * Dh : bi * params.stride_q + hi * Dh;
963
+ int k_base_offset = (params.stride_k == 0) ? bhi_kv * Dh : bi * params.stride_k + hi_kv * Dh;
964
+ int v_base_offset = (params.stride_v == 0) ? bhi_kv * Dh : bi * params.stride_v + hi_kv * Dh;
965
+
966
+ const size_t bi_seq_len_offset = bi * params.memory_max_len;
967
+
968
+ // int tlength = (DO_CROSS_ATTENTION)? params.memory_length_per_sample[bi] - 1 : params.timestep;
969
+ int tlength = (DO_CROSS_ATTENTION) ? params.memory_length_per_sample[bi] - 1 :
970
+ (params.length_per_sample == nullptr) ?
971
+ params.timestep :
972
+ params.length_per_sample[bi] + params.max_prefix_prompt_length;
973
+ const int first_step = max(0, tlength + 1 - params.memory_max_len);
974
+ const int tlength_circ = tlength % params.memory_max_len;
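+ // The K/V cache acts as a ring buffer once tlength exceeds memory_max_len: only the last memory_max_len
+ // timesteps (starting at first_step) are kept, and the current timestep maps to slot tlength_circ.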
975
+
976
+ // First QK_VECS_PER_WARP load Q and K + the bias values for the current timestep.
977
+ const bool is_masked = tidx >= QK_VECS_PER_WARP;
978
+
979
+ // The offset in the Q and K buffer also accounts for the batch.
980
+ int q_offset = q_base_offset + tidx * QK_VEC_SIZE;
981
+ int k_offset = k_base_offset + tidx * QK_VEC_SIZE;
982
+ // The offset in the bias buffer.
983
+ int q_bias_offset = hi * Dh + tidx * QK_VEC_SIZE;
984
+ int k_bias_offset = hi_kv * Dh + tidx * QK_VEC_SIZE;
985
+
986
+ const bool do_ia3 = handle_kv && params.ia3_tasks != nullptr;
987
+ const int ia3_task_id = do_ia3 ? params.ia3_tasks[bbi] : 0;
988
+
989
+ // Trigger the loads from the Q and K buffers.
990
+ Qk_vec q;
991
+ zero(q);
992
+ if (!is_masked && (Dh == Dh_MAX || tidx * QK_VEC_SIZE < Dh)) {
993
+ if (params.int8_mode == 2) {
994
+ using Packed_Int8_t = typename packed_type<int8_t, num_elems<Qk_vec>::value>::type;
995
+ using Packed_Float_t = typename packed_type<float, num_elems<Qk_vec>::value>::type;
996
+ const auto q_scaling = params.qkv_scale_out[0];
997
+ const auto q_quant =
998
+ *reinterpret_cast<const Packed_Int8_t*>(&reinterpret_cast<const int8_t*>(params.q)[q_offset]);
999
+
1000
+ convert_from_float(q, mul<Packed_Float_t, float>(q_scaling, float_from_int8(q_quant)));
1001
+ }
1002
+ else {
1003
+ q = *reinterpret_cast<const Qk_vec*>(&params.q[q_offset]);
1004
+ }
1005
+ }
1006
+
1007
+ Qk_vec k;
1008
+ zero(k);
1009
+ if (DO_CROSS_ATTENTION) {
1010
+ // The 16B chunk written by the thread.
1011
+ int co = tidx / QK_VECS_IN_16B;
1012
+ // The position of the thread in that 16B chunk.
1013
+ int ci = tidx % QK_VECS_IN_16B * QK_VEC_SIZE;
1014
+
1015
+ // Two chunks are separated by L * x elements. A thread writes QK_VEC_SIZE elements.
1016
+ int offset = bhi_kv * params.memory_max_len * Dh + co * params.memory_max_len * QK_ELTS_IN_16B +
1017
+ // params.timestep*QK_ELTS_IN_16B +
1018
+ tlength * QK_ELTS_IN_16B + ci;
1019
+ k = !is_masked && (Dh == Dh_MAX || tidx * QK_VEC_SIZE < Dh) ?
1020
+ *reinterpret_cast<const Qk_vec*>(&params.k_cache[offset]) :
1021
+ k;
1022
+ }
1023
+ else {
1024
+ if (!is_masked && (Dh == Dh_MAX || tidx * QK_VEC_SIZE < Dh)) {
1025
+ if (params.int8_mode == 2) {
1026
+ using Packed_Int8_t = typename packed_type<int8_t, num_elems<Qk_vec>::value>::type;
1027
+ using Packed_Float_t = typename packed_type<float, num_elems<Qk_vec>::value>::type;
1028
+ const auto k_scaling = params.qkv_scale_out[1];
1029
+ const auto k_quant =
1030
+ *reinterpret_cast<const Packed_Int8_t*>(&reinterpret_cast<const int8_t*>(params.k)[k_offset]);
1031
+
1032
+ convert_from_float(k, mul<Packed_Float_t, float>(k_scaling, float_from_int8(k_quant)));
1033
+ }
1034
+ else {
1035
+ k = *reinterpret_cast<const Qk_vec*>(&params.k[k_offset]);
1036
+ }
1037
+ }
1038
+ }
1039
+
1040
+ // Trigger the loads from the Q and K bias buffers.
1041
+ Qk_vec q_bias;
1042
+ zero(q_bias);
1043
+ q_bias = !is_masked && (Dh == Dh_MAX || tidx * QK_VEC_SIZE < Dh) && params.q_bias != nullptr ?
1044
+ *reinterpret_cast<const Qk_vec*>(&params.q_bias[q_bias_offset]) :
1045
+ q_bias;
1046
+
1047
+ Qk_vec k_bias;
1048
+ zero(k_bias);
1049
+ if (handle_kv) {
1050
+ k_bias = !is_masked && (Dh == Dh_MAX || tidx * QK_VEC_SIZE < Dh) && params.k_bias != nullptr ?
1051
+ *reinterpret_cast<const Qk_vec*>(&params.k_bias[k_bias_offset]) :
1052
+ k_bias;
1053
+ }
1054
+
1055
+ // Computes the Q/K values with bias.
1056
+ q = add(q, q_bias);
1057
+ if (handle_kv) {
1058
+ k = add(k, k_bias);
1059
+ }
1060
+ if (do_ia3 && !is_masked) {
1061
+ k = mul<Qk_vec, Qk_vec, Qk_vec>(
1062
+ k,
1063
+ *reinterpret_cast<const Qk_vec*>(
1064
+ &params.ia3_key_weights[(ia3_task_id * params.num_heads + hi) * Dh + tidx * QK_VEC_SIZE]));
1065
+ }
1066
+
1067
+ // Padded len
1068
+ const int padd_len = (params.total_padding_tokens == nullptr) ? 0 : params.total_padding_tokens[bi];
1069
+ if (params.rotary_embedding_dim > 0 && !params.neox_rotary_style) {
1070
+ if (handle_kv) {
1071
+ if (params.rotary_cos == nullptr) {
1072
+ apply_rotary_embedding(q, k, tidx, params.rotary_embedding_dim, tlength - padd_len, params.rotary_base);
1073
+ } else {
1074
+ apply_rotary_embedding(q, k, tidx, params.rotary_embedding_dim, tlength - padd_len,
1075
+ params.rotary_cos + bi * params.rotary_embedding_dim / 2,
1076
+ params.rotary_sin + bi * params.rotary_embedding_dim / 2);
1077
+ }
1078
+ }
1079
+ else {
1080
+ if (params.rotary_cos == nullptr) {
1081
+ apply_rotary_embedding(q, tidx, params.rotary_embedding_dim, tlength - padd_len, params.rotary_base);
1082
+ } else {
1083
+ apply_rotary_embedding(q, tidx, params.rotary_embedding_dim, tlength - padd_len,
1084
+ params.rotary_cos + bi * params.rotary_embedding_dim / 2,
1085
+ params.rotary_sin + bi * params.rotary_embedding_dim / 2);
1086
+ }
1087
+ }
1088
+ }
1089
+ else if (params.rotary_embedding_dim > 0 && params.neox_rotary_style) {
1090
+ const bool do_rotary = !is_masked && QK_VEC_SIZE * tidx < params.rotary_embedding_dim;
1091
+
1092
+ T* q_smem = reinterpret_cast<T*>(smem_);
1093
+ T* k_smem = q_smem + params.rotary_embedding_dim;
1094
+
1095
+ const int half_rotary_dim = params.rotary_embedding_dim / 2;
1096
+ const int half_idx = (tidx * QK_VEC_SIZE) / half_rotary_dim;
1097
+ const int intra_half_idx = (tidx * QK_VEC_SIZE) % half_rotary_dim;
1098
+ const int smem_pitch = half_rotary_dim; // TODO: adjust for bank conflicts
1099
+
1100
+ assert(half_rotary_dim % QK_VEC_SIZE == 0);
1101
+
1102
+ if (do_rotary) {
1103
+ *reinterpret_cast<Qk_vec*>(q_smem + half_idx * smem_pitch + intra_half_idx) = q;
1104
+
1105
+ if (handle_kv) {
1106
+ *reinterpret_cast<Qk_vec*>(k_smem + half_idx * smem_pitch + intra_half_idx) = k;
1107
+ }
1108
+ }
1109
+
1110
+ __syncthreads();
1111
+
1112
+ const int transpose_idx = half_idx * (half_rotary_dim / 2) + intra_half_idx / 2;
1113
+ constexpr int tidx_factor = (QK_VEC_SIZE > 1) ? QK_VEC_SIZE / 2 : 1;
1114
+ if (do_rotary) {
1115
+ mmha::vec_from_smem_transpose(q, q_smem, transpose_idx, smem_pitch);
1116
+
1117
+ if (handle_kv) {
1118
+ mmha::vec_from_smem_transpose(k, k_smem, transpose_idx, smem_pitch);
1119
+
1120
+ if (params.rotary_cos == nullptr) {
1121
+ mmha::apply_rotary_embedding(
1122
+ q, k, transpose_idx / tidx_factor, params.rotary_embedding_dim, tlength - padd_len, params.rotary_base);
1123
+ } else {
1124
+ mmha::apply_rotary_embedding(
1125
+ q, k, transpose_idx / tidx_factor, params.rotary_embedding_dim, tlength - padd_len,
1126
+ params.rotary_cos + bi * params.rotary_embedding_dim / 2,
1127
+ params.rotary_sin + bi * params.rotary_embedding_dim / 2);
1128
+ }
1129
+
1130
+ mmha::write_smem_transpose(k, k_smem, transpose_idx, smem_pitch);
1131
+ }
1132
+ else {
1133
+ if (params.rotary_cos == nullptr) {
1134
+ mmha::apply_rotary_embedding(
1135
+ q, transpose_idx / tidx_factor, params.rotary_embedding_dim, tlength, params.rotary_base);
1136
+ } else {
1137
+ mmha::apply_rotary_embedding(
1138
+ q, transpose_idx / tidx_factor, params.rotary_embedding_dim, tlength,
1139
+ params.rotary_cos + bi * params.rotary_embedding_dim / 2,
1140
+ params.rotary_sin + bi * params.rotary_embedding_dim / 2);
1141
+ }
1142
+ }
1143
+ mmha::write_smem_transpose(q, q_smem, transpose_idx, smem_pitch);
1144
+ }
1145
+
1146
+ __syncthreads();
1147
+
1148
+ if (do_rotary) {
1149
+ q = *reinterpret_cast<Qk_vec*>(q_smem + half_idx * smem_pitch + intra_half_idx);
1150
+ if (handle_kv) {
1151
+ k = *reinterpret_cast<Qk_vec*>(k_smem + half_idx * smem_pitch + intra_half_idx);
1152
+ }
1153
+ }
1154
+
1155
+ __syncthreads();
1156
+ }
1157
+
1158
+ if (!is_masked) {
1159
+ // Store the Q values to shared memory.
1160
+ *reinterpret_cast<Qk_vec*>(&q_smem[tidx * QK_VEC_SIZE]) = q;
1161
+
1162
+ // Store Dh values of k_bias into smem, since we will need to add them later
1163
+ // if params.timestep == 0
1164
+ if (DO_CROSS_ATTENTION && params.timestep == 0) {
1165
+ *reinterpret_cast<Qk_vec*>(&bias_smem[tidx * QK_VEC_SIZE]) = k_bias;
1166
+ }
1167
+
1168
+ // Write the K values to the global memory cache.
1169
+ //
1170
+ // NOTE: The stores are uncoalesced as we have multiple chunks of 16B spread across the memory
1171
+ // system. We designed it this way as it allows much better memory loads (and there are many
1172
+ // more loads) + the stores are really "write and forget" since we won't need the ack before
1173
+ // the end of the kernel. There's plenty of time for the transactions to complete.
1174
+
1175
+ // The 16B chunk written by the thread.
1176
+ int co = tidx / QK_VECS_IN_16B;
1177
+ // The position of the thread in that 16B chunk.
1178
+ int ci = tidx % QK_VECS_IN_16B * QK_VEC_SIZE;
1179
+
1180
+ // Two chunks are separated by L * x elements. A thread writes QK_VEC_SIZE elements.
1181
+ int offset = bhi_kv * params.memory_max_len * Dh + co * params.memory_max_len * QK_ELTS_IN_16B +
1182
+ // params.timestep*QK_ELTS_IN_16B +
1183
+ tlength_circ * QK_ELTS_IN_16B + ci;
1184
+
1185
+ if (handle_kv && hi % params.num_heads_q_kv_ratio == 0) {
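+ // With grouped-query attention, only the first query head of each K/V group writes the shared K/V cache.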
1186
+ // Trigger the stores to global memory.
1187
+ if (Dh == Dh_MAX || co < Dh / QK_ELTS_IN_16B) {
1188
+ *reinterpret_cast<Qk_vec*>(&params.k_cache[offset]) = k;
1189
+ }
1190
+ }
1191
+
1192
+ // Compute \sum_i Q[i] * K^T[i] for the current timestep.
1193
+ #ifdef MMHA_USE_FP32_ACUM_FOR_FMA
1194
+ using Qk_vec_acum = typename Qk_vec_acum_fp32_<Qk_vec>::Type;
1195
+ #else
1196
+ using Qk_vec_acum = Qk_vec;
1197
+ #endif
1198
+ qk = dot<Qk_vec_acum, Qk_vec>(q, k);
1199
+ if (QK_VECS_PER_WARP <= WARP_SIZE) {
1200
+ #pragma unroll
1201
+ for (int mask = QK_VECS_PER_WARP / 2; mask >= 1; mask /= 2) {
1202
+ qk += __shfl_xor_sync(shfl_mask(QK_VECS_PER_WARP), qk, mask);
1203
+ }
1204
+ }
1205
+ }
1206
+
1207
+ if (QK_VECS_PER_WARP > WARP_SIZE) {
1208
+ constexpr int WARPS_PER_RED = (QK_VECS_PER_WARP + WARP_SIZE - 1) / WARP_SIZE;
1209
+ qk = block_sum<WARPS_PER_RED>(&red_smem[WARPS_PER_RED], qk);
1210
+ }
1211
+
1212
+ // Store that value in shared memory. Keep the Q*K^T value in register for softmax.
1213
+ if (tidx == 0) {
1214
+ // Normalize qk.
1215
+ qk *= params.inv_sqrt_dh;
1216
+ if (params.relative_attention_bias != nullptr) {
1217
+ qk = add(qk,
1218
+ params.relative_attention_bias[hi * params.relative_attention_bias_stride
1219
+ * params.relative_attention_bias_stride
1220
+ + (tlength - padd_len) * params.relative_attention_bias_stride
1221
+ + (tlength - padd_len)]);
1222
+ }
1223
+ // We don't need to apply the linear position bias here since qi - ki = 0 yields the position bias 0.
1224
+
1225
+ qk_max = qk;
1226
+ qk_smem[tlength - first_step] = qk;
1227
+ // qk_smem[params.timestep] = qk;
1228
+ }
1229
+
1230
+ // Make sure the data is in shared memory.
1231
+ __syncthreads();
1232
+
1233
+ // The type of queries and keys for the math in the Q*K^T product.
1234
+ using K_vec = typename K_vec_<T, THREADS_PER_KEY>::Type;
1235
+ // The number of elements per vector.
1236
+ constexpr int K_VEC_SIZE = sizeof(K_vec) / sizeof(T);
1237
+ // Make sure the hidden size per head is a multiple of the vector size.
1238
+ static_assert(Dh_MAX % K_VEC_SIZE == 0, "");
1239
+ // The number of elements per thread.
1240
+ constexpr int K_ELTS_PER_THREAD = Dh_MAX / THREADS_PER_KEY;
1241
+ // The number of vectors per thread.
1242
+ constexpr int K_VECS_PER_THREAD = K_ELTS_PER_THREAD / K_VEC_SIZE;
1243
+
1244
+ // The position of the first key loaded by each thread from the cache buffer (for this B * H).
1245
+ int ko = tidx / THREADS_PER_KEY;
1246
+ // The position of the thread in the chunk of keys.
1247
+ int ki = tidx % THREADS_PER_KEY * K_VEC_SIZE;
1248
+
1249
+ static_assert(Dh_MAX == THREADS_PER_KEY * K_VEC_SIZE * K_VECS_PER_THREAD);
1250
+
1251
+ // Load the Q values from shared memory. The values are reused during the loop on K.
1252
+ K_vec q_vec[K_VECS_PER_THREAD];
1253
+ #pragma unroll
1254
+ for (int ii = 0; ii < K_VECS_PER_THREAD; ++ii) {
1255
+ q_vec[ii] = *reinterpret_cast<const K_vec*>(&q_smem[ki + ii * THREADS_PER_KEY * K_VEC_SIZE]);
1256
+ }
1257
+
1258
+ K_vec k_bias_vec[DO_CROSS_ATTENTION ? K_VECS_PER_THREAD : 1];
1259
+ if (DO_CROSS_ATTENTION && params.timestep == 0) {
1260
+ #pragma unroll
1261
+ for (int ii = 0; ii < K_VECS_PER_THREAD; ++ii) {
1262
+ k_bias_vec[ii] = *reinterpret_cast<const K_vec*>(&bias_smem[ki + ii * THREADS_PER_KEY * K_VEC_SIZE]);
1263
+ }
1264
+ }
1265
+
1266
+ // The number of timesteps loaded per iteration.
1267
+ constexpr int K_PER_ITER = THREADS_PER_BLOCK / THREADS_PER_KEY;
1268
+ // The number of keys per warp.
1269
+ constexpr int K_PER_WARP = WARP_SIZE / THREADS_PER_KEY;
1270
+
1271
+ // The base pointer for the key in the cache buffer.
1272
+ T* k_cache = &params.k_cache[bhi_kv * params.memory_max_len * Dh + ki];
1273
+ // Base pointer for the beam's batch, before offsetting with indirection buffer
1274
+ T* k_cache_batch = &params.k_cache[bbhi * params.memory_max_len * Dh + ki];
1275
+
1276
+ // Pick a number of keys to make sure all the threads of a warp enter (due to shfl_sync).
1277
+ // int ti_end = div_up(params.timestep, K_PER_WARP) * K_PER_WARP;
1278
+ int ti_end = div_up(tlength - first_step, K_PER_WARP) * K_PER_WARP + first_step;
1279
+
1280
+ // The prefix prompt length, if any.
1281
+ const int prefix_prompt_length = (params.prefix_prompt_lengths == nullptr) ? 0 : params.prefix_prompt_lengths[bi];
1282
+
1283
+ // Iterate over the keys/timesteps to compute the various (Q*K^T)_{ti} values.
1284
+ const bool has_beams = params.cache_indir != nullptr;
1285
+ const int* beam_indices = has_beams ? &params.cache_indir[bi_seq_len_offset] : nullptr;
1286
+
1287
+ for (int ti = first_step + ko; ti < ti_end; ti += K_PER_ITER) {
1288
+ const int ti_circ = ti % params.memory_max_len;
1289
+
1290
+ // The keys loaded from the key cache.
1291
+ K_vec k[K_VECS_PER_THREAD];
1292
+ K_vec k_vec_zero;
1293
+ zero(k_vec_zero);
1294
+ #pragma unroll
1295
+ for (int ii = 0; ii < K_VECS_PER_THREAD; ++ii) {
1296
+ int jj = ii * params.memory_max_len + ti_circ;
1297
+ // if( ti < params.timestep ) {
1298
+ const bool within_bounds = (Dh == Dh_MAX || jj * QK_ELTS_IN_16B < Dh * params.memory_max_len);
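+ // Out-of-bounds chunks (possible when Dh < Dh_MAX) are replaced by zeros so they don't contribute to the dot product.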
1299
+ if (ti < tlength) {
1300
+ if (!within_bounds) {
1301
+ k[ii] = k_vec_zero;
1302
+ }
1303
+ else {
1304
+ if (has_beams) {
1305
+ const int beam_offset = beam_indices[ti_circ] * params.num_heads * params.memory_max_len * Dh;
1306
+ k[ii] = *reinterpret_cast<const K_vec*>(&k_cache_batch[beam_offset + jj * QK_ELTS_IN_16B]);
1307
+ }
1308
+ else {
1309
+ k[ii] = *reinterpret_cast<const K_vec*>(&k_cache_batch[jj * QK_ELTS_IN_16B]);
1310
+ }
1311
+ }
1312
+ // add bias and update k_cache
1313
+ if (DO_CROSS_ATTENTION && params.timestep == 0) {
1314
+ k[ii] = add(k[ii], k_bias_vec[ii]);
1315
+
1316
+ if (do_ia3) {
1317
+ k[ii] = mul<K_vec, K_vec, K_vec>(
1318
+ k[ii],
1319
+ *reinterpret_cast<const K_vec*>(
1320
+ &params.ia3_key_weights[(ia3_task_id * params.num_heads + hi) * Dh + ki
1321
+ + ii * THREADS_PER_KEY * K_VEC_SIZE]));
1322
+ }
1323
+
1324
+ if (Dh == Dh_MAX || jj * QK_ELTS_IN_16B < Dh * params.memory_max_len) {
1325
+ *reinterpret_cast<K_vec*>(&k_cache[jj * QK_ELTS_IN_16B]) = k[ii];
1326
+ }
1327
+ }
1328
+ }
1329
+ }
1330
+
1331
+ // Perform the dot product and normalize qk.
1332
+ //
1333
+ // WARNING: ALL THE THREADS OF A WARP MUST ENTER!!!
1334
+ float qk = Qk_dot<T, THREADS_PER_KEY>::dot(q_vec, k) * params.inv_sqrt_dh;
1335
+ bool is_mask = (params.masked_tokens != nullptr) && params.masked_tokens[bi_seq_len_offset + ti];
1336
+
1337
+ // Store the product to shared memory. There's one qk value per timestep. Update the max.
1338
+ // if( ti < params.timestep && tidx % THREADS_PER_KEY == 0 ) {
1339
+ if (ti < tlength && tidx % THREADS_PER_KEY == 0) {
1340
+ if (params.relative_attention_bias != nullptr) {
1341
+ qk = add(qk,
1342
+ params.relative_attention_bias[hi * params.relative_attention_bias_stride
1343
+ * params.relative_attention_bias_stride
1344
+ + tlength * params.relative_attention_bias_stride + ti]);
1345
+ }
1346
+ if (params.linear_bias_slopes != nullptr) {
1347
+ // Apply the linear position bias: (ki - qi) * slope[hi].
1348
+ // The padding tokens are located between the input context and the generated tokens.
1349
+ // We need to remove the number of padding tokens in the distance computation.
1350
+ // ti : 0 1 2 3 4 5 6 7 8 9(tlength)
1351
+ // token: i i i i p p p o o o where i=input, p=pad, o=output.
1352
+ // e.g. ti = 2, dist = (9 - 3) - 2 = 4.
1353
+ int max_context_length = params.max_prefix_prompt_length + params.max_input_length;
1354
+ float dist = (ti < max_context_length ? ti + padd_len : ti) - tlength;
1355
+
1356
+ qk += mul<float, T, float>(params.linear_bias_slopes[hi], dist);
1357
+ }
1358
+ qk_max = is_mask ? qk_max : fmaxf(qk_max, qk);
1359
+ qk_smem[ti - first_step] = qk;
1360
+ }
1361
+ }
1362
+
1363
+ // Perform the final reduction to compute the max inside each warp.
1364
+ //
1365
+ // NOTE: In a group of THREADS_PER_KEY threads, the leader already has the max value for the
1366
+ // group so it's not needed to run the reduction inside the group (again).
1367
+ #pragma unroll
1368
+ for (int mask = WARP_SIZE / 2; mask >= THREADS_PER_KEY; mask /= 2) {
1369
+ qk_max = fmaxf(qk_max, __shfl_xor_sync(uint32_t(-1), qk_max, mask));
1370
+ }
1371
+
1372
+ // Decompose the thread index into warp and lane.
1373
+ const int warp = tidx / WARP_SIZE;
1374
+ const int lane = tidx % WARP_SIZE;
1375
+
1376
+ // The warp leader writes the max to shared memory.
1377
+ if (lane == 0) {
1378
+ red_smem[warp] = qk_max;
1379
+ }
1380
+
1381
+ // Make sure the products are in shared memory.
1382
+ __syncthreads();
1383
+
1384
+ // The warps finalize the reduction.
1385
+ qk_max = lane < WARPS_PER_BLOCK ? red_smem[lane] : -FLT_MAX;
1386
+ #pragma unroll
1387
+ for (int mask = WARPS_PER_BLOCK / 2; mask >= 1; mask /= 2) {
1388
+ qk_max = fmaxf(qk_max, __shfl_xor_sync(uint32_t(-1), qk_max, mask));
1389
+ }
1390
+
1391
+ // Broadcast to all the threads in the warp.
1392
+ qk_max = __shfl_sync(uint32_t(-1), qk_max, 0);
1393
+
1394
+ // Compute the logits and start the sum.
1395
+ float sum = 0.f;
1396
+ // for( int ti = tidx; ti <= params.timestep; ti += THREADS_PER_BLOCK ) {
1397
+ for (int ti = first_step + tidx; ti <= tlength; ti += THREADS_PER_BLOCK) {
1398
+ bool is_mask = (params.masked_tokens != nullptr) && params.masked_tokens[bi_seq_len_offset + ti];
1399
+ float logit = is_mask ? 0.f : __expf(qk_smem[ti - first_step] - qk_max);
1400
+ sum += logit;
1401
+ qk_smem[ti - first_step] = logit;
1402
+ }
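+ // This is the usual numerically-stable softmax: exponentials are taken relative to the block-wide max
+ // computed above; the normalization by the total sum happens after the block-wide reduction below.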
1403
+
1404
+ // Compute the sum.
1405
+ sum = block_sum<WARPS_PER_BLOCK>(&red_smem[WARPS_PER_BLOCK], sum);
1406
+
1407
+ // Normalize the logits.
1408
+ float inv_sum = __fdividef(1.f, sum + 1.e-6f);
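+ // The small epsilon guards against a division by zero (e.g. if every timestep is masked out).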
1409
+ // for( int ti = tidx; ti <= params.timestep; ti += THREADS_PER_BLOCK ) {
1410
+ const size_t cross_attention_out_offset =
1411
+ params.is_return_cross_attentions ?
1412
+ bhi * params.max_decoder_seq_len * params.memory_max_len + params.timestep * params.memory_max_len :
1413
+ 0;
1414
+ for (int ti = first_step + tidx; ti <= tlength; ti += THREADS_PER_BLOCK) {
1415
+ float logit = qk_smem[ti - first_step] * inv_sum;
1416
+ if (params.is_return_cross_attentions) {
1417
+ params.cross_attention_out[cross_attention_out_offset + ti] = logit;
1418
+ }
1419
+ convert_from_float(logits_smem[ti - first_step], logit);
1420
+ }
1421
+
1422
+ // Put the values part below so we can leverage the __syncthreads
1423
+ // from the previous step
1424
+
1425
+ // The number of elements per vector.
1426
+ constexpr int V_VEC_SIZE = Dh_MAX / THREADS_PER_VALUE;
1427
+ // A vector of V elements for the current timestep.
1428
+ using V_vec = typename V_vec_<T, V_VEC_SIZE>::Type;
1429
+
1430
+ // The value computed by this thread.
1431
+ int vo = tidx / THREADS_PER_VALUE;
1432
+ // The hidden dimensions computed by this particular thread.
1433
+ int vi = tidx % THREADS_PER_VALUE * V_VEC_SIZE;
1434
+
1435
+ // The base pointer for the value in the cache buffer.
1436
+ T* v_cache = &params.v_cache[bhi_kv * params.memory_max_len * Dh + vi];
1437
+ // Base pointer for the beam's batch, before offsetting with indirection buffer
1438
+ T* v_cache_batch = &params.v_cache[bbhi * params.memory_max_len * Dh + vi];
1439
+
1440
+ // The number of values processed per iteration of the loop.
1441
+ constexpr int V_PER_ITER = THREADS_PER_BLOCK / THREADS_PER_VALUE;
1442
+
1443
+ // One group of threads computes the product(s) for the current timestep.
1444
+ V_vec v_bias;
1445
+ zero(v_bias);
1446
+ // if( vo == params.timestep % V_PER_ITER ) {
1447
+ if (Dh == Dh_MAX || vi < Dh) {
1448
+ if (handle_kv) {
1449
+ if (vo == tlength % V_PER_ITER) {
1450
+ // Trigger the loads from the V bias buffer.
1451
+ if (params.v_bias != nullptr) {
1452
+ v_bias = *reinterpret_cast<const V_vec*>(&params.v_bias[hi_kv * Dh + vi]);
1453
+ }
1454
+ if (DO_CROSS_ATTENTION) {
1455
+ *reinterpret_cast<V_vec*>(&bias_smem[vi]) = v_bias;
1456
+ }
1457
+ }
1458
+ }
1459
+ }
1460
+
1461
+ // Barrier carried over from the step above (before the values part).
1462
+ // Also make sure the logits are in shared memory.
1463
+ __syncthreads();
1464
+
1465
+ // Values continued
1466
+ #ifdef MMHA_USE_FP32_ACUM_FOR_OUT
1467
+ using V_vec_acum = typename V_vec_acum_fp32_<V_vec>::Type;
1468
+ #else
1469
+ using V_vec_acum = V_vec;
1470
+ #endif
1471
+ // The partial outputs computed by each thread.
1472
+ V_vec_acum out;
1473
+ zero(out);
1474
+
1475
+ // Loop over the timesteps to compute the partial outputs.
1476
+ // for( int ti = vo; ti < params.timestep; ti += V_PER_ITER ) {
1477
+ if (Dh == Dh_MAX || vi < Dh) {
1478
+ for (int ti = first_step + vo; ti < tlength; ti += V_PER_ITER) {
1479
+ const int ti_circ = ti % params.memory_max_len;
1480
+
1481
+ // Fetch offset based on cache_indir when beam sampling
1482
+ const int beam_src = (params.cache_indir != nullptr) ? params.cache_indir[bi_seq_len_offset + ti_circ] : 0;
1483
+ const int beam_offset = beam_src * params.num_heads * params.memory_max_len * Dh;
1484
+ // Load the values from the cache.
1485
+ V_vec v = *reinterpret_cast<const V_vec*>(&v_cache_batch[beam_offset + ti_circ * Dh]);
1486
+ if (DO_CROSS_ATTENTION && params.timestep == 0) {
1487
+ v = add(v, *reinterpret_cast<V_vec*>(&bias_smem[vi]));
1488
+ if (do_ia3) {
1489
+ v = mul<V_vec, V_vec, V_vec>(
1490
+ v,
1491
+ *reinterpret_cast<const V_vec*>(
1492
+ &params.ia3_value_weights[(ia3_task_id * params.num_heads + hi) * Dh + vi]));
1493
+ }
1494
+ *reinterpret_cast<V_vec*>(&v_cache[ti * Dh]) = v;
1495
+ }
1496
+ // Load the logits from shared memory.
1497
+ #if defined(MMHA_USE_FP32_ACUM_FOR_LOGITS)
1498
+ float logit = logits_smem[ti - first_step];
1499
+ out = fma(logit, cast_to_float(v), out);
1500
+ #else
1501
+ T logit = logits_smem[ti - first_step];
1502
+
1503
+ // Update the partial sums.
1504
+ out = fma(logit, v, out);
1505
+ #endif
1506
+ }
1507
+ }
1508
+
1509
+ // One group of threads computes the product(s) for the current timestep.
1510
+ // if( vo == params.timestep % V_PER_ITER ) {
1511
+ if (vo == tlength % V_PER_ITER && (Dh == Dh_MAX || vi < Dh)) {
1512
+
1513
+ V_vec v;
1514
+ if (DO_CROSS_ATTENTION) {
1515
+ v = *reinterpret_cast<const V_vec*>(&v_cache[tlength * Dh]);
1516
+ }
1517
+ else {
1518
+ // Trigger the loads from the V buffer.
1519
+ const auto v_offset = v_base_offset + vi;
1520
+ if (params.int8_mode == 2) {
1521
+ using Packed_Int8_t = typename packed_type<int8_t, num_elems<V_vec>::value>::type;
1522
+ using Packed_Float_t = typename packed_type<float, num_elems<V_vec>::value>::type;
1523
+ const auto v_scaling = params.qkv_scale_out[2];
1524
+ const auto v_quant =
1525
+ *reinterpret_cast<const Packed_Int8_t*>(&reinterpret_cast<const int8_t*>(params.v)[v_offset]);
1526
+
1527
+ convert_from_float(v, mul<Packed_Float_t, float>(v_scaling, float_from_int8(v_quant)));
1528
+ }
1529
+ else {
1530
+ v = *reinterpret_cast<const V_vec*>(&params.v[v_offset]);
1531
+ }
1532
+ // Trigger the loads from the V bias buffer.
1533
+ // V_vec v_bias = *reinterpret_cast<const V_vec*>(&params.v_bias[hi*Dh + vi]);
1534
+ }
1535
+
1536
+ // Compute the V values with bias.
1537
+ if (handle_kv) {
1538
+ v = add(v, v_bias);
1539
+
1540
+ if (do_ia3) {
1541
+ v = mul<V_vec, V_vec, V_vec>(
1542
+ v,
1543
+ *reinterpret_cast<const V_vec*>(
1544
+ &params.ia3_value_weights[(ia3_task_id * params.num_heads + hi) * Dh + vi]));
1545
+ }
1546
+
1547
+ // Store the values with bias back to global memory in the cache for V.
1548
+ if (hi % params.num_heads_q_kv_ratio == 0) {
1549
+ //*reinterpret_cast<V_vec*>(&v_cache[params.timestep*Dh]) = v;
1550
+ *reinterpret_cast<V_vec*>(&v_cache[tlength_circ * Dh]) = v;
1551
+ }
1552
+ }
1553
+
1554
+ // Initialize the output value with the current timestep.
1555
+ #if defined(MMHA_USE_FP32_ACUM_FOR_LOGITS)
1556
+ // out = fma(logits_smem[params.timestep], cast_to_float(v), out);
1557
+ out = fma(logits_smem[tlength - first_step], cast_to_float(v), out);
1558
+ #else
1559
+ // out = fma(logits_smem[params.timestep], v, out);
1560
+ out = fma(logits_smem[tlength - first_step], v, out);
1561
+ #endif
1562
+ }
1563
+
1564
+ // Make sure we can start writing to shared memory.
1565
+ __syncthreads();
1566
+
1567
+ // Run the final reduction amongst the different groups computing different partial outputs.
1568
+ if (Dh == Dh_MAX || vi < Dh) {
1569
+ #pragma unroll
1570
+ for (int active_groups = V_PER_ITER; active_groups >= 2; active_groups /= 2) {
1571
+
1572
+ // The midpoint in the number of active groups.
1573
+ int midpoint = active_groups / 2;
1574
+
1575
+ // The upper part of active threads store to shared memory.
1576
+ if (vo >= midpoint && vo < active_groups && (Dh == Dh_MAX || vi < Dh)) {
1577
+ #ifdef MMHA_USE_FP32_ACUM_FOR_OUT
1578
+ convert_from_float(*reinterpret_cast<V_vec*>(&out_smem[(vo - midpoint) * Dh + vi]), out);
1579
+ #else
1580
+ *reinterpret_cast<V_vec*>(&out_smem[(vo - midpoint) * Dh + vi]) = out;
1581
+ #endif
1582
+ }
1583
+ __syncthreads();
1584
+
1585
+ // The bottom warps update their values.
1586
+ if (vo < midpoint && (Dh == Dh_MAX || vi < Dh)) {
1587
+ out = add(*reinterpret_cast<const V_vec*>(&out_smem[vo * Dh + vi]), out);
1588
+ }
1589
+ __syncthreads();
1590
+ }
1591
+ }
1592
+
1593
+ // Output the final values.
1594
+ if (vo == 0 && (Dh == Dh_MAX || vi < Dh)) {
1595
+ #ifdef MMHA_USE_FP32_ACUM_FOR_OUT
1596
+ if (params.int8_mode == 2) {
1597
+ using Packed_Int8_t = typename packed_type<int8_t, num_elems<V_vec_acum>::value>::type;
1598
+ out = mul<V_vec_acum, float>(*params.attention_out_scale, out);
1599
+ *reinterpret_cast<Packed_Int8_t*>(&(reinterpret_cast<int8_t*>(params.out)[bhi * Dh + vi])) =
1600
+ cast_to_int8(out);
1601
+ }
1602
+ else {
1603
+ convert_from_float(*reinterpret_cast<V_vec*>(&params.out[bhi * Dh + vi]), out);
1604
+ }
1605
+ #else
1606
+ // TODO: support int8_mode?
1607
+ *reinterpret_cast<V_vec*>(&params.out[bhi * Dh + vi]) = out;
1608
+ #endif
1609
+ }
1610
+ }
1611
+
1612
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1613
+
1614
+ } // namespace mmha
1615
+
1616
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1617
+
1618
+ template<typename T, int Dh, int Dh_MAX, typename KERNEL_PARAMS_TYPE>
1619
+ void mmha_launch_kernel(const KERNEL_PARAMS_TYPE& params, const cudaStream_t& stream);
flash-attention/csrc/ft_attention/decoder_masked_multihead_attention_utils.h ADDED
@@ -0,0 +1,2017 @@
1
+ // Downloaded from FasterTransformer v5.2.1
2
+ // https://github.com/NVIDIA/FasterTransformer/blob/release/v5.2.1_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h
3
+ /*
4
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
5
+ *
6
+ * Licensed under the Apache License, Version 2.0 (the "License");
7
+ * you may not use this file except in compliance with the License.
8
+ * You may obtain a copy of the License at
9
+ *
10
+ * http://www.apache.org/licenses/LICENSE-2.0
11
+ *
12
+ * Unless required by applicable law or agreed to in writing, software
13
+ * distributed under the License is distributed on an "AS IS" BASIS,
14
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ * See the License for the specific language governing permissions and
16
+ * limitations under the License.
17
+ */
18
+
19
+ #pragma once
20
+
21
+ #include "cuda_bf16_wrapper.h"
22
+ #include "cuda_bf16_fallbacks.cuh"
23
+ #include <stdint.h>
24
+
25
+ using namespace fastertransformer;
26
+
27
+ namespace mmha {
28
+
29
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
30
+
31
+ struct Float8_ {
32
+ float2 x;
33
+ float2 y;
34
+ float2 z;
35
+ float2 w;
36
+ };
37
+
38
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
39
+
40
+ struct Float4_ {
41
+ float2 x;
42
+ float2 y;
43
+ };
44
+
45
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
46
+
47
+ #ifdef ENABLE_BF16
48
+ struct bf16_4_t {
49
+ __nv_bfloat162 x;
50
+ __nv_bfloat162 y;
51
+ };
52
+
53
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
54
+
55
+ struct bf16_8_t {
56
+ __nv_bfloat162 x;
57
+ __nv_bfloat162 y;
58
+ __nv_bfloat162 z;
59
+ __nv_bfloat162 w;
60
+ };
61
+ #endif
62
+
63
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
64
+
65
+ template<typename T>
66
+ struct num_elems;
67
+ template<>
68
+ struct num_elems<float> {
69
+ static constexpr int value = 1;
70
+ };
71
+ template<>
72
+ struct num_elems<float2> {
73
+ static constexpr int value = 2;
74
+ };
75
+ template<>
76
+ struct num_elems<float4> {
77
+ static constexpr int value = 4;
78
+ };
79
+ template<>
80
+ struct num_elems<Float4_> {
81
+ static constexpr int value = 4;
82
+ };
83
+ template<>
84
+ struct num_elems<Float8_> {
85
+ static constexpr int value = 8;
86
+ };
87
+
88
+ template<>
89
+ struct num_elems<uint32_t> {
90
+ static constexpr int value = 2;
91
+ };
92
+ template<>
93
+ struct num_elems<uint2> {
94
+ static constexpr int value = 4;
95
+ };
96
+ template<>
97
+ struct num_elems<uint4> {
98
+ static constexpr int value = 8;
99
+ };
100
+
101
+ #ifdef ENABLE_BF16
102
+ template<>
103
+ struct num_elems<__nv_bfloat162> {
104
+ static constexpr int value = 2;
105
+ };
106
+ template<>
107
+ struct num_elems<bf16_4_t> {
108
+ static constexpr int value = 4;
109
+ };
110
+ template<>
111
+ struct num_elems<bf16_8_t> {
112
+ static constexpr int value = 8;
113
+ };
114
+ #endif
115
+
116
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
117
+
118
+ template<typename T, int N>
119
+ struct packed_type;
120
+ template<typename T>
121
+ struct packed_type<T, 1> {
122
+ using type = T;
123
+ };
124
+ template<>
125
+ struct packed_type<int8_t, 2> {
126
+ using type = int16_t;
127
+ };
128
+ template<>
129
+ struct packed_type<int8_t, 4> {
130
+ using type = int32_t;
131
+ };
132
+ template<>
133
+ struct packed_type<int8_t, 8> {
134
+ using type = int64_t;
135
+ };
136
+
137
+ template<>
138
+ struct packed_type<float, 2> {
139
+ using type = float2;
140
+ };
141
+ template<>
142
+ struct packed_type<float, 4> {
143
+ using type = float4;
144
+ };
145
+ template<>
146
+ struct packed_type<float, 8> {
147
+ using type = Float8_;
148
+ };
149
+
150
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
151
+
152
+ inline __device__ float add(float a, float b)
153
+ {
154
+ return a + b;
155
+ }
156
+
157
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
158
+
159
+ inline __device__ float2 add(float2 a, float2 b)
160
+ {
161
+ float2 c;
162
+ c.x = add(a.x, b.x);
163
+ c.y = add(a.y, b.y);
164
+ return c;
165
+ }
166
+
167
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
168
+
169
+ inline __device__ float4 add(float4 a, float4 b)
170
+ {
171
+ float4 c;
172
+ c.x = add(a.x, b.x);
173
+ c.y = add(a.y, b.y);
174
+ c.z = add(a.z, b.z);
175
+ c.w = add(a.w, b.w);
176
+ return c;
177
+ }
178
+
179
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
180
+
181
+ #ifdef ENABLE_BF16
182
+ inline __device__ __nv_bfloat16 add(__nv_bfloat16 a, __nv_bfloat16 b)
183
+ {
184
+ return a + b;
185
+ }
186
+
187
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
188
+
189
+ inline __device__ __nv_bfloat162 add(__nv_bfloat162 a, __nv_bfloat162 b)
190
+ {
191
+ return bf16hadd2(a, b);
192
+ }
193
+
194
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
195
+
196
+ inline __device__ bf16_4_t add(bf16_4_t a, bf16_4_t b)
197
+ {
198
+ bf16_4_t c;
199
+ c.x = add(a.x, b.x);
200
+ c.y = add(a.y, b.y);
201
+ return c;
202
+ }
203
+
204
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
205
+
206
+ inline __device__ bf16_8_t add(bf16_8_t a, bf16_8_t b)
207
+ {
208
+ bf16_8_t c;
209
+ c.x = add(a.x, b.x);
210
+ c.y = add(a.y, b.y);
211
+ c.z = add(a.z, b.z);
212
+ c.w = add(a.w, b.w);
213
+ return c;
214
+ }
215
+ #endif // ENABLE_BF16
216
+
217
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
218
+
219
+ inline __device__ uint16_t add(uint16_t a, uint16_t b)
220
+ {
221
+ uint16_t c;
222
+ asm volatile("add.f16 %0, %1, %2;\n" : "=h"(c) : "h"(a), "h"(b));
223
+ return c;
224
+ }
225
+
226
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
227
+
228
+ inline __device__ uint32_t add(uint32_t a, uint32_t b)
229
+ {
230
+ uint32_t c;
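+ // Here uint32_t holds two packed fp16 values; add.f16x2 adds both lanes in one instruction.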
231
+ asm volatile("add.f16x2 %0, %1, %2;\n" : "=r"(c) : "r"(a), "r"(b));
232
+ return c;
233
+ }
234
+
235
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
236
+
237
+ inline __device__ uint2 add(uint2 a, uint2 b)
238
+ {
239
+ uint2 c;
240
+ c.x = add(a.x, b.x);
241
+ c.y = add(a.y, b.y);
242
+ return c;
243
+ }
244
+
245
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
246
+
247
+ inline __device__ uint4 add(uint4 a, uint4 b)
248
+ {
249
+ uint4 c;
250
+ c.x = add(a.x, b.x);
251
+ c.y = add(a.y, b.y);
252
+ c.z = add(a.z, b.z);
253
+ c.w = add(a.w, b.w);
254
+ return c;
255
+ }
256
+
257
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
258
+
259
+ inline __device__ uint16_t float_to_half(float f)
260
+ {
261
+ union {
262
+ uint32_t u32;
263
+ uint16_t u16[2];
264
+ } tmp;
265
+ #if 0 && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 // Is it better?
266
+ float zero = 0.f;
267
+ asm volatile("cvt.rn.f16x2.f32 %0, %1, %2;\n" : "=r"(tmp.u32) : "f"(zero), "f"(f));
268
+ #else
269
+ asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(tmp.u16[0]) : "f"(f));
270
+ #endif
271
+ return tmp.u16[0];
272
+ }
273
+
274
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
275
+
276
+ inline __device__ uint32_t float2_to_half2(float2 f)
277
+ {
278
+ union {
279
+ uint32_t u32;
280
+ uint16_t u16[2];
281
+ } tmp;
282
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
283
+ asm volatile("cvt.rn.f16x2.f32 %0, %1, %2;\n" : "=r"(tmp.u32) : "f"(f.y), "f"(f.x));
284
+ #else
285
+ asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(tmp.u16[0]) : "f"(f.x));
286
+ asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(tmp.u16[1]) : "f"(f.y));
287
+ #endif
288
+ return tmp.u32;
289
+ }
290
+
291
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
292
+
293
+ inline __device__ float half_to_float(uint16_t h)
294
+ {
295
+ float f;
296
+ asm volatile("cvt.f32.f16 %0, %1;\n" : "=f"(f) : "h"(h));
297
+ return f;
298
+ }
299
+
300
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
301
+
302
+ inline __device__ float2 half2_to_float2(uint32_t v)
303
+ {
304
+ uint16_t lo, hi;
305
+ asm volatile("mov.b32 {%0, %1}, %2;\n" : "=h"(lo), "=h"(hi) : "r"(v));
306
+ return make_float2(half_to_float(lo), half_to_float(hi));
307
+ }
308
+
309
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
310
+
311
+ inline __device__ float add(float a, uint16_t b)
312
+ {
313
+ return a + half_to_float(b);
314
+ }
315
+
316
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
317
+
318
+ #ifdef ENABLE_BF16
319
+ inline __device__ float add(float a, __nv_bfloat16 b)
320
+ {
321
+ return a + __bfloat162float(b);
322
+ }
323
+ #endif
324
+
325
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
326
+
327
+ inline __device__ float2 add(uint32_t a, float2 fb)
328
+ {
329
+ float2 fa = half2_to_float2(a);
330
+ return add(fa, fb);
331
+ }
332
+
333
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
334
+
335
+ inline __device__ Float4_ add(uint2 a, Float4_ fb)
336
+ {
337
+ Float4_ fc;
338
+ fc.x = add(a.x, fb.x);
339
+ fc.y = add(a.y, fb.y);
340
+ return fc;
341
+ }
342
+
343
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
344
+
345
+ inline __device__ Float8_ add(uint4 a, Float8_ fb)
346
+ {
347
+ Float8_ fc;
348
+ fc.x = add(a.x, fb.x);
349
+ fc.y = add(a.y, fb.y);
350
+ fc.z = add(a.z, fb.z);
351
+ fc.w = add(a.w, fb.w);
352
+ return fc;
353
+ }
354
+
355
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
356
+
357
+ inline __device__ uint32_t h0_h0(uint16_t a)
358
+ {
359
+ uint32_t b;
360
+ asm volatile("mov.b32 %0, {%1, %1};" : "=r"(b) : "h"(a));
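+ // b now holds the fp16 value broadcast into both 16-bit halves (a packed half2).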
361
+ return b;
362
+ }
363
+
364
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
365
+
366
+ inline __device__ float fma(float a, float b, float c)
367
+ {
368
+ return a * b + c;
369
+ }
370
+
371
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
372
+
373
+ inline __device__ float2 fma(float2 a, float2 b, float2 c)
374
+ {
375
+ float2 d;
376
+ d.x = fma(a.x, b.x, c.x);
377
+ d.y = fma(a.y, b.y, c.y);
378
+ return d;
379
+ }
380
+
381
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
382
+
383
+ inline __device__ float2 fma(float a, float2 b, float2 c)
384
+ {
385
+ float2 d;
386
+ d.x = fma(a, b.x, c.x);
387
+ d.y = fma(a, b.y, c.y);
388
+ return d;
389
+ }
390
+
391
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
392
+
393
+ inline __device__ float4 fma(float4 a, float4 b, float4 c)
394
+ {
395
+ float4 d;
396
+ d.x = fma(a.x, b.x, c.x);
397
+ d.y = fma(a.y, b.y, c.y);
398
+ d.z = fma(a.z, b.z, c.z);
399
+ d.w = fma(a.w, b.w, c.w);
400
+ return d;
401
+ }
402
+
403
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
404
+
405
+ inline __device__ float4 fma(float a, float4 b, float4 c)
406
+ {
407
+ float4 d;
408
+ d.x = fma(a, b.x, c.x);
409
+ d.y = fma(a, b.y, c.y);
410
+ d.z = fma(a, b.z, c.z);
411
+ d.w = fma(a, b.w, c.w);
412
+ return d;
413
+ }
414
+
415
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
416
+
417
+ inline __device__ Float4_ fma(float a, Float4_ b, Float4_ c)
418
+ {
419
+ Float4_ d;
420
+ d.x = fma(a, b.x, c.x);
421
+ d.y = fma(a, b.y, c.y);
422
+ return d;
423
+ }
424
+
425
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
426
+
427
+ inline __device__ Float8_ fma(float a, Float8_ b, Float8_ c)
428
+ {
429
+ Float8_ d;
430
+ d.x = fma(a, b.x, c.x);
431
+ d.y = fma(a, b.y, c.y);
432
+ d.z = fma(a, b.z, c.z);
433
+ d.w = fma(a, b.w, c.w);
434
+ return d;
435
+ }
436
+
437
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
438
+
439
+ #ifdef ENABLE_BF16
440
+ inline __device__ float2 add(__nv_bfloat162 a, float2 fb)
441
+ {
442
+ float2 fa = bf1622float2(a);
443
+ return add(fa, fb);
444
+ }
445
+
446
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
447
+
448
+ inline __device__ Float4_ add(bf16_4_t a, Float4_ fb)
449
+ {
450
+ Float4_ fc;
451
+ fc.x = add(a.x, fb.x);
452
+ fc.y = add(a.y, fb.y);
453
+ return fc;
454
+ }
455
+
456
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
457
+
458
+ inline __device__ Float8_ add(bf16_8_t a, Float8_ fb)
459
+ {
460
+ Float8_ fc;
461
+ fc.x = add(a.x, fb.x);
462
+ fc.y = add(a.y, fb.y);
463
+ fc.z = add(a.z, fb.z);
464
+ fc.w = add(a.w, fb.w);
465
+ return fc;
466
+ }
467
+ #endif // ENABLE_BF16
468
+
469
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
470
+
471
+ inline __device__ uint32_t fma(uint32_t a, uint32_t b, uint32_t c)
472
+ {
473
+ uint32_t d;
474
+ asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(d) : "r"(a), "r"(b), "r"(c));
475
+ return d;
476
+ }
477
+
478
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
479
+
480
+ inline __device__ uint32_t fma(uint16_t a, uint32_t b, uint32_t c)
481
+ {
482
+ return fma(h0_h0(a), b, c);
483
+ }
484
+
485
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
486
+
487
+ inline __device__ uint2 fma(uint2 a, uint2 b, uint2 c)
488
+ {
489
+ uint2 d;
490
+ d.x = fma(a.x, b.x, c.x);
491
+ d.y = fma(a.y, b.y, c.y);
492
+ return d;
493
+ }
494
+
495
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
496
+
497
+ inline __device__ uint2 fma(uint16_t a, uint2 b, uint2 c)
498
+ {
499
+ uint32_t s = h0_h0(a);
500
+ uint2 d;
501
+ d.x = fma(s, b.x, c.x);
502
+ d.y = fma(s, b.y, c.y);
503
+ return d;
504
+ }
505
+
506
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
507
+
508
+ inline __device__ uint4 fma(uint4 a, uint4 b, uint4 c)
509
+ {
510
+ uint4 d;
511
+ d.x = fma(a.x, b.x, c.x);
512
+ d.y = fma(a.y, b.y, c.y);
513
+ d.z = fma(a.z, b.z, c.z);
514
+ d.w = fma(a.w, b.w, c.w);
515
+ return d;
516
+ }
517
+
518
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
519
+
520
+ inline __device__ uint4 fma(uint16_t a, uint4 b, uint4 c)
521
+ {
522
+ uint32_t s = h0_h0(a);
523
+ uint4 d;
524
+ d.x = fma(s, b.x, c.x);
525
+ d.y = fma(s, b.y, c.y);
526
+ d.z = fma(s, b.z, c.z);
527
+ d.w = fma(s, b.w, c.w);
528
+ return d;
529
+ }
530
+
531
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
532
+
533
+ inline __device__ float fma(uint16_t a, uint16_t b, float fc)
534
+ {
535
+ float fa = half_to_float(a);
536
+ float fb = half_to_float(b);
537
+ return fa * fb + fc;
538
+ }
539
+
540
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
541
+
542
+ inline __device__ float2 fma(uint32_t a, uint32_t b, float2 fc)
543
+ {
544
+ float2 fa = half2_to_float2(a);
545
+ float2 fb = half2_to_float2(b);
546
+ return fma(fa, fb, fc);
547
+ }
548
+
549
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
550
+
551
+ inline __device__ float2 fma(uint16_t a, uint32_t b, float2 fc)
552
+ {
553
+ return fma(h0_h0(a), b, fc);
554
+ }
555
+
556
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
557
+
558
+ inline __device__ Float4_ fma(uint2 a, uint2 b, Float4_ fc)
559
+ {
560
+ Float4_ fd;
561
+ fd.x = fma(a.x, b.x, fc.x);
562
+ fd.y = fma(a.y, b.y, fc.y);
563
+ return fd;
564
+ }
565
+
566
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
567
+
568
+ inline __device__ Float4_ fma(uint16_t a, uint2 b, Float4_ fc)
569
+ {
570
+ uint32_t s = h0_h0(a);
571
+ Float4_ fd;
572
+ fd.x = fma(s, b.x, fc.x);
573
+ fd.y = fma(s, b.y, fc.y);
574
+ return fd;
575
+ }
576
+
577
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
578
+
579
+ inline __device__ Float8_ fma(uint4 a, uint4 b, Float8_ fc)
580
+ {
581
+ Float8_ fd;
582
+ fd.x = fma(a.x, b.x, fc.x);
583
+ fd.y = fma(a.y, b.y, fc.y);
584
+ fd.z = fma(a.z, b.z, fc.z);
585
+ fd.w = fma(a.w, b.w, fc.w);
586
+ return fd;
587
+ }
588
+
589
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
590
+
591
+ inline __device__ Float8_ fma(uint16_t a, uint4 b, Float8_ fc)
592
+ {
593
+ uint32_t s = h0_h0(a);
594
+ Float8_ fd;
595
+ fd.x = fma(s, b.x, fc.x);
596
+ fd.y = fma(s, b.y, fc.y);
597
+ fd.z = fma(s, b.z, fc.z);
598
+ fd.w = fma(s, b.w, fc.w);
599
+ return fd;
600
+ }
601
+
602
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
603
+ #ifdef ENABLE_BF16
604
+ inline __device__ __nv_bfloat162 fma(__nv_bfloat162 a, __nv_bfloat162 b, __nv_bfloat162 c)
605
+ {
606
+ return bf16hfma2(a, b, c);
607
+ }
608
+
609
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
610
+
611
+ inline __device__ __nv_bfloat162 fma(__nv_bfloat16 a, __nv_bfloat162 b, __nv_bfloat162 c)
612
+ {
613
+ return bf16hfma2(bf162bf162(a), b, c);
614
+ }
615
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
616
+
617
+ inline __device__ bf16_4_t fma(bf16_4_t a, bf16_4_t b, bf16_4_t c)
618
+ {
619
+ bf16_4_t d;
620
+ d.x = fma(a.x, b.x, c.x);
621
+ d.y = fma(a.y, b.y, c.y);
622
+ return d;
623
+ }
624
+
625
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
626
+
627
+ inline __device__ bf16_4_t fma(__nv_bfloat16 a, bf16_4_t b, bf16_4_t c)
628
+ {
629
+ __nv_bfloat162 s = bf162bf162(a);
630
+ bf16_4_t d;
631
+ d.x = fma(s, b.x, c.x);
632
+ d.y = fma(s, b.y, c.y);
633
+ return d;
634
+ }
635
+
636
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
637
+
638
+ inline __device__ bf16_8_t fma(bf16_8_t a, bf16_8_t b, bf16_8_t c)
639
+ {
640
+ bf16_8_t d;
641
+ d.x = fma(a.x, b.x, c.x);
642
+ d.y = fma(a.y, b.y, c.y);
643
+ d.z = fma(a.z, b.z, c.z);
644
+ d.w = fma(a.w, b.w, c.w);
645
+ return d;
646
+ }
647
+
648
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
649
+
650
+ inline __device__ bf16_8_t fma(__nv_bfloat16 a, bf16_8_t b, bf16_8_t c)
651
+ {
652
+ __nv_bfloat162 s = bf162bf162(a);
653
+ bf16_8_t d;
654
+ d.x = fma(s, b.x, c.x);
655
+ d.y = fma(s, b.y, c.y);
656
+ d.z = fma(s, b.z, c.z);
657
+ d.w = fma(s, b.w, c.w);
658
+ return d;
659
+ }
660
+
661
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
662
+
663
+ inline __device__ float fma(__nv_bfloat16 a, __nv_bfloat16 b, float fc)
664
+ {
665
+ return __bfloat162float(a) * __bfloat162float(b) + fc;
666
+ }
667
+
668
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
669
+
670
+ inline __device__ float2 fma(__nv_bfloat162 a, __nv_bfloat162 b, float2 fc)
671
+ {
672
+ float2 fa = bf1622float2(a);
673
+ float2 fb = bf1622float2(b);
674
+ return fma(fa, fb, fc);
675
+ }
676
+
677
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
678
+
679
+ inline __device__ float2 fma(__nv_bfloat16 a, __nv_bfloat162 b, float2 fc)
680
+ {
681
+ return fma(bf162bf162(a), b, fc);
682
+ }
683
+
684
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
685
+
686
+ inline __device__ Float4_ fma(bf16_4_t a, bf16_4_t b, Float4_ fc)
687
+ {
688
+ Float4_ fd;
689
+ fd.x = fma(a.x, b.x, fc.x);
690
+ fd.y = fma(a.y, b.y, fc.y);
691
+ return fd;
692
+ }
693
+
694
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
695
+
696
+ inline __device__ Float4_ fma(__nv_bfloat16 a, bf16_4_t b, Float4_ fc)
697
+ {
698
+ __nv_bfloat162 s = bf162bf162(a);
699
+ Float4_ fd;
700
+ fd.x = fma(s, b.x, fc.x);
701
+ fd.y = fma(s, b.y, fc.y);
702
+ return fd;
703
+ }
704
+
705
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
706
+
707
+ inline __device__ Float8_ fma(bf16_8_t a, bf16_8_t b, Float8_ fc)
708
+ {
709
+ Float8_ fd;
710
+ fd.x = fma(a.x, b.x, fc.x);
711
+ fd.y = fma(a.y, b.y, fc.y);
712
+ fd.z = fma(a.z, b.z, fc.z);
713
+ fd.w = fma(a.w, b.w, fc.w);
714
+ return fd;
715
+ }
716
+
717
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
718
+
719
+ inline __device__ Float8_ fma(__nv_bfloat16 a, bf16_8_t b, Float8_ fc)
720
+ {
721
+ __nv_bfloat162 s = bf162bf162(a);
722
+ Float8_ fd;
723
+ fd.x = fma(s, b.x, fc.x);
724
+ fd.y = fma(s, b.y, fc.y);
725
+ fd.z = fma(s, b.z, fc.z);
726
+ fd.w = fma(s, b.w, fc.w);
727
+ return fd;
728
+ }
729
+ #endif // ENABLE_BF16
730
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
731
+
732
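+ // Element-wise multiply with an explicit result/accumulator type `Acc`. The
+ // specializations below cover float, packed fp16 (uint16_t / uint32_t / uint2 / uint4)
+ // and, under ENABLE_BF16, packed bf16 -- optionally widening the result to float.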
+ template<typename Acc, typename A, typename B>
733
+ inline __device__ Acc mul(A a, B b)
734
+ {
735
+ return a * b;
736
+ }
737
+
738
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
739
+
740
+ template<>
741
+ inline __device__ float mul<float, float>(float a, float b)
742
+ {
743
+ return a * b;
744
+ }
745
+
746
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
747
+
748
+ template<>
749
+ inline __device__ float2 mul(float2 a, float2 b)
750
+ {
751
+ float2 c;
752
+ c.x = a.x * b.x;
753
+ c.y = a.y * b.y;
754
+ return c;
755
+ }
756
+
757
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
758
+
759
+ template<>
760
+ inline __device__ float2 mul(float a, float2 b)
761
+ {
762
+ float2 c;
763
+ c.x = a * b.x;
764
+ c.y = a * b.y;
765
+ return c;
766
+ }
767
+
768
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
769
+
770
+ template<>
771
+ inline __device__ float4 mul(float4 a, float4 b)
772
+ {
773
+ float4 c;
774
+ c.x = a.x * b.x;
775
+ c.y = a.y * b.y;
776
+ c.z = a.z * b.z;
777
+ c.w = a.w * b.w;
778
+ return c;
779
+ }
780
+
781
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
782
+
783
+ template<>
784
+ inline __device__ float4 mul(float a, float4 b)
785
+ {
786
+ float4 c;
787
+ c.x = a * b.x;
788
+ c.y = a * b.y;
789
+ c.z = a * b.z;
790
+ c.w = a * b.w;
791
+ return c;
792
+ }
793
+
794
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
795
+
796
+ template<>
797
+ inline __device__ Float8_ mul(float a, Float8_ b)
798
+ {
799
+ Float8_ c;
800
+ c.x = make_float2(a * b.x.x, a * b.x.y);
801
+ c.y = make_float2(a * b.y.x, a * b.y.y);
802
+ c.z = make_float2(a * b.z.x, a * b.z.y);
803
+ c.w = make_float2(a * b.w.x, a * b.w.y);
804
+ return c;
805
+ }
806
+
807
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
808
+
809
+ template<>
810
+ inline __device__ uint16_t mul(uint16_t a, uint16_t b)
811
+ {
812
+ uint16_t c;
813
+ asm volatile("mul.f16 %0, %1, %2;\n" : "=h"(c) : "h"(a), "h"(b));
814
+ return c;
815
+ }
816
+
817
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
818
+
819
+ template<>
820
+ inline __device__ uint32_t mul(uint32_t a, uint32_t b)
821
+ {
822
+ uint32_t c;
823
+ asm volatile("mul.f16x2 %0, %1, %2;\n" : "=r"(c) : "r"(a), "r"(b));
824
+ return c;
825
+ }
826
+
827
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
828
+
829
+ template<>
830
+ inline __device__ uint32_t mul(uint16_t a, uint32_t b)
831
+ {
832
+ return mul<uint32_t, uint32_t, uint32_t>(h0_h0(a), b);
833
+ }
834
+
835
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
836
+
837
+ template<>
838
+ inline __device__ uint2 mul(uint2 a, uint2 b)
839
+ {
840
+ uint2 c;
841
+ c.x = mul<uint32_t, uint32_t, uint32_t>(a.x, b.x);
842
+ c.y = mul<uint32_t, uint32_t, uint32_t>(a.y, b.y);
843
+ return c;
844
+ }
845
+
846
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
847
+
848
+ template<>
849
+ inline __device__ uint2 mul(uint16_t a, uint2 b)
850
+ {
851
+ uint32_t s = h0_h0(a);
852
+ uint2 c;
853
+ c.x = mul<uint32_t, uint32_t, uint32_t>(s, b.x);
854
+ c.y = mul<uint32_t, uint32_t, uint32_t>(s, b.y);
855
+ return c;
856
+ }
857
+
858
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
859
+
860
+ template<>
861
+ inline __device__ uint4 mul(uint4 a, uint4 b)
862
+ {
863
+ uint4 c;
864
+ c.x = mul<uint32_t, uint32_t, uint32_t>(a.x, b.x);
865
+ c.y = mul<uint32_t, uint32_t, uint32_t>(a.y, b.y);
866
+ c.z = mul<uint32_t, uint32_t, uint32_t>(a.z, b.z);
867
+ c.w = mul<uint32_t, uint32_t, uint32_t>(a.w, b.w);
868
+ return c;
869
+ }
870
+
871
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
872
+
873
+ template<>
874
+ inline __device__ uint4 mul(uint16_t a, uint4 b)
875
+ {
876
+ uint32_t s = h0_h0(a);
877
+ uint4 c;
878
+ c.x = mul<uint32_t, uint32_t, uint32_t>(s, b.x);
879
+ c.y = mul<uint32_t, uint32_t, uint32_t>(s, b.y);
880
+ c.z = mul<uint32_t, uint32_t, uint32_t>(s, b.z);
881
+ c.w = mul<uint32_t, uint32_t, uint32_t>(s, b.w);
882
+ return c;
883
+ }
884
+
885
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
886
+
887
+ template<>
888
+ inline __device__ float mul(uint16_t a, uint16_t b)
889
+ {
890
+ float fa = half_to_float(a);
891
+ float fb = half_to_float(b);
892
+ return fa * fb;
893
+ }
894
+
895
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
896
+
897
+ template<>
898
+ inline __device__ float mul(uint16_t a, float b)
899
+ {
900
+ return half_to_float(a) * b;
901
+ }
902
+
903
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
904
+
905
+ template<>
906
+ inline __device__ float2 mul(uint32_t a, uint32_t b)
907
+ {
908
+ float2 fa = half2_to_float2(a);
909
+ float2 fb = half2_to_float2(b);
910
+ return mul<float2, float2, float2>(fa, fb);
911
+ }
912
+
913
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
914
+
915
+ template<>
916
+ inline __device__ float2 mul(uint16_t a, uint32_t b)
917
+ {
918
+ return mul<float2, uint32_t, uint32_t>(h0_h0(a), b);
919
+ }
920
+
921
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
922
+
923
+ template<>
924
+ inline __device__ Float4_ mul(uint2 a, uint2 b)
925
+ {
926
+ Float4_ fc;
927
+ fc.x = mul<float2, uint32_t, uint32_t>(a.x, b.x);
928
+ fc.y = mul<float2, uint32_t, uint32_t>(a.y, b.y);
929
+ return fc;
930
+ }
931
+
932
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
933
+
934
+ template<>
935
+ inline __device__ Float4_ mul(uint16_t a, uint2 b)
936
+ {
937
+ uint32_t s = h0_h0(a);
938
+ Float4_ fc;
939
+ fc.x = mul<float2, uint32_t, uint32_t>(s, b.x);
940
+ fc.y = mul<float2, uint32_t, uint32_t>(s, b.y);
941
+ return fc;
942
+ }
943
+
944
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
945
+
946
+ template<>
947
+ inline __device__ Float8_ mul(uint4 a, uint4 b)
948
+ {
949
+ Float8_ fc;
950
+ fc.x = mul<float2, uint32_t, uint32_t>(a.x, b.x);
951
+ fc.y = mul<float2, uint32_t, uint32_t>(a.y, b.y);
952
+ fc.z = mul<float2, uint32_t, uint32_t>(a.z, b.z);
953
+ fc.w = mul<float2, uint32_t, uint32_t>(a.w, b.w);
954
+ return fc;
955
+ }
956
+
957
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
958
+
959
+ template<>
960
+ inline __device__ Float8_ mul(uint16_t a, uint4 b)
961
+ {
962
+ uint32_t s = h0_h0(a);
963
+ Float8_ fc;
964
+ fc.x = mul<float2, uint32_t, uint32_t>(s, b.x);
965
+ fc.y = mul<float2, uint32_t, uint32_t>(s, b.y);
966
+ fc.z = mul<float2, uint32_t, uint32_t>(s, b.z);
967
+ fc.w = mul<float2, uint32_t, uint32_t>(s, b.w);
968
+ return fc;
969
+ }
970
+
971
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
972
+
973
+ #ifdef ENABLE_BF16
974
+ template<>
975
+ inline __device__ __nv_bfloat16 mul(__nv_bfloat16 a, __nv_bfloat16 b)
976
+ {
977
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
978
+ return __hmul(a, b);
979
+ #else
980
+ return bf16hmul(a, b);
981
+ #endif
982
+ }
983
+
984
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
985
+
986
+ template<>
987
+ inline __device__ __nv_bfloat162 mul(__nv_bfloat162 a, __nv_bfloat162 b)
988
+ {
989
+ return bf16hmul2(a, b);
990
+ }
991
+
992
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
993
+
994
+ template<>
995
+ inline __device__ __nv_bfloat162 mul(__nv_bfloat16 a, __nv_bfloat162 b)
996
+ {
997
+ return mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(bf162bf162(a), b);
998
+ }
999
+
1000
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1001
+
1002
+ template<>
1003
+ inline __device__ bf16_4_t mul(bf16_4_t a, bf16_4_t b)
1004
+ {
1005
+ bf16_4_t c;
1006
+ c.x = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(a.x, b.x);
1007
+ c.y = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(a.y, b.y);
1008
+ return c;
1009
+ }
1010
+
1011
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1012
+
1013
+ template<>
1014
+ inline __device__ bf16_4_t mul(__nv_bfloat16 a, bf16_4_t b)
1015
+ {
1016
+ __nv_bfloat162 s = bf162bf162(a);
1017
+ bf16_4_t c;
1018
+ c.x = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(s, b.x);
1019
+ c.y = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(s, b.y);
1020
+ return c;
1021
+ }
1022
+
1023
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1024
+
1025
+ template<>
1026
+ inline __device__ bf16_8_t mul(bf16_8_t a, bf16_8_t b)
1027
+ {
1028
+ bf16_8_t c;
1029
+ c.x = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(a.x, b.x);
1030
+ c.y = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(a.y, b.y);
1031
+ c.z = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(a.z, b.z);
1032
+ c.w = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(a.w, b.w);
1033
+ return c;
1034
+ }
1035
+
1036
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1037
+
1038
+ template<>
1039
+ inline __device__ bf16_8_t mul(__nv_bfloat16 a, bf16_8_t b)
1040
+ {
1041
+ __nv_bfloat162 s = bf162bf162(a);
1042
+ bf16_8_t c;
1043
+ c.x = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(s, b.x);
1044
+ c.y = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(s, b.y);
1045
+ c.z = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(s, b.z);
1046
+ c.w = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(s, b.w);
1047
+ return c;
1048
+ }
1049
+
1050
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1051
+
1052
+ template<>
1053
+ inline __device__ float mul(__nv_bfloat16 a, __nv_bfloat16 b)
1054
+ {
1055
+ float fa = (float)a;
1056
+ float fb = (float)b;
1057
+ return fa * fb;
1058
+ }
1059
+
1060
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1061
+
1062
+ template<>
1063
+ inline __device__ float mul(__nv_bfloat16 a, float b)
1064
+ {
1065
+ return __bfloat162float(a) * b;
1066
+ }
1067
+
1068
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1069
+
1070
+ template<>
1071
+ inline __device__ float2 mul(__nv_bfloat162 a, __nv_bfloat162 b)
1072
+ {
1073
+ float2 fa = bf1622float2(a);
1074
+ float2 fb = bf1622float2(b);
1075
+ return mul<float2, float2, float2>(fa, fb);
1076
+ }
1077
+
1078
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1079
+
1080
+ template<>
1081
+ inline __device__ float2 mul(__nv_bfloat16 a, __nv_bfloat162 b)
1082
+ {
1083
+ return mul<float2, __nv_bfloat162, __nv_bfloat162>(bf162bf162(a), b);
1084
+ }
1085
+
1086
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1087
+
1088
+ template<>
1089
+ inline __device__ Float4_ mul(bf16_4_t a, bf16_4_t b)
1090
+ {
1091
+ Float4_ fc;
1092
+ fc.x = mul<float2, __nv_bfloat162, __nv_bfloat162>(a.x, b.x);
1093
+ fc.y = mul<float2, __nv_bfloat162, __nv_bfloat162>(a.y, b.y);
1094
+ return fc;
1095
+ }
1096
+
1097
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1098
+
1099
+ template<>
1100
+ inline __device__ Float4_ mul(__nv_bfloat16 a, bf16_4_t b)
1101
+ {
1102
+ __nv_bfloat162 s = bf162bf162(a);
1103
+ Float4_ fc;
1104
+ fc.x = mul<float2, __nv_bfloat162, __nv_bfloat162>(s, b.x);
1105
+ fc.y = mul<float2, __nv_bfloat162, __nv_bfloat162>(s, b.y);
1106
+ return fc;
1107
+ }
1108
+
1109
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1110
+
1111
+ template<>
1112
+ inline __device__ Float8_ mul(bf16_8_t a, bf16_8_t b)
1113
+ {
1114
+ Float8_ fc;
1115
+ fc.x = mul<float2, __nv_bfloat162, __nv_bfloat162>(a.x, b.x);
1116
+ fc.y = mul<float2, __nv_bfloat162, __nv_bfloat162>(a.y, b.y);
1117
+ fc.z = mul<float2, __nv_bfloat162, __nv_bfloat162>(a.z, b.z);
1118
+ fc.w = mul<float2, __nv_bfloat162, __nv_bfloat162>(a.w, b.w);
1119
+ return fc;
1120
+ }
1121
+
1122
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1123
+
1124
+ template<>
1125
+ inline __device__ Float8_ mul(__nv_bfloat16 a, bf16_8_t b)
1126
+ {
1127
+ __nv_bfloat162 s = bf162bf162(a);
1128
+ Float8_ fc;
1129
+ fc.x = mul<float2, __nv_bfloat162, __nv_bfloat162>(s, b.x);
1130
+ fc.y = mul<float2, __nv_bfloat162, __nv_bfloat162>(s, b.y);
1131
+ fc.z = mul<float2, __nv_bfloat162, __nv_bfloat162>(s, b.z);
1132
+ fc.w = mul<float2, __nv_bfloat162, __nv_bfloat162>(s, b.w);
1133
+ return fc;
1134
+ }
1135
+ #endif // ENABLE_BF16
1136
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1137
+
1138
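+ // Horizontal reductions: add every lane of a scalar or packed vector and return
+ // the total as a single float (packed fp16 lanes are added pairwise in fp16
+ // first, then converted).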
+ inline __device__ float sum(float v)
1139
+ {
1140
+ return v;
1141
+ }
1142
+
1143
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1144
+
1145
+ inline __device__ float sum(float2 v)
1146
+ {
1147
+ return v.x + v.y;
1148
+ }
1149
+
1150
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1151
+
1152
+ inline __device__ float sum(float4 v)
1153
+ {
1154
+ return v.x + v.y + v.z + v.w;
1155
+ }
1156
+
1157
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1158
+
1159
+ #ifdef ENABLE_BF16
1160
+ inline __device__ float sum(__nv_bfloat162 v)
1161
+ {
1162
+ float2 vf = bf1622float2(v);
1163
+ return vf.x + vf.y;
1164
+ }
1165
+
1166
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1167
+
1168
+ inline __device__ float sum(bf16_4_t v)
1169
+ {
1170
+ return sum(v.x) + sum(v.y);
1171
+ }
1172
+
1173
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1174
+
1175
+ inline __device__ float sum(bf16_8_t v)
1176
+ {
1177
+ return sum(v.x) + sum(v.y) + sum(v.z) + sum(v.w);
1178
+ }
1179
+ #endif // ENABLE_BF16
1180
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1181
+
1182
+ inline __device__ float sum(uint16_t v)
1183
+ {
1184
+ return half_to_float(v);
1185
+ }
1186
+
1187
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1188
+
1189
+ inline __device__ float sum(uint32_t v)
1190
+ {
1191
+ float2 tmp = half2_to_float2(v);
1192
+ return tmp.x + tmp.y;
1193
+ }
1194
+
1195
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1196
+
1197
+ inline __device__ float sum(uint2 v)
1198
+ {
1199
+ uint32_t c = add(v.x, v.y);
1200
+ return sum(c);
1201
+ }
1202
+
1203
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1204
+
1205
+ inline __device__ float sum(uint4 v)
1206
+ {
1207
+ #if 1
1208
+ uint32_t c = add(v.x, v.y);
1209
+ c = add(c, v.z);
1210
+ c = add(c, v.w);
1211
+ #else
1212
+ uint32_t c = add(v.x, v.y);
1213
+ uint32_t d = add(v.z, v.w);
1214
+ c = add(c, d);
1215
+ #endif
1216
+ return sum(c);
1217
+ }
1218
+
1219
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1220
+
1221
+ inline __device__ float sum(Float4_ v)
1222
+ {
1223
+ return v.x.x + v.x.y + v.y.x + v.y.y;
1224
+ }
1225
+
1226
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1227
+
1228
+ inline __device__ float sum(Float8_ v)
1229
+ {
1230
+ return v.x.x + v.x.y + v.y.x + v.y.y + v.z.x + v.z.y + v.w.x + v.w.y;
1231
+ }
1232
+
1233
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1234
+
1235
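+ // dot(a, b) == sum(mul(a, b)). The two-parameter form lets the caller choose the
+ // accumulator type, e.g. (illustrative only, not from the original file):
+ //   float qk = dot<Float8_, uint4>(q_frag, k_frag);  // 8 fp16 lanes, accumulated in float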
+ template<typename T>
1236
+ inline __device__ float dot(T a, T b)
1237
+ {
1238
+ return sum(mul<T, T, T>(a, b));
1239
+ }
1240
+
1241
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1242
+
1243
+ template<typename A, typename T>
1244
+ inline __device__ float dot(T a, T b)
1245
+ {
1246
+ return sum(mul<A, T, T>(a, b));
1247
+ }
1248
+
1249
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1250
+
1251
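+ // zero(): clear a register vector. The generic version writes 32-bit words through
+ // a union (so it assumes sizeof(T) is a multiple of 4); the uint16_t overload
+ // handles the sub-word case.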
+ inline __device__ void zero(uint16_t& dst)
1252
+ {
1253
+ dst = uint16_t(0);
1254
+ }
1255
+
1256
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1257
+
1258
+ template<typename T>
1259
+ inline __device__ void zero(T& dst)
1260
+ {
1261
+ constexpr int WORDS = sizeof(T) / 4;
1262
+ union {
1263
+ T raw;
1264
+ uint32_t words[WORDS];
1265
+ } tmp;
1266
+ #pragma unroll
1267
+ for (int ii = 0; ii < WORDS; ++ii) {
1268
+ tmp.words[ii] = 0u;
1269
+ }
1270
+ dst = tmp.raw;
1271
+ }
1272
+
1273
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
1274
+
1275
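+ // Rotary embedding (RoPE): for channel pair `zid` at time step `t_step`, the
+ // rotation angle is t_step / base^(zid / rot_embed_dim); the transform below
+ // applies the 2D rotation (x, y) -> (x*cos - y*sin, x*sin + y*cos).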
+ inline __device__ float2 rotary_embedding_coefficient(const int zid, const int rot_embed_dim, const int t_step, const float base)
1276
+ {
1277
+ const float pos_idx_inv_freq = t_step / pow(base, zid / (float)rot_embed_dim);
1278
+ return {cos(pos_idx_inv_freq), sin(pos_idx_inv_freq)};
1279
+ }
1280
+
1281
+ inline __device__ float2 rotary_embedding_transform(const float2 v, const float2 coef)
1282
+ {
1283
+ float2 rot_v;
1284
+ rot_v.x = coef.x * v.x - coef.y * v.y;
1285
+ rot_v.y = coef.x * v.y + coef.y * v.x;
1286
+ return rot_v;
1287
+ }
1288
+
1289
+ inline __device__ uint32_t rotary_embedding_transform(const uint32_t v, const float2 coef)
1290
+ {
1291
+ float2 fv = half2_to_float2(v);
1292
+ float2 rot_fv = rotary_embedding_transform(fv, coef);
1293
+ return float2_to_half2(rot_fv);
1294
+ }
1295
+
1296
+ #ifdef ENABLE_BF16
1297
+ inline __device__ __nv_bfloat162 rotary_embedding_transform(const __nv_bfloat162 v, const float2 coef)
1298
+ {
1299
+ float2 fv = bf1622float2(v);
1300
+ float2 rot_fv = rotary_embedding_transform(fv, coef);
1301
+ return __floats2bfloat162_rn(rot_fv.x, rot_fv.y);
1302
+ }
1303
+ #endif
1304
+
1305
+ inline __device__ void apply_rotary_embedding(float& q, int zid, int rot_embed_dim, int t_step, const float base=10000.0f)
1306
+ {
1307
+ return;
1308
+ }
1309
+
1310
+ inline __device__ void apply_rotary_embedding(float& q, float& k, int zid, int rot_embed_dim, int t_step, const float base=10000.0f)
1311
+ {
1312
+ return;
1313
+ }
1314
+
1315
+ inline __device__ void apply_rotary_embedding(float2& q, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
1316
+ {
1317
+ if (2 * tid >= rot_embed_dim) {
1318
+ return;
1319
+ }
1320
+ const auto coef = rotary_embedding_coefficient(2 * tid, rot_embed_dim, t_step, base);
1321
+ q = rotary_embedding_transform(q, coef);
1322
+ }
1323
+
1324
+ inline __device__ void apply_rotary_embedding(float2& q, float2& k, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
1325
+ {
1326
+ if (2 * tid >= rot_embed_dim) {
1327
+ return;
1328
+ }
1329
+ const auto coef = rotary_embedding_coefficient(2 * tid, rot_embed_dim, t_step, base);
1330
+ q = rotary_embedding_transform(q, coef);
1331
+ k = rotary_embedding_transform(k, coef);
1332
+ }
1333
+
1334
+ inline __device__ void apply_rotary_embedding(float4& q, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
1335
+ {
1336
+ if (4 * tid >= rot_embed_dim) {
1337
+ return;
1338
+ }
1339
+
1340
+ Float4_& q_ = *reinterpret_cast<Float4_*>(&q);
1341
+ const auto coef0 = rotary_embedding_coefficient(4 * tid, rot_embed_dim, t_step, base);
1342
+ q_.x = rotary_embedding_transform(q_.x, coef0);
1343
+ const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, rot_embed_dim, t_step, base);
1344
+ q_.y = rotary_embedding_transform(q_.y, coef1);
1345
+ }
1346
+
1347
+ inline __device__ void apply_rotary_embedding(float4& q, float4& k, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
1348
+ {
1349
+ if (4 * tid >= rot_embed_dim) {
1350
+ return;
1351
+ }
1352
+
1353
+ Float4_& q_ = *reinterpret_cast<Float4_*>(&q);
1354
+ Float4_& k_ = *reinterpret_cast<Float4_*>(&k);
1355
+ const auto coef0 = rotary_embedding_coefficient(4 * tid, rot_embed_dim, t_step, base);
1356
+ q_.x = rotary_embedding_transform(q_.x, coef0);
1357
+ k_.x = rotary_embedding_transform(k_.x, coef0);
1358
+ const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, rot_embed_dim, t_step, base);
1359
+ q_.y = rotary_embedding_transform(q_.y, coef1);
1360
+ k_.y = rotary_embedding_transform(k_.y, coef1);
1361
+ }
1362
+
1363
+ inline __device__ void apply_rotary_embedding(uint32_t& q, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
1364
+ {
1365
+ if (2 * tid >= rot_embed_dim) {
1366
+ return;
1367
+ }
1368
+ const auto coef = rotary_embedding_coefficient(2 * tid, rot_embed_dim, t_step, base);
1369
+ q = rotary_embedding_transform(q, coef);
1370
+ }
1371
+
1372
+ inline __device__ void apply_rotary_embedding(uint32_t& q, uint32_t& k, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
1373
+ {
1374
+ if (2 * tid >= rot_embed_dim) {
1375
+ return;
1376
+ }
1377
+ const auto coef = rotary_embedding_coefficient(2 * tid, rot_embed_dim, t_step, base);
1378
+ q = rotary_embedding_transform(q, coef);
1379
+ k = rotary_embedding_transform(k, coef);
1380
+ }
1381
+
1382
+ inline __device__ void apply_rotary_embedding(uint2& q, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
1383
+ {
1384
+ if (4 * tid >= rot_embed_dim) {
1385
+ return;
1386
+ }
1387
+ const auto coef0 = rotary_embedding_coefficient(4 * tid, rot_embed_dim, t_step, base);
1388
+ q.x = rotary_embedding_transform(q.x, coef0);
1389
+ const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, rot_embed_dim, t_step, base);
1390
+ q.y = rotary_embedding_transform(q.y, coef1);
1391
+ }
1392
+
1393
+ inline __device__ void apply_rotary_embedding(uint2& q, uint2& k, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
1394
+ {
1395
+ if (4 * tid >= rot_embed_dim) {
1396
+ return;
1397
+ }
1398
+ const auto coef0 = rotary_embedding_coefficient(4 * tid, rot_embed_dim, t_step, base);
1399
+ q.x = rotary_embedding_transform(q.x, coef0);
1400
+ k.x = rotary_embedding_transform(k.x, coef0);
1401
+ const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, rot_embed_dim, t_step, base);
1402
+ q.y = rotary_embedding_transform(q.y, coef1);
1403
+ k.y = rotary_embedding_transform(k.y, coef1);
1404
+ }
1405
+
1406
+ inline __device__ void apply_rotary_embedding(uint4& q, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
1407
+ {
1408
+ if (8 * tid >= rot_embed_dim) {
1409
+ return;
1410
+ }
1411
+ const auto coef0 = rotary_embedding_coefficient(8 * tid, rot_embed_dim, t_step, base);
1412
+ q.x = rotary_embedding_transform(q.x, coef0);
1413
+ const auto coef1 = rotary_embedding_coefficient(8 * tid + 2, rot_embed_dim, t_step, base);
1414
+ q.y = rotary_embedding_transform(q.y, coef1);
1415
+ const auto coef2 = rotary_embedding_coefficient(8 * tid + 4, rot_embed_dim, t_step, base);
1416
+ q.z = rotary_embedding_transform(q.z, coef2);
1417
+ const auto coef3 = rotary_embedding_coefficient(8 * tid + 6, rot_embed_dim, t_step, base);
1418
+ q.w = rotary_embedding_transform(q.w, coef3);
1419
+ }
1420
+
1421
+ inline __device__ void apply_rotary_embedding(uint4& q, uint4& k, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
1422
+ {
1423
+ if (8 * tid >= rot_embed_dim) {
1424
+ return;
1425
+ }
1426
+ const auto coef0 = rotary_embedding_coefficient(8 * tid, rot_embed_dim, t_step, base);
1427
+ q.x = rotary_embedding_transform(q.x, coef0);
1428
+ k.x = rotary_embedding_transform(k.x, coef0);
1429
+ const auto coef1 = rotary_embedding_coefficient(8 * tid + 2, rot_embed_dim, t_step, base);
1430
+ q.y = rotary_embedding_transform(q.y, coef1);
1431
+ k.y = rotary_embedding_transform(k.y, coef1);
1432
+ const auto coef2 = rotary_embedding_coefficient(8 * tid + 4, rot_embed_dim, t_step, base);
1433
+ q.z = rotary_embedding_transform(q.z, coef2);
1434
+ k.z = rotary_embedding_transform(k.z, coef2);
1435
+ const auto coef3 = rotary_embedding_coefficient(8 * tid + 6, rot_embed_dim, t_step, base);
1436
+ q.w = rotary_embedding_transform(q.w, coef3);
1437
+ k.w = rotary_embedding_transform(k.w, coef3);
1438
+ }
1439
+
1440
+ #ifdef ENABLE_BF16
1441
+ inline __device__ void apply_rotary_embedding(__nv_bfloat162& q, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
1442
+ {
1443
+ if (2 * tid >= rot_embed_dim) {
1444
+ return;
1445
+ }
1446
+ const auto coef = rotary_embedding_coefficient(2 * tid, rot_embed_dim, t_step, base);
1447
+ q = rotary_embedding_transform(q, coef);
1448
+ }
1449
+
1450
+ inline __device__ void apply_rotary_embedding(__nv_bfloat162& q, __nv_bfloat162& k, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
1451
+ {
1452
+ if (2 * tid >= rot_embed_dim) {
1453
+ return;
1454
+ }
1455
+ const auto coef = rotary_embedding_coefficient(2 * tid, rot_embed_dim, t_step, base);
1456
+ q = rotary_embedding_transform(q, coef);
1457
+ k = rotary_embedding_transform(k, coef);
1458
+ }
1459
+
1460
+ inline __device__ void apply_rotary_embedding(bf16_4_t& q, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
1461
+ {
1462
+ if (4 * tid >= rot_embed_dim) {
1463
+ return;
1464
+ }
1465
+ const auto coef0 = rotary_embedding_coefficient(4 * tid, rot_embed_dim, t_step, base);
1466
+ q.x = rotary_embedding_transform(q.x, coef0);
1467
+ const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, rot_embed_dim, t_step, base);
1468
+ q.y = rotary_embedding_transform(q.y, coef1);
1469
+ }
1470
+
1471
+ inline __device__ void apply_rotary_embedding(bf16_4_t& q, bf16_4_t& k, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
1472
+ {
1473
+ if (4 * tid >= rot_embed_dim) {
1474
+ return;
1475
+ }
1476
+ const auto coef0 = rotary_embedding_coefficient(4 * tid, rot_embed_dim, t_step, base);
1477
+ q.x = rotary_embedding_transform(q.x, coef0);
1478
+ k.x = rotary_embedding_transform(k.x, coef0);
1479
+ const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, rot_embed_dim, t_step, base);
1480
+ q.y = rotary_embedding_transform(q.y, coef1);
1481
+ k.y = rotary_embedding_transform(k.y, coef1);
1482
+ }
1483
+
1484
+ inline __device__ void apply_rotary_embedding(bf16_8_t& q, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
1485
+ {
1486
+ if (8 * tid >= rot_embed_dim) {
1487
+ return;
1488
+ }
1489
+ const auto coef0 = rotary_embedding_coefficient(8 * tid, rot_embed_dim, t_step, base);
1490
+ q.x = rotary_embedding_transform(q.x, coef0);
1491
+ const auto coef1 = rotary_embedding_coefficient(8 * tid + 2, rot_embed_dim, t_step, base);
1492
+ q.y = rotary_embedding_transform(q.y, coef1);
1493
+ const auto coef2 = rotary_embedding_coefficient(8 * tid + 4, rot_embed_dim, t_step, base);
1494
+ q.z = rotary_embedding_transform(q.z, coef2);
1495
+ const auto coef3 = rotary_embedding_coefficient(8 * tid + 6, rot_embed_dim, t_step, base);
1496
+ q.w = rotary_embedding_transform(q.w, coef3);
1497
+ }
1498
+
1499
+ inline __device__ void apply_rotary_embedding(bf16_8_t& q, bf16_8_t& k, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
1500
+ {
1501
+ if (8 * tid >= rot_embed_dim) {
1502
+ return;
1503
+ }
1504
+ const auto coef0 = rotary_embedding_coefficient(8 * tid, rot_embed_dim, t_step, base);
1505
+ q.x = rotary_embedding_transform(q.x, coef0);
1506
+ k.x = rotary_embedding_transform(k.x, coef0);
1507
+ const auto coef1 = rotary_embedding_coefficient(8 * tid + 2, rot_embed_dim, t_step, base);
1508
+ q.y = rotary_embedding_transform(q.y, coef1);
1509
+ k.y = rotary_embedding_transform(k.y, coef1);
1510
+ const auto coef2 = rotary_embedding_coefficient(8 * tid + 4, rot_embed_dim, t_step, base);
1511
+ q.z = rotary_embedding_transform(q.z, coef2);
1512
+ k.z = rotary_embedding_transform(k.z, coef2);
1513
+ const auto coef3 = rotary_embedding_coefficient(8 * tid + 6, rot_embed_dim, t_step, base);
1514
+ q.w = rotary_embedding_transform(q.w, coef3);
1515
+ k.w = rotary_embedding_transform(k.w, coef3);
1516
+ }
1517
+ #endif // ENABLE_BF16
1518
+
1519
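+ // Variant that reads precomputed cos/sin tables instead of recomputing the
+ // coefficients from `base`; used when rotary_cos / rotary_sin pointers are supplied.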
+ template <typename T>
1520
+ inline __device__ float2 rotary_embedding_coefficient(const int zid, const int t_step, const T* rotary_cos, const T* rotary_sin)
1521
+ {
1522
+ // zid is the index of the dimension (0, 2, 4, ..., rotary_dim).
1523
+ // rotary_cos/sin stores those at index 0, 1, 2, ..., rotary_dim / 2.
1524
+ return {float(rotary_cos[zid / 2]), float(rotary_sin[zid / 2])};
1525
+ }
1526
+
1527
+ // fp16 is special because we use uint16_t for reading the data, for backward compatibility.
1528
+ template <>
1529
+ inline __device__ float2 rotary_embedding_coefficient<uint16_t>(const int zid, const int t_step, const uint16_t* rotary_cos, const uint16_t* rotary_sin)
1530
+ {
1531
+ // zid is the index of the dimension (0, 2, 4, ..., rotary_dim).
1532
+ // rotary_cos/sin stores those at index 0, 1, 2, ..., rotary_dim / 2.
1533
+ return {float(reinterpret_cast<const __half*>(rotary_cos)[zid / 2]),
1534
+ float(reinterpret_cast<const __half*>(rotary_sin)[zid / 2])};
1535
+ }
1536
+
1537
+ inline __device__ void apply_rotary_embedding(float& q, int zid, int rot_embed_dim, int t_step, const float* rotary_cos, const float* rotary_sin)
1538
+ {
1539
+ return;
1540
+ }
1541
+
1542
+ inline __device__ void apply_rotary_embedding(float& q, float& k, int zid, int rot_embed_dim, int t_step, const float* rotary_cos, const float* rotary_sin)
1543
+ {
1544
+ return;
1545
+ }
1546
+
1547
+ inline __device__ void apply_rotary_embedding(float2& q, int tid, int rot_embed_dim, int t_step, const float* rotary_cos, const float* rotary_sin)
1548
+ {
1549
+ if (2 * tid >= rot_embed_dim) {
1550
+ return;
1551
+ }
1552
+ const auto coef = rotary_embedding_coefficient(2 * tid, t_step, rotary_cos, rotary_sin);
1553
+ q = rotary_embedding_transform(q, coef);
1554
+ }
1555
+
1556
+ inline __device__ void apply_rotary_embedding(float2& q, float2& k, int tid, int rot_embed_dim, int t_step, const float* rotary_cos, const float* rotary_sin)
1557
+ {
1558
+ if (2 * tid >= rot_embed_dim) {
1559
+ return;
1560
+ }
1561
+ const auto coef = rotary_embedding_coefficient(2 * tid, t_step, rotary_cos, rotary_sin);
1562
+ q = rotary_embedding_transform(q, coef);
1563
+ k = rotary_embedding_transform(k, coef);
1564
+ }
1565
+
1566
+ inline __device__ void apply_rotary_embedding(float4& q, int tid, int rot_embed_dim, int t_step, const float* rotary_cos, const float* rotary_sin)
1567
+ {
1568
+ if (4 * tid >= rot_embed_dim) {
1569
+ return;
1570
+ }
1571
+
1572
+ Float4_& q_ = *reinterpret_cast<Float4_*>(&q);
1573
+ const auto coef0 = rotary_embedding_coefficient(4 * tid, t_step, rotary_cos, rotary_sin);
1574
+ q_.x = rotary_embedding_transform(q_.x, coef0);
1575
+ const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, t_step, rotary_cos, rotary_sin);
1576
+ q_.y = rotary_embedding_transform(q_.y, coef1);
1577
+ }
1578
+
1579
+ inline __device__ void apply_rotary_embedding(float4& q, float4& k, int tid, int rot_embed_dim, int t_step, const float* rotary_cos, const float* rotary_sin)
1580
+ {
1581
+ if (4 * tid >= rot_embed_dim) {
1582
+ return;
1583
+ }
1584
+
1585
+ Float4_& q_ = *reinterpret_cast<Float4_*>(&q);
1586
+ Float4_& k_ = *reinterpret_cast<Float4_*>(&k);
1587
+ const auto coef0 = rotary_embedding_coefficient(4 * tid, t_step, rotary_cos, rotary_sin);
1588
+ q_.x = rotary_embedding_transform(q_.x, coef0);
1589
+ k_.x = rotary_embedding_transform(k_.x, coef0);
1590
+ const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, t_step, rotary_cos, rotary_sin);
1591
+ q_.y = rotary_embedding_transform(q_.y, coef1);
1592
+ k_.y = rotary_embedding_transform(k_.y, coef1);
1593
+ }
1594
+
1595
+ inline __device__ void apply_rotary_embedding(uint32_t& q, int tid, int rot_embed_dim, int t_step, const uint16_t* rotary_cos, const uint16_t* rotary_sin)
1596
+ {
1597
+ if (2 * tid >= rot_embed_dim) {
1598
+ return;
1599
+ }
1600
+ const auto coef = rotary_embedding_coefficient(2 * tid, t_step, rotary_cos, rotary_sin);
1601
+ q = rotary_embedding_transform(q, coef);
1602
+ }
1603
+
1604
+ inline __device__ void apply_rotary_embedding(uint32_t& q, uint32_t& k, int tid, int rot_embed_dim, int t_step, const uint16_t* rotary_cos, const uint16_t* rotary_sin)
1605
+ {
1606
+ if (2 * tid >= rot_embed_dim) {
1607
+ return;
1608
+ }
1609
+ const auto coef = rotary_embedding_coefficient(2 * tid, t_step, rotary_cos, rotary_sin);
1610
+ q = rotary_embedding_transform(q, coef);
1611
+ k = rotary_embedding_transform(k, coef);
1612
+ }
1613
+
1614
+ inline __device__ void apply_rotary_embedding(uint2& q, int tid, int rot_embed_dim, int t_step, const uint16_t* rotary_cos, const uint16_t* rotary_sin)
1615
+ {
1616
+ if (4 * tid >= rot_embed_dim) {
1617
+ return;
1618
+ }
1619
+ const auto coef0 = rotary_embedding_coefficient(4 * tid, t_step, rotary_cos, rotary_sin);
1620
+ q.x = rotary_embedding_transform(q.x, coef0);
1621
+ const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, t_step, rotary_cos, rotary_sin);
1622
+ q.y = rotary_embedding_transform(q.y, coef1);
1623
+ }
1624
+
1625
+ inline __device__ void apply_rotary_embedding(uint2& q, uint2& k, int tid, int rot_embed_dim, int t_step, const uint16_t* rotary_cos, const uint16_t* rotary_sin)
1626
+ {
1627
+ if (4 * tid >= rot_embed_dim) {
1628
+ return;
1629
+ }
1630
+ const auto coef0 = rotary_embedding_coefficient(4 * tid, t_step, rotary_cos, rotary_sin);
1631
+ q.x = rotary_embedding_transform(q.x, coef0);
1632
+ k.x = rotary_embedding_transform(k.x, coef0);
1633
+ const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, t_step, rotary_cos, rotary_sin);
1634
+ q.y = rotary_embedding_transform(q.y, coef1);
1635
+ k.y = rotary_embedding_transform(k.y, coef1);
1636
+ }
1637
+
1638
+ inline __device__ void apply_rotary_embedding(uint4& q, int tid, int rot_embed_dim, int t_step, const uint16_t* rotary_cos, const uint16_t* rotary_sin)
1639
+ {
1640
+ if (8 * tid >= rot_embed_dim) {
1641
+ return;
1642
+ }
1643
+ const auto coef0 = rotary_embedding_coefficient(8 * tid, t_step, rotary_cos, rotary_sin);
1644
+ q.x = rotary_embedding_transform(q.x, coef0);
1645
+ const auto coef1 = rotary_embedding_coefficient(8 * tid + 2, t_step, rotary_cos, rotary_sin);
1646
+ q.y = rotary_embedding_transform(q.y, coef1);
1647
+ const auto coef2 = rotary_embedding_coefficient(8 * tid + 4, t_step, rotary_cos, rotary_sin);
1648
+ q.z = rotary_embedding_transform(q.z, coef2);
1649
+ const auto coef3 = rotary_embedding_coefficient(8 * tid + 6, t_step, rotary_cos, rotary_sin);
1650
+ q.w = rotary_embedding_transform(q.w, coef3);
1651
+ }
1652
+
1653
+ inline __device__ void apply_rotary_embedding(uint4& q, uint4& k, int tid, int rot_embed_dim, int t_step, const uint16_t* rotary_cos, const uint16_t* rotary_sin)
1654
+ {
1655
+ if (8 * tid >= rot_embed_dim) {
1656
+ return;
1657
+ }
1658
+ const auto coef0 = rotary_embedding_coefficient(8 * tid, t_step, rotary_cos, rotary_sin);
1659
+ q.x = rotary_embedding_transform(q.x, coef0);
1660
+ k.x = rotary_embedding_transform(k.x, coef0);
1661
+ const auto coef1 = rotary_embedding_coefficient(8 * tid + 2, t_step, rotary_cos, rotary_sin);
1662
+ q.y = rotary_embedding_transform(q.y, coef1);
1663
+ k.y = rotary_embedding_transform(k.y, coef1);
1664
+ const auto coef2 = rotary_embedding_coefficient(8 * tid + 4, t_step, rotary_cos, rotary_sin);
1665
+ q.z = rotary_embedding_transform(q.z, coef2);
1666
+ k.z = rotary_embedding_transform(k.z, coef2);
1667
+ const auto coef3 = rotary_embedding_coefficient(8 * tid + 6, t_step, rotary_cos, rotary_sin);
1668
+ q.w = rotary_embedding_transform(q.w, coef3);
1669
+ k.w = rotary_embedding_transform(k.w, coef3);
1670
+ }
1671
+
1672
+ #ifdef ENABLE_BF16
1673
+ inline __device__ void apply_rotary_embedding(__nv_bfloat162& q, int tid, int rot_embed_dim, int t_step, const __nv_bfloat16* rotary_cos, const __nv_bfloat16* rotary_sin)
1674
+ {
1675
+ if (2 * tid >= rot_embed_dim) {
1676
+ return;
1677
+ }
1678
+ const auto coef = rotary_embedding_coefficient(2 * tid, t_step, rotary_cos, rotary_sin);
1679
+ q = rotary_embedding_transform(q, coef);
1680
+ }
1681
+
1682
+ inline __device__ void apply_rotary_embedding(__nv_bfloat162& q, __nv_bfloat162& k, int tid, int rot_embed_dim, int t_step, const __nv_bfloat16* rotary_cos, const __nv_bfloat16* rotary_sin)
1683
+ {
1684
+ if (2 * tid >= rot_embed_dim) {
1685
+ return;
1686
+ }
1687
+ const auto coef = rotary_embedding_coefficient(2 * tid, t_step, rotary_cos, rotary_sin);
1688
+ q = rotary_embedding_transform(q, coef);
1689
+ k = rotary_embedding_transform(k, coef);
1690
+ }
1691
+
1692
+ inline __device__ void apply_rotary_embedding(bf16_4_t& q, int tid, int rot_embed_dim, int t_step, const __nv_bfloat16* rotary_cos, const __nv_bfloat16* rotary_sin)
1693
+ {
1694
+ if (4 * tid >= rot_embed_dim) {
1695
+ return;
1696
+ }
1697
+ const auto coef0 = rotary_embedding_coefficient(4 * tid, t_step, rotary_cos, rotary_sin);
1698
+ q.x = rotary_embedding_transform(q.x, coef0);
1699
+ const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, t_step, rotary_cos, rotary_sin);
1700
+ q.y = rotary_embedding_transform(q.y, coef1);
1701
+ }
1702
+
1703
+ inline __device__ void apply_rotary_embedding(bf16_4_t& q, bf16_4_t& k, int tid, int rot_embed_dim, int t_step, const __nv_bfloat16* rotary_cos, const __nv_bfloat16* rotary_sin)
1704
+ {
1705
+ if (4 * tid >= rot_embed_dim) {
1706
+ return;
1707
+ }
1708
+ const auto coef0 = rotary_embedding_coefficient(4 * tid, t_step, rotary_cos, rotary_sin);
1709
+ q.x = rotary_embedding_transform(q.x, coef0);
1710
+ k.x = rotary_embedding_transform(k.x, coef0);
1711
+ const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, t_step, rotary_cos, rotary_sin);
1712
+ q.y = rotary_embedding_transform(q.y, coef1);
1713
+ k.y = rotary_embedding_transform(k.y, coef1);
1714
+ }
1715
+
1716
+ inline __device__ void apply_rotary_embedding(bf16_8_t& q, int tid, int rot_embed_dim, int t_step, const __nv_bfloat16* rotary_cos, const __nv_bfloat16* rotary_sin)
1717
+ {
1718
+ if (8 * tid >= rot_embed_dim) {
1719
+ return;
1720
+ }
1721
+ const auto coef0 = rotary_embedding_coefficient(8 * tid, t_step, rotary_cos, rotary_sin);
1722
+ q.x = rotary_embedding_transform(q.x, coef0);
1723
+ const auto coef1 = rotary_embedding_coefficient(8 * tid + 2, t_step, rotary_cos, rotary_sin);
1724
+ q.y = rotary_embedding_transform(q.y, coef1);
1725
+ const auto coef2 = rotary_embedding_coefficient(8 * tid + 4, t_step, rotary_cos, rotary_sin);
1726
+ q.z = rotary_embedding_transform(q.z, coef2);
1727
+ const auto coef3 = rotary_embedding_coefficient(8 * tid + 6, t_step, rotary_cos, rotary_sin);
1728
+ q.w = rotary_embedding_transform(q.w, coef3);
1729
+ }
1730
+
1731
+ inline __device__ void apply_rotary_embedding(bf16_8_t& q, bf16_8_t& k, int tid, int rot_embed_dim, int t_step, const __nv_bfloat16* rotary_cos, const __nv_bfloat16* rotary_sin)
1732
+ {
1733
+ if (8 * tid >= rot_embed_dim) {
1734
+ return;
1735
+ }
1736
+ const auto coef0 = rotary_embedding_coefficient(8 * tid, t_step, rotary_cos, rotary_sin);
1737
+ q.x = rotary_embedding_transform(q.x, coef0);
1738
+ k.x = rotary_embedding_transform(k.x, coef0);
1739
+ const auto coef1 = rotary_embedding_coefficient(8 * tid + 2, t_step, rotary_cos, rotary_sin);
1740
+ q.y = rotary_embedding_transform(q.y, coef1);
1741
+ k.y = rotary_embedding_transform(k.y, coef1);
1742
+ const auto coef2 = rotary_embedding_coefficient(8 * tid + 4, t_step, rotary_cos, rotary_sin);
1743
+ q.z = rotary_embedding_transform(q.z, coef2);
1744
+ k.z = rotary_embedding_transform(k.z, coef2);
1745
+ const auto coef3 = rotary_embedding_coefficient(8 * tid + 6, t_step, rotary_cos, rotary_sin);
1746
+ q.w = rotary_embedding_transform(q.w, coef3);
1747
+ k.w = rotary_embedding_transform(k.w, coef3);
1748
+ }
1749
+ #endif // ENABLE_BF16
1750
+
1751
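+ // Gather a packed vector from two shared-memory rows (transpose_idx and
+ // transpose_idx + smem_pitch), interleaving them so each element pair holds
+ // (row0[i], row1[i]).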
+ template<typename Vec_T, typename T>
1752
+ __device__ __inline__ void vec_from_smem_transpose(Vec_T& vec, T* smem, int transpose_idx, int smem_pitch);
1753
+
1754
+ template<>
1755
+ __device__ __inline__ void vec_from_smem_transpose(float& vec, float* smem, int transpose_idx, int smem_pitch)
1756
+ {
1757
+ return;
1758
+ }
1759
+
1760
+ template<>
1761
+ __device__ __inline__ void vec_from_smem_transpose(uint32_t& vec, uint16_t* smem, int transpose_idx, int smem_pitch)
1762
+ {
1763
+ union {
1764
+ uint32_t u32;
1765
+ uint16_t u16[2];
1766
+ } tmp;
1767
+ tmp.u16[0] = smem[transpose_idx];
1768
+ tmp.u16[1] = smem[smem_pitch + transpose_idx];
1769
+
1770
+ vec = tmp.u32;
1771
+ }
1772
+
1773
+ template<>
1774
+ __device__ __inline__ void vec_from_smem_transpose(uint2& vec, uint16_t* smem, int transpose_idx, int smem_pitch)
1775
+ {
1776
+ union {
1777
+ uint32_t u32;
1778
+ uint16_t u16[2];
1779
+ } tmp_1, tmp_2;
1780
+ tmp_1.u32 = *reinterpret_cast<uint32_t*>(&smem[transpose_idx]);
1781
+ tmp_2.u32 = *reinterpret_cast<uint32_t*>(&smem[smem_pitch + transpose_idx]);
1782
+
1783
+ union {
1784
+ uint2 u32x2;
1785
+ uint16_t u16[4];
1786
+ } tmp_3;
1787
+ tmp_3.u16[0] = tmp_1.u16[0];
1788
+ tmp_3.u16[1] = tmp_2.u16[0];
1789
+ tmp_3.u16[2] = tmp_1.u16[1];
1790
+ tmp_3.u16[3] = tmp_2.u16[1];
1791
+
1792
+ vec = tmp_3.u32x2;
1793
+ }
1794
+
1795
+ template<>
1796
+ __device__ __inline__ void vec_from_smem_transpose(uint4& vec, uint16_t* smem, int transpose_idx, int smem_pitch)
1797
+ {
1798
+ union {
1799
+ uint64_t u64;
1800
+ uint16_t u16[4];
1801
+ } tmp_1, tmp_2;
1802
+ tmp_1.u64 = *reinterpret_cast<uint64_t*>(&smem[transpose_idx]);
1803
+ tmp_2.u64 = *reinterpret_cast<uint64_t*>(&smem[smem_pitch + transpose_idx]);
1804
+
1805
+ union {
1806
+ uint4 u32x4;
1807
+ uint16_t u16[8];
1808
+ } tmp_3;
1809
+ tmp_3.u16[0] = tmp_1.u16[0];
1810
+ tmp_3.u16[1] = tmp_2.u16[0];
1811
+ tmp_3.u16[2] = tmp_1.u16[1];
1812
+ tmp_3.u16[3] = tmp_2.u16[1];
1813
+ tmp_3.u16[4] = tmp_1.u16[2];
1814
+ tmp_3.u16[5] = tmp_2.u16[2];
1815
+ tmp_3.u16[6] = tmp_1.u16[3];
1816
+ tmp_3.u16[7] = tmp_2.u16[3];
1817
+
1818
+ vec = tmp_3.u32x4;
1819
+ }
1820
+
1821
+ #ifdef ENABLE_BF16
1822
+ template<>
1823
+ __device__ __inline__ void
1824
+ vec_from_smem_transpose(bf16_4_t& vec, __nv_bfloat16* smem, int transpose_idx, int smem_pitch)
1825
+ {
1826
+ union {
1827
+ uint32_t u32;
1828
+ __nv_bfloat16 bf16[2];
1829
+ } tmp_1, tmp_2;
1830
+ tmp_1.u32 = *reinterpret_cast<uint32_t*>(&smem[transpose_idx]);
1831
+ tmp_2.u32 = *reinterpret_cast<uint32_t*>(&smem[smem_pitch + transpose_idx]);
1832
+
1833
+ vec.x = __nv_bfloat162{tmp_1.bf16[0], tmp_2.bf16[0]};
1834
+ vec.y = __nv_bfloat162{tmp_1.bf16[1], tmp_2.bf16[1]};
1835
+ }
1836
+
1837
+ template<>
1838
+ __device__ __inline__ void
1839
+ vec_from_smem_transpose(bf16_8_t& vec, __nv_bfloat16* smem, int transpose_idx, int smem_pitch)
1840
+ {
1841
+ union {
1842
+ uint64_t u64;
1843
+ __nv_bfloat16 bf16[4];
1844
+ } tmp_1, tmp_2;
1845
+ tmp_1.u64 = *reinterpret_cast<uint64_t*>(&smem[transpose_idx]);
1846
+ tmp_2.u64 = *reinterpret_cast<uint64_t*>(&smem[smem_pitch + transpose_idx]);
1847
+
1848
+ vec.x = __nv_bfloat162{tmp_1.bf16[0], tmp_2.bf16[0]};
1849
+ vec.y = __nv_bfloat162{tmp_1.bf16[1], tmp_2.bf16[1]};
1850
+ vec.z = __nv_bfloat162{tmp_1.bf16[2], tmp_2.bf16[2]};
1851
+ vec.w = __nv_bfloat162{tmp_1.bf16[3], tmp_2.bf16[3]};
1852
+ }
1853
+ #endif // ENABLE_BF16
1854
+
1855
+ template<>
1856
+ __device__ __inline__ void vec_from_smem_transpose(float4& vec, float* smem, int transpose_idx, int smem_pitch)
1857
+ {
1858
+ vec.x = smem[transpose_idx];
1859
+ vec.z = smem[transpose_idx + 1];
1860
+ vec.y = smem[smem_pitch + transpose_idx];
1861
+ vec.w = smem[smem_pitch + transpose_idx + 1];
1862
+ }
1863
+
1864
+ template<>
1865
+ __device__ __inline__ void vec_from_smem_transpose(uint32_t& vec, half* smem, int transpose_idx, int smem_pitch)
1866
+ {
1867
+ union {
1868
+ uint32_t u32;
1869
+ half u16[2];
1870
+ } tmp;
1871
+ tmp.u16[0] = smem[transpose_idx];
1872
+ tmp.u16[1] = smem[smem_pitch + transpose_idx];
1873
+
1874
+ vec = tmp.u32;
1875
+ }
1876
+
1877
+ #ifdef ENABLE_BF16
1878
+ template<>
1879
+ __device__ __inline__ void
1880
+ vec_from_smem_transpose(__nv_bfloat162& vec, __nv_bfloat16* smem, int transpose_idx, int smem_pitch)
1881
+ {
1882
+ vec.x = smem[transpose_idx];
1883
+ vec.y = smem[smem_pitch + transpose_idx];
1884
+ }
1885
+ #endif
1886
+
1887
+ template<>
1888
+ __device__ __inline__ void vec_from_smem_transpose(float2& vec, float* smem, int transpose_idx, int smem_pitch)
1889
+ {
1890
+ vec.x = smem[transpose_idx];
1891
+ vec.y = smem[smem_pitch + transpose_idx];
1892
+ }
1893
+
1894
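+ // Inverse of vec_from_smem_transpose: de-interleave `vec` and scatter its
+ // elements back to the two shared-memory rows.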
+ template<typename Vec_T, typename T>
1895
+ __device__ __inline__ void write_smem_transpose(const Vec_T& vec, T* smem, int transpose_idx, int smem_pitch);
1896
+
1897
+ template<>
1898
+ __device__ __inline__ void write_smem_transpose(const float& vec, float* smem, int transpose_idx, int smem_pitch)
1899
+ {
1900
+ return;
1901
+ }
1902
+
1903
+ template<>
1904
+ __device__ __inline__ void write_smem_transpose(const uint4& vec, uint16_t* smem, int transpose_idx, int smem_pitch)
1905
+ {
1906
+ union {
1907
+ uint64_t u64;
1908
+ uint16_t u16[4];
1909
+ } tmp_1, tmp_2;
1910
+
1911
+ union {
1912
+ uint4 u32x4;
1913
+ uint16_t u16[8];
1914
+ } tmp_3;
1915
+ tmp_3.u32x4 = vec;
1916
+ tmp_1.u16[0] = tmp_3.u16[0];
1917
+ tmp_2.u16[0] = tmp_3.u16[1];
1918
+ tmp_1.u16[1] = tmp_3.u16[2];
1919
+ tmp_2.u16[1] = tmp_3.u16[3];
1920
+ tmp_1.u16[2] = tmp_3.u16[4];
1921
+ tmp_2.u16[2] = tmp_3.u16[5];
1922
+ tmp_1.u16[3] = tmp_3.u16[6];
1923
+ tmp_2.u16[3] = tmp_3.u16[7];
1924
+
1925
+ *reinterpret_cast<uint64_t*>(&smem[transpose_idx]) = tmp_1.u64;
1926
+ *reinterpret_cast<uint64_t*>(&smem[smem_pitch + transpose_idx]) = tmp_2.u64;
1927
+ }
1928
+
1929
+ template<>
1930
+ __device__ __inline__ void write_smem_transpose(const uint2& vec, uint16_t* smem, int transpose_idx, int smem_pitch)
1931
+ {
1932
+ union {
1933
+ uint32_t u32;
1934
+ uint16_t u16[2];
1935
+ } tmp_1, tmp_2;
1936
+
1937
+ union {
1938
+ uint2 u32x2;
1939
+ uint16_t u16[4];
1940
+ } tmp_3;
1941
+ tmp_3.u32x2 = vec;
1942
+ tmp_1.u16[0] = tmp_3.u16[0];
1943
+ tmp_2.u16[0] = tmp_3.u16[1];
1944
+ tmp_1.u16[1] = tmp_3.u16[2];
1945
+ tmp_2.u16[1] = tmp_3.u16[3];
1946
+
1947
+ *reinterpret_cast<uint32_t*>(&smem[transpose_idx]) = tmp_1.u32;
1948
+ *reinterpret_cast<uint32_t*>(&smem[smem_pitch + transpose_idx]) = tmp_2.u32;
1949
+ }
1950
+
1951
+ template<>
1952
+ __device__ __inline__ void write_smem_transpose(const uint32_t& vec, uint16_t* smem, int transpose_idx, int smem_pitch)
1953
+ {
1954
+ union {
1955
+ uint32_t u32;
1956
+ uint16_t u16[2];
1957
+ } tmp;
1958
+ tmp.u32 = vec;
1959
+
1960
+ smem[transpose_idx] = tmp.u16[0];
1961
+ smem[smem_pitch + transpose_idx] = tmp.u16[1];
1962
+ }
1963
+
1964
+ template<>
1965
+ __device__ __inline__ void write_smem_transpose(const float4& vec, float* smem, int transpose_idx, int smem_pitch)
1966
+ {
1967
+ smem[transpose_idx] = vec.x;
1968
+ smem[transpose_idx + 1] = vec.z;
1969
+ smem[smem_pitch + transpose_idx] = vec.y;
1970
+ smem[smem_pitch + transpose_idx + 1] = vec.w;
1971
+ }
1972
+
1973
+ template<>
1974
+ __device__ __inline__ void write_smem_transpose(const uint32_t& vec, half* smem, int transpose_idx, int smem_pitch)
1975
+ {
1976
+ union {
1977
+ uint32_t u32;
1978
+ half u16[2];
1979
+ } tmp;
1980
+
1981
+ tmp.u32 = vec;
1982
+ smem[transpose_idx] = tmp.u16[0];
1983
+ smem[smem_pitch + transpose_idx] = tmp.u16[1];
1984
+ }
1985
+
1986
+ #ifdef ENABLE_BF16
1987
+ template<>
1988
+ __device__ __inline__ void
1989
+ write_smem_transpose(const __nv_bfloat162& vec, __nv_bfloat16* smem, int transpose_idx, int smem_pitch)
1990
+ {
1991
+ smem[transpose_idx] = vec.x;
1992
+ smem[smem_pitch + transpose_idx] = vec.y;
1993
+ }
1994
+
1995
+ template<>
1996
+ __device__ __inline__ void
1997
+ write_smem_transpose(const bf16_4_t& vec, __nv_bfloat16* smem, int transpose_idx, int smem_pitch)
1998
+ {
1999
+ write_smem_transpose(reinterpret_cast<const uint2&>(vec), reinterpret_cast<uint16_t*>(smem), transpose_idx, smem_pitch);
2000
+ }
2001
+
2002
+ template<>
2003
+ __device__ __inline__ void
2004
+ write_smem_transpose(const bf16_8_t& vec, __nv_bfloat16* smem, int transpose_idx, int smem_pitch)
2005
+ {
2006
+ write_smem_transpose(reinterpret_cast<const uint4&>(vec), reinterpret_cast<uint16_t*>(smem), transpose_idx, smem_pitch);
2007
+ }
2008
+ #endif
2009
+
2010
+ template<>
2011
+ __device__ __inline__ void write_smem_transpose(const float2& vec, float* smem, int transpose_idx, int smem_pitch)
2012
+ {
2013
+ smem[transpose_idx] = vec.x;
2014
+ smem[smem_pitch + transpose_idx] = vec.y;
2015
+ }
2016
+
2017
+ } // namespace mmha
flash-attention/csrc/ft_attention/ft_attention.cpp ADDED
@@ -0,0 +1,232 @@
1
+ #include <torch/extension.h>
2
+ #include "ATen/cuda/CUDAContext.h"
3
+ #include <c10/cuda/CUDAGuard.h>
4
+
5
+
6
+ #include "decoder_masked_multihead_attention.h"
7
+
8
+ #define CHECK_DEVICE(x) TORCH_CHECK(x.device().type() == torch::kCUDA, #x " must be on CUDA")
9
+ #define CHECK_SHAPE(x, ...) TORCH_CHECK(x.sizes() == torch::IntArrayRef({__VA_ARGS__}), #x " must have shape (" #__VA_ARGS__ ")")
10
+ #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
11
+
12
+ #define DISPATCH_FLOAT_AND_HALF_AND_BF16(TYPE, NAME, ...) \
13
+ if (TYPE == at::ScalarType::Half) { \
14
+ using scalar_t = at::Half; \
15
+ __VA_ARGS__(); \
16
+ } else if (TYPE == at::ScalarType::BFloat16) { \
17
+ using scalar_t = at::BFloat16; \
18
+ __VA_ARGS__(); \
19
+ } else if (TYPE == at::ScalarType::Float) { \
20
+ using scalar_t = float; \
21
+ __VA_ARGS__(); \
22
+ } else { \
23
+ AT_ERROR(#NAME, " not implemented for type '", toString(TYPE), "'"); \
24
+ }
25
+
26
+ template<typename T>
27
+ void masked_multihead_attention(const Masked_multihead_attention_params<T>& params,
28
+ const cudaStream_t& stream);
29
+
30
+ template<typename T>
31
+ void cross_multihead_attention(const Masked_multihead_attention_params<T>& params,
32
+ const cudaStream_t& stream);
33
+
34
+ template<typename T>
35
+ struct SATypeConverter {
36
+ using Type = T;
37
+ };
38
+
39
+ template<>
40
+ struct SATypeConverter<at::Half> {
41
+ using Type = uint16_t;
42
+ };
43
+
44
+ template<>
45
+ struct SATypeConverter<at::BFloat16> {
46
+ using Type = __nv_bfloat16;
47
+ };
48
+
49
+ template <typename T>
50
+ void set_params(Masked_multihead_attention_params<T> &params,
51
+ const size_t batch_size,
52
+ const size_t nheads,
53
+ const size_t nheads_kv,
54
+ const size_t memory_max_seqlen,
55
+ const size_t headdim,
56
+ const int timestep,
57
+ const int rotary_embedding_dim,
58
+ const float rotary_base,
59
+ const bool neox_rotary_style,
60
+ const int q_batch_stride,
61
+ const int k_batch_stride,
62
+ const int v_batch_stride,
63
+ const int nnz_heads,
64
+ T *q_ptr,
65
+ T *k_ptr,
66
+ T *v_ptr,
67
+ T *k_cache_ptr,
68
+ T *v_cache_ptr,
69
+ int *length_per_sample,
70
+ T *rotary_cos,
71
+ T *rotary_sin,
72
+ T *out_ptr,
73
+ int *nnz_head_idx) {
74
+ // Reset the parameters
75
+ memset(&params, 0, sizeof(params));
76
+ params.q = q_ptr;
77
+ params.k = k_ptr;
78
+ params.v = v_ptr;
79
+ params.q_bias = nullptr;
80
+ params.k_bias = nullptr;
81
+ params.v_bias = nullptr;
82
+ params.k_cache = k_cache_ptr;
83
+ params.v_cache = v_cache_ptr;
84
+ params.out = out_ptr;
85
+ params.cache_indir = nullptr;
86
+ params.stride_q = q_batch_stride;
87
+ params.stride_k = k_batch_stride;
88
+ params.stride_v = v_batch_stride;
89
+ params.batch_size = batch_size;
90
+ params.beam_width = 1;
91
+ params.memory_max_len = memory_max_seqlen;
92
+ params.num_heads = nheads;
93
+ params.num_heads_kv = nheads_kv;
94
+ params.num_heads_q_kv_ratio = nheads / nheads_kv;
95
+ params.nnz_heads = nnz_heads;
96
+ params.hidden_size_per_head = headdim;
97
+ params.rotary_embedding_dim = rotary_embedding_dim;
98
+ params.rotary_base = rotary_base;
99
+ params.neox_rotary_style = neox_rotary_style;
100
+ params.timestep = timestep;
101
+ params.inv_sqrt_dh = 1.f / sqrt(float(headdim));
102
+ params.total_padding_tokens = nullptr;
103
+ params.masked_tokens = nullptr;
104
+ params.prefix_prompt_lengths = nullptr;
105
+ params.max_prefix_prompt_length = 0;
106
+ params.relative_attention_bias = nullptr;
107
+ params.relative_attention_bias_stride = 0;
108
+ params.cross_attention_out = nullptr;
109
+ params.max_decoder_seq_len = 0;
110
+ params.is_return_cross_attentions = false;
111
+ params.finished = nullptr;
112
+ params.memory_length_per_sample = nullptr;
113
+ params.length_per_sample = length_per_sample;
114
+ params.rotary_cos = rotary_cos;
115
+ params.rotary_sin = rotary_sin;
116
+ params.nnz_head_idx = nnz_head_idx;
117
+ }
118
+
119
+ torch::Tensor single_query_attention(const torch::Tensor q,
120
+ const torch::Tensor k,
121
+ const torch::Tensor v,
122
+ torch::Tensor k_cache,
123
+ torch::Tensor v_cache,
124
+ c10::optional<const torch::Tensor> length_per_sample_,
125
+ c10::optional<const torch::Tensor> rotary_cos_,
126
+ c10::optional<const torch::Tensor> rotary_sin_,
127
+ c10::optional<const torch::Tensor> nnz_head_idx_,
128
+ const int timestep,
129
+ int rotary_embedding_dim = 0,
130
+ const float rotary_base = 10000.0f,
131
+ const bool neox_rotary_style=true) {
132
+ CHECK_DEVICE(q); CHECK_DEVICE(k); CHECK_DEVICE(v); CHECK_DEVICE(k_cache); CHECK_DEVICE(v_cache);
133
+ int batch_size = v_cache.size(0);
134
+ int nheads = q.size(1);
135
+ int nheads_kv = v_cache.size(1);
136
+ int memory_max_seqlen = v_cache.size(2);
137
+ int headdim = v_cache.size(3);
138
+ auto input_type = q.scalar_type();
139
+ TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16);
140
+
141
+ CHECK_SHAPE(q, batch_size, nheads, headdim);
142
+ CHECK_SHAPE(k, batch_size, nheads_kv, headdim);
143
+ CHECK_SHAPE(v, batch_size, nheads_kv, headdim);
144
+ CHECK_SHAPE(v_cache, batch_size, nheads_kv, memory_max_seqlen, headdim);
145
+ // k_cache shape: [B, H, Dh/x, L, x] where x=8 for fp16 and x=4 for fp32
146
+ int packsize = k_cache.dtype() == torch::kFloat32 ? 4 : 8;
147
+ CHECK_SHAPE(k_cache, batch_size, nheads_kv, headdim / packsize, memory_max_seqlen, packsize);
148
+ TORCH_CHECK(q.stride(2) == 1 && q.stride(1) == headdim);
149
+ TORCH_CHECK(k.stride(2) == 1 && k.stride(1) == headdim);
150
+ TORCH_CHECK(v.stride(2) == 1 && v.stride(1) == headdim);
151
+ CHECK_CONTIGUOUS(v_cache); CHECK_CONTIGUOUS(k_cache);
152
+
153
+ TORCH_CHECK(q.scalar_type() == input_type);
154
+ TORCH_CHECK(k.scalar_type() == input_type);
155
+ TORCH_CHECK(v.scalar_type() == input_type);
156
+ TORCH_CHECK(k_cache.scalar_type() == input_type);
157
+ TORCH_CHECK(v_cache.scalar_type() == input_type);
158
+
159
+ if (length_per_sample_.has_value()) {
160
+ auto length_per_sample = length_per_sample_.value();
161
+ CHECK_DEVICE(length_per_sample);
162
+ CHECK_SHAPE(length_per_sample, batch_size);
163
+ CHECK_CONTIGUOUS(length_per_sample);
164
+ TORCH_CHECK(length_per_sample.dtype() == torch::kInt32);
165
+ }
166
+
167
+ if (rotary_cos_.has_value()) {
168
+ auto rotary_cos = rotary_cos_.value();
169
+ CHECK_DEVICE(rotary_cos);
170
+ rotary_embedding_dim = rotary_cos.size(-1) * 2;
171
+ CHECK_SHAPE(rotary_cos, batch_size, rotary_embedding_dim / 2);
172
+ CHECK_CONTIGUOUS(rotary_cos);
173
+ TORCH_CHECK(rotary_cos.scalar_type() == input_type);
174
+
175
+ TORCH_CHECK(rotary_sin_.has_value());
176
+ auto rotary_sin = rotary_sin_.value();
177
+ CHECK_DEVICE(rotary_sin);
178
+ CHECK_SHAPE(rotary_sin, batch_size, rotary_embedding_dim / 2);
179
+ CHECK_CONTIGUOUS(rotary_sin);
180
+ TORCH_CHECK(rotary_sin.scalar_type() == input_type);
181
+ }
182
+
183
+ if (nnz_head_idx_.has_value()) {
184
+ auto nnz_head_idx = nnz_head_idx_.value();
185
+ CHECK_DEVICE(nnz_head_idx);
186
+ int nnz_heads = nnz_head_idx.size(0);
187
+ CHECK_SHAPE(nnz_head_idx, nnz_heads);
188
+ CHECK_CONTIGUOUS(nnz_head_idx);
189
+ TORCH_CHECK(nnz_head_idx.dtype() == torch::kInt32);
190
+ }
191
+
192
+ // Otherwise the kernel will be launched from cuda:0 device
193
+ // Cast to char to avoid compiler warning about narrowing
194
+ at::cuda::CUDAGuard device_guard{(char)q.get_device()};
195
+
196
+ torch::Tensor out = torch::empty_like(q);
197
+
198
+ DISPATCH_FLOAT_AND_HALF_AND_BF16(q.scalar_type(), "single_query_attention", [&] {
199
+ using DataType = typename SATypeConverter<scalar_t>::Type;
200
+ Masked_multihead_attention_params<DataType> params;
201
+ set_params(params, batch_size, nheads, nheads_kv, memory_max_seqlen, headdim, timestep,
202
+ rotary_embedding_dim, rotary_base, neox_rotary_style,
203
+ q.stride(0), k.stride(0), v.stride(0),
204
+ nnz_head_idx_.has_value() ? nnz_head_idx_.value().size(0) : 0,
205
+ reinterpret_cast<DataType*>(q.data_ptr()),
206
+ reinterpret_cast<DataType*>(k.data_ptr()),
207
+ reinterpret_cast<DataType*>(v.data_ptr()),
208
+ reinterpret_cast<DataType*>(k_cache.data_ptr()),
209
+ reinterpret_cast<DataType*>(v_cache.data_ptr()),
210
+ length_per_sample_.has_value()
211
+ ? length_per_sample_.value().data_ptr<int>() : nullptr,
212
+ rotary_cos_.has_value()
213
+ ? reinterpret_cast<DataType*>(rotary_cos_.value().data_ptr()) : nullptr,
214
+ rotary_sin_.has_value()
215
+ ? reinterpret_cast<DataType*>(rotary_sin_.value().data_ptr()) : nullptr,
216
+ reinterpret_cast<DataType*>(out.data_ptr()),
217
+ nnz_head_idx_.has_value() ? nnz_head_idx_.value().data_ptr<int>() : nullptr
218
+ );
219
+ auto stream = at::cuda::getCurrentCUDAStream();
220
+ masked_multihead_attention(params, stream);
221
+ });
222
+ return out;
223
+ }
224
+
225
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
226
+ m.def("single_query_attention", &single_query_attention, "Attention with a single query",
227
+ py::arg("q"), py::arg("k"), py::arg("v"), py::arg("k_cache"), py::arg("v_cache"),
228
+ py::arg("length_per_sample_"), py::arg("rotary_cos_"),
229
+ py::arg("rotary_sin_"), py::arg("nnz_head_idx_"),
230
+ py::arg("timestep"), py::arg("rotary_embedding_dim")=0,
231
+ py::arg("rotary_base")=10000.0f, py::arg("neox_rotary_style")=true);
232
+ }
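
The binding above exposes one entry point, single_query_attention, which runs a single decode step against a preallocated KV cache. Below is a minimal usage sketch (an illustration added for clarity, not part of the committed sources); it assumes the extension has been built with the setup.py that follows and imported as ft_attention, and it mirrors the tensor layouts enforced by the shape checks: q/k/v as [B, H, Dh], v_cache as [B, H_kv, L, Dh], and k_cache in the packed [B, H_kv, Dh/x, L, x] layout.

import torch
import ft_attention  # extension module built by the setup.py below

batch, nheads, nheads_kv, headdim = 2, 8, 8, 128
max_len, timestep = 256, 16
dtype, device = torch.float16, "cuda"
packsize = 4 if dtype == torch.float32 else 8  # k_cache packing factor x

q = torch.randn(batch, nheads, headdim, dtype=dtype, device=device)
k = torch.randn(batch, nheads_kv, headdim, dtype=dtype, device=device)
v = torch.randn(batch, nheads_kv, headdim, dtype=dtype, device=device)
# k_cache layout: [B, H_kv, Dh/x, L, x]; v_cache layout: [B, H_kv, L, Dh]
k_cache = torch.zeros(batch, nheads_kv, headdim // packsize, max_len, packsize,
                      dtype=dtype, device=device)
v_cache = torch.zeros(batch, nheads_kv, max_len, headdim, dtype=dtype, device=device)
# To pack an existing K cache stored as [B, H_kv, L, Dh] (hypothetical k_unpacked):
# k_cache = k_unpacked.reshape(batch, nheads_kv, max_len, headdim // packsize,
#                              packsize).permute(0, 1, 3, 2, 4).contiguous()
lengths = torch.full((batch,), timestep, dtype=torch.int32, device=device)

out = ft_attention.single_query_attention(
    q, k, v, k_cache, v_cache,
    lengths,      # length_per_sample_
    None, None,   # rotary_cos_, rotary_sin_ (rotary embedding disabled here)
    None,         # nnz_head_idx_
    timestep,
)
assert out.shape == q.shape  # output has the same [batch, nheads, headdim] shape as q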
flash-attention/csrc/ft_attention/setup.py ADDED
@@ -0,0 +1,153 @@
+ # Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
+ import sys
+ import warnings
+ import os
+ from packaging.version import parse, Version
+
+ from setuptools import setup, find_packages
+ import subprocess
+
+ import torch
+ from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
+
+
+ # ninja build does not work unless include_dirs are abs path
+ this_dir = os.path.dirname(os.path.abspath(__file__))
+
+
+ def get_cuda_bare_metal_version(cuda_dir):
+     raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
+     output = raw_output.split()
+     release_idx = output.index("release") + 1
+     bare_metal_version = parse(output[release_idx].split(",")[0])
+
+     return raw_output, bare_metal_version
+
+
+ def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
+     raw_output, bare_metal_version = get_cuda_bare_metal_version(cuda_dir)
+     torch_binary_version = parse(torch.version.cuda)
+
+     print("\nCompiling cuda extensions with")
+     print(raw_output + "from " + cuda_dir + "/bin\n")
+
+     if (bare_metal_version != torch_binary_version):
+         raise RuntimeError(
+             "Cuda extensions are being compiled with a version of Cuda that does "
+             "not match the version used to compile Pytorch binaries. "
+             "Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
+             + "In some cases, a minor-version mismatch will not cause later errors: "
+             "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
+             "You can try commenting out this check (at your own risk)."
+         )
+
+
+ def raise_if_cuda_home_none(global_option: str) -> None:
+     if CUDA_HOME is not None:
+         return
+     raise RuntimeError(
+         f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
+         "If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
+         "only images whose names contain 'devel' will provide nvcc."
+     )
+
+
+ def append_nvcc_threads(nvcc_extra_args):
+     _, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
+     if bare_metal_version >= Version("11.2"):
+         nvcc_threads = os.getenv("NVCC_THREADS") or "4"
+         return nvcc_extra_args + ["--threads", nvcc_threads]
+     return nvcc_extra_args
+
+
+ if not torch.cuda.is_available():
+     # https://github.com/NVIDIA/apex/issues/486
+     # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
+     # which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
+     print(
+         "\nWarning: Torch did not find available GPUs on this system.\n",
+         "If your intention is to cross-compile, this is not an error.\n"
+         "By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
+         "Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
+         "and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
+         "If you wish to cross-compile for a single specific architecture,\n"
+         'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
+     )
+     if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None and CUDA_HOME is not None:
+         _, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
+         if bare_metal_version >= Version("11.8"):
+             os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6;9.0"
+         elif bare_metal_version >= Version("11.1"):
+             os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
+         elif bare_metal_version == Version("11.0"):
+             os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
+         else:
+             os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
+
+
+ print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
+ TORCH_MAJOR = int(torch.__version__.split(".")[0])
+ TORCH_MINOR = int(torch.__version__.split(".")[1])
+
+ cmdclass = {}
+ ext_modules = []
+
+ # Check, if ATen/CUDAGeneratorImpl.h is found, otherwise use ATen/cuda/CUDAGeneratorImpl.h
+ # See https://github.com/pytorch/pytorch/pull/70650
+ generator_flag = []
+ torch_dir = torch.__path__[0]
+ if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
+     generator_flag = ["-DOLD_GENERATOR_PATH"]
+
+ raise_if_cuda_home_none("--ft_attention")
+ # Check, if CUDA11 is installed for compute capability 8.0
+ cc_flag = []
+ _, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
+ if bare_metal_version < Version("11.0"):
+     raise RuntimeError("ft_attention is only supported on CUDA 11 and above")
+ cc_flag.append("-gencode")
+ cc_flag.append("arch=compute_70,code=sm_70")
+ cc_flag.append("-gencode")
+ cc_flag.append("arch=compute_80,code=sm_80")
+ if bare_metal_version >= Version("11.8"):
+     cc_flag.append("-gencode")
+     cc_flag.append("arch=compute_90,code=sm_90")
+
+ ext_modules.append(
+     CUDAExtension(
+         name="ft_attention",
+         sources=[
+             "ft_attention.cpp",
+             "decoder_masked_multihead_attention.cu",
+         ],
+         extra_compile_args={
+             "cxx": ["-O3", "-DENABLE_BF16"] + generator_flag,
+             "nvcc": append_nvcc_threads(
+                 [
+                     "-DENABLE_BF16",  # TODO
+                     "-O3",
+                     "-U__CUDA_NO_HALF_OPERATORS__",
+                     "-U__CUDA_NO_HALF_CONVERSIONS__",
+                     "-U__CUDA_NO_BFLOAT16_OPERATORS__",
+                     "-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
+                     "-U__CUDA_NO_BFLOAT162_OPERATORS__",
+                     "-U__CUDA_NO_BFLOAT162_CONVERSIONS__",
+                     "--expt-relaxed-constexpr",
+                     "--expt-extended-lambda",
+                     "--use_fast_math",
+                 ]
+                 + generator_flag
+                 + cc_flag
+             ),
+         },
+         include_dirs=[this_dir],
+     )
+ )
+
+ setup(
+     name="ft_attention",
+     version="0.1",
+     description="Attention for single query from FasterTransformer",
+     ext_modules=ext_modules,
+     cmdclass={"build_ext": BuildExtension} if ext_modules else {},
+ )
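
The setup script compiles a single CUDAExtension named ft_attention from ft_attention.cpp and decoder_masked_multihead_attention.cu. Below is a small, hypothetical build-driver sketch (not part of the committed sources): it only sets environment variables the script already consults, NVCC_THREADS via append_nvcc_threads and TORCH_CUDA_ARCH_LIST for the no-GPU cross-compilation branch, and then delegates to pip.

import os
import subprocess
import sys

# Parallelize nvcc; append_nvcc_threads reads NVCC_THREADS when CUDA >= 11.2.
os.environ.setdefault("NVCC_THREADS", "8")
# Optional: pin target architectures; the script's no-GPU branch reads this,
# and PyTorch's extension builder may consult it as well.
os.environ.setdefault("TORCH_CUDA_ARCH_LIST", "8.0")

# Build and install the extension; run from flash-attention/csrc/ft_attention/.
subprocess.check_call(
    [sys.executable, "-m", "pip", "install", "--no-build-isolation", "."]
)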