Columns (name: type, observed range):
uuid: string, length 36
file_name: string, length 5 to 50
repo_name: string, 110 distinct values
file_path: string, length 7 to 112
commit_hash: string, 110 distinct values
starcount: int64, 0 to 0
input: string, length 39 to 33.8k
category: dict
licenses: list, length 1 to 2
github_url: string, length 94 to 193
6d32b21d-530f-4d1d-a2b2-bb08a0c056cc
triton_matric_matmul.py
elphinkuo/fast_matrix_multiplication
dot_product/on_gpu/Triton/triton_matric_matmul.py
4e875a17e95b7ccf9af102d2c0f8cc31ed9a29f3
0
@triton.jit def _matmul_kernel(A, B, C, M, N, K, **meta): TILE_M = meta['BLOCK_M'] TILE_N = meta['BLOCK_N'] TILE_K = 128 m = tl.program_id(0) * TILE_M + tl.arange(0, TILE_M) n = tl.program_id(1) * TILE_N + tl.arange(0, TILE_N) acc = tl.zeros((TILE_M, TILE_N), dtype=tl.float32) for k in range...
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication", "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ...
[ "MIT" ]
https://github.com/elphinkuo/fast_matrix_multiplication/blob/4e875a17e95b7ccf9af102d2c0f8cc31ed9a29f3/dot_product/on_gpu/Triton/triton_matric_matmul.py
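The input above is truncated; as a point of reference for the categories it carries (tiled, thread-block mapped, fp32 accumulation), a minimal tiled-matmul sketch in the same style follows. It is not the repository's kernel; the names, strides, and block sizes are illustrative assumptions.

import triton
import triton.language as tl

@triton.jit
def _tiled_matmul_sketch(a_ptr, b_ptr, c_ptr, M, N, K,
                         stride_am, stride_ak, stride_bk, stride_bn, stride_cm, stride_cn,
                         BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr):
    # Each program instance owns one BLOCK_M x BLOCK_N tile of C.
    pid_m = tl.program_id(0)
    pid_n = tl.program_id(1)
    offs_m = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    offs_k = tl.arange(0, BLOCK_K)
    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
    for k0 in range(0, K, BLOCK_K):
        a = tl.load(a_ptr + offs_m[:, None] * stride_am + (k0 + offs_k)[None, :] * stride_ak,
                    mask=(offs_m[:, None] < M) & ((k0 + offs_k)[None, :] < K), other=0.0)
        b = tl.load(b_ptr + (k0 + offs_k)[:, None] * stride_bk + offs_n[None, :] * stride_bn,
                    mask=((k0 + offs_k)[:, None] < K) & (offs_n[None, :] < N), other=0.0)
        acc += tl.dot(a, b)  # accumulate this K-slab's contribution in fp32
    tl.store(c_ptr + offs_m[:, None] * stride_cm + offs_n[None, :] * stride_cn, acc,
             mask=(offs_m[:, None] < M) & (offs_n[None, :] < N))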
1325bcba-7d28-47b5-b18b-31e0f79878f5
triton_sll.py
pytorch/FBGEMM
fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit def jagged_2_softmax_kernel(input_ptr, output_ptr, offsets_row_ptr, offsets_col_ptr, offsets_overall_ptr, input_stride, output_stride, transpose, max_seq_len_row, max_seq_len_col, BLOCK_SIZE: tl.constexpr): """ input shape is [sum_B(Ni * Hi)] output shape is [sum_B(Ni * Hi)] Padded v...
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax" ], "Memory Access Pattern": [ "Coalesced", "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
c5681889-cbdf-4c8c-b730-916ee6ccb0d9
triton_ops.py
huyz2023/2by4-pretrain
sparse/triton_ops.py
9e330125dea71e5a3dee235f4efb8869f9e4cdd0
0
@triton.jit def _MVUE24_approx_triton(dense_ptr, sparse_ptr, dense_row_stride, sparse_row_stride, dense_col_stride, sparse_col_stride, m, k, seed, BLOCK_SIZE: tl.constexpr, ARRAY_LAYOUT: tl.constexpr): if ARRAY_LAYOUT == 'row': row_idx = tl.program_id(0) col_idx = tl.program_id(1) * 4 * BLOC...
{ "Data Type": [ "fp32", "uint8" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced", "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Memory-Bound", "Compute Bound" ] }
[ "BSD" ]
https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/triton_ops.py
ae2cb3a3-be88-4b7a-bbba-b418ac601259
parallel_scan.py
chengkai-liu/RecBLR
parallel_scan.py
66e520c26e28c05a5425ba2e81c9169b7e0176e2
0
@triton.jit
def unpack64(merged):
    tl.static_assert(merged.dtype == tl.uint64)
    b = (merged & 4294967295).to(tl.uint32).to(tl.float32, bitcast=True)
    a = (merged >> 32).to(tl.uint32).to(tl.float32, bitcast=True)
    return a, b
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/chengkai-liu/RecBLR/blob/66e520c26e28c05a5425ba2e81c9169b7e0176e2/parallel_scan.py
f031d842-9ab5-40cf-b113-7fe0ef2ae51e
y_5.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/direct/y_5.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit def fifth_order_bwd(coord_ptr: tl.tensor, coord_grad_ptr: tl.tensor, sph_grad_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl.constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr, output_stride: tl.constexpr): block_id = tl.program_id(0) coord_stride = 3 coord_s...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_5.py
e4b5235d-37a5-4b17-8c48-fa82e3aecf4f
paged_attn.py
AlibabaPAI/FLASHNN
flashnn/triton_kernels/paged_attn.py
528a9301587f5fb135b25d973a87ba0a40a703a7
0
@triton.autotune(configs=[triton.Config({'UNROLL_FACTOR': uf}) for uf in [1, 2, 4, 8]], key=['POWER_OF_2_MAX_SEQ_LEN', 'QUERY_GROUP_SIZE', 'USE_PARTITIONING', 'BLOCK_SIZE', 'HEAD_SIZE', 'PARTITION_SIZE']) @triton.jit def _paged_attn_wo_mma_kernel(exp_sums, max_logits, out, q, k_cache, v_cache, scale, block_...
{ "Data Type": [ "fp16", "fp32" ], "Functionality": [ "Attention Mechanisms", "Softmax", "Matrix Multiplication" ], "Memory Access Pattern": [ "Blocked Access", "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops", "Persistent Kernels" ], "Performan...
[ "Apache" ]
https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/paged_attn.py
cd64708f-5721-4a20-be92-b7c64e1762ca
GELUglu.py
huyz2023/2by4-pretrain
sparse/GELUglu.py
9e330125dea71e5a3dee235f4efb8869f9e4cdd0
0
@triton.jit def _gelu_glu_fwd_kernel(output_ptr, input_ptr, output_row_stride, input_row_stride, output_col_stride, input_col_stride, output_page_stride, input_page_stride, n_pages, BLOCK_SIZE: tl.constexpr): row_idx = tl.program_id(0) col_idx = tl.program_id(1) x = tl.load(input_ptr + row_idx * inp...
{ "Data Type": [], "Functionality": [ "Activation Functions" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "BSD" ]
https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/GELUglu.py
e3441201-2cc6-4bc0-b20c-0cd97d2fe333
triton_welford.py
pytorch-labs/tritonbench
tritonbench/operators/welford/triton_welford.py
3a5dccb159834968567a2e45e561dc1aeaa8f8a8
0
@triton.autotune(configs=[triton.Config({'XBLOCK': 1, 'RBLOCK': 1024}, num_stages=1, num_warps=8), triton.Config({'XBLOCK': 1, 'RBLOCK': 2048}, num_stages=1, num_warps=8)], key=['xnumel', 'rnumel']) @triton.jit def triton_red_fused_native_layer_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_pt...
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "Memory-Bound" ] }
[ "BSD" ]
https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/welford/triton_welford.py
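The kernel above fuses LayerNorm statistics with Welford's algorithm, per its name; as a reminder of the underlying update rule (plain Python, not the Triton kernel itself):

def welford_update(count, mean, m2, x):
    # One streaming step of Welford's algorithm: running mean and sum of squared deviations.
    count += 1
    delta = x - mean
    mean += delta / count
    m2 += delta * (x - mean)
    return count, mean, m2

# After all elements are folded in: variance = m2 / count (population) or m2 / (count - 1) (sample).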
b6bbdca6-8c98-4528-a967-b358c90a1d6f
triton_fused_local_attn.py
LouChao98/vqtree
ops/triton_fused_local_attn.py
27a53274df7a804bce27dffcce5f5be73f64b6f3
0
@triton.jit def _attn_fwd_inner(acc, l_i, m_i, q, sm_scale, K_block_ptr, V_block_ptr, start_m, offs_m, offs_n, SEQLEN_K: tl.constexpr, WINDOW_SIZE: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, EVEN_MN: tl.constexpr, STAGE: tl.constexpr): if STAGE == 1: hi = start_m * BLOCK_M - W...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_local_attn.py
362d6816-df21-46a1-b625-bc3f25aab424
06-fused-attention.py
triton-lang/triton
python/tutorials/06-fused-attention.py
a2b398e0bb1b120f31cf386d6ae3261c3ab84207
0
@triton.jit def _attn_bwd_dkdv(dk, dv, Q, k, v, sm_scale, DO, M, D, stride_tok, stride_d, H, N_CTX, BLOCK_M1: tl.constexpr, BLOCK_N1: tl.constexpr, HEAD_DIM: tl.constexpr, start_n, start_m, num_steps, MASK: tl.constexpr): offs_m = start_m + tl.arange(0, BLOCK_M1) offs_n = start_n + tl.arange(0, BLOCK_N1...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/tutorials/06-fused-attention.py
465954ee-4cfe-46e9-8668-a230f02bb257
triton_sll.py
pytorch/FBGEMM
fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit def jagged_dense_bmm_kernel(a_ptr, a_offset_ptr, b_ptr, c_ptr, N, K, stride_am, stride_ak, stride_bl, stride_bk, stride_bn, stride_cm, stride_cn, max_seq_len, allow_tf32: tl.constexpr, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr): """Kernel for computi...
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "Memory-Bound" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
ff383b02-6ac6-4d19-8a8c-ff80198f182f
z_order.py
Kitsunetic/space-filling-pytorch
space_filling_pytorch/functional/z_order.py
0de955ad1036973ee7506c5a0124c208acec722d
0
@triton.jit def _encode_z_kernel(xyz_ptr, distance_ptr, B, N, space_size, x_offset, y_offset, z_offset, str_xyz_B, str_xyz_N, str_xyz_C, BLK: tl.constexpr, ASSIGN_BATCH_INDEX: tl.constexpr): pid_b = tl.program_id(0) pid_n = tl.program_id(1) offs_n = pid_n * BLK + tl.arange(0, BLK) mask_n = offs_...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/Kitsunetic/space-filling-pytorch/blob/0de955ad1036973ee7506c5a0124c208acec722d/space_filling_pytorch/functional/z_order.py
1f260b65-2aa3-4dd8-ad87-6f5bba941dd2
block_sparse_attention_lut.py
sparklesea/sparse-quant
sparse-attention/muxi/playground/kernels/block_sparse_attention_lut.py
e3d8b6ecab208c31b744913ed8c3caaa43605f86
0
@triton.jit def _sparse_attention_prefill_fwd_kernel(Q, K, V, sm_scale, Out, lut, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, stride_om, stride_on, stride_lz, stride_lh, stride_lx, Z, H, N_CTX, ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache", "BSD" ]
https://github.com/sparklesea/sparse-quant/blob/e3d8b6ecab208c31b744913ed8c3caaa43605f86/sparse-attention/muxi/playground/kernels/block_sparse_attention_lut.py
33299f98-59f0-48e0-ae23-2da139cb499d
triton_sll.py
pytorch/FBGEMM
fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit def jagged_softmax_backward_kernel(grad_output_ptr, softmax_output_ptr, grad_input_ptr, input_offsets_ptr, grad_output_row_stride, grad_output_head_stride, softmax_output_row_stride, softmax_output_head_stride, grad_input_row_stride, grad_input_head_stride, max_seq_len: tl.constexpr, BLOCK_S...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Softmax" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
a5dd6188-758a-4f75-ad16-7e404fe62595
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/linear_attn/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def chunk_linear_attn_fwd_kernel_h(k, v, h, h0, ht, s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, s_h_h, s_h_t, T: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, NT: tl.constexpr, USE_INITIAL_STATE: tl.constexpr, STORE_FINAL_STATE: tl.const...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/linear_attn/chunk.py
e7e27939-c077-46e1-9632-7858a429dae5
k_layer_norm.py
cpuhrsch/torchfused
torchfused/triton/k_layer_norm.py
6c40ed160dcecbe7825f268f7c86bccd359e0ebf
0
@triton.jit
def _layer_norm_non_affine_fw(X, Y, M, V, stride, N, eps, **META):
    _store(_layer_norm_non_affine(X, M, V, stride, N, eps, META), Y, stride, N, META)
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "BSD" ]
https://github.com/cpuhrsch/torchfused/blob/6c40ed160dcecbe7825f268f7c86bccd359e0ebf/torchfused/triton/k_layer_norm.py
a8c30808-d914-41b7-8bd2-094cbdbfcbd0
k_fused_matmul_bw.py
cpuhrsch/torchfused
torchfused/triton/k_fused_matmul_bw.py
6c40ed160dcecbe7825f268f7c86bccd359e0ebf
0
@triton.heuristics({'EVEN_N': lambda *args, **meta: args[3] % meta['BLOCK_COL'] == 0}) @triton.autotune(configs=[triton.Config({'BLOCK_COL': 32}, num_stages=5, num_warps=2), triton.Config({'BLOCK_COL': 64}, num_stages=5, num_warps=2), triton.Config({'BLOCK_COL': 128}, num_stages=3, num_warps=4), trito...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Activation Functions" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "BSD" ]
https://github.com/cpuhrsch/torchfused/blob/6c40ed160dcecbe7825f268f7c86bccd359e0ebf/torchfused/triton/k_fused_matmul_bw.py
e3fb6f4a-8ca3-4994-899f-d62d808652d3
shape.py
2niuhe/triton_utils
src/triton_utils/shape.py
6184906ac3b86dac3ccbfac128ec393ccecde5df
0
@triton.jit
def store_1d(vals, ptr, sz: tl.constexpr, n, max, stride=1):
    """Store 1d block into nth chunk of vector (defined by ptr), where each chunk has size sz"""
    offs = get_1d_offest(sz, n)
    mask = get_1d_mask(offs, max)
    tl.store(ptr + offs, vals, mask)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Memory-Bound" ] }
[ "Apache" ]
https://github.com/2niuhe/triton_utils/blob/6184906ac3b86dac3ccbfac128ec393ccecde5df/src/triton_utils/shape.py
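store_1d above leans on two helpers from the same repo, get_1d_offest and get_1d_mask, which this preview does not show. A minimal sketch of what such helpers conventionally compute (the names below are hypothetical stand-ins, not the repo's API):

import triton
import triton.language as tl

@triton.jit
def get_1d_offset_sketch(sz: tl.constexpr, n):
    # Offsets of the n-th chunk of size sz within a flat vector.
    return n * sz + tl.arange(0, sz)

@triton.jit
def get_1d_mask_sketch(offs, max):
    # Guard against reading or writing past the first `max` elements.
    return offs < max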
ad3e39e4-beb3-4789-856e-e24e65695e79
wy_fast.py
sustcsonglin/flash-linear-attention
fla/ops/delta_rule/wy_fast.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for num_warps in [1, 2, 4, 8]], key=['BT', 'BK']) @triton.jit def fwd_recompute_w_kernel(k, beta, w, A, offsets, indices, T: tl.constexpr, H: tl.constexpr, K: tl.constex...
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/wy_fast.py
02c185b4-ba6d-4e60-84de-9ccd865f78e9
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/abc/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def chunk_abc_fwd_kernel_intra_K(v, z, o, A, s_v_h, s_v_t, s_v_d, T: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BC: tl.constexpr, BV: tl.constexpr, NC: tl.constexpr): i_v, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) i_t, i_i = i_c // NC, i_c % NC p_z = tl.mak...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "Memory-Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/abc/chunk.py
159f0cc5-72b0-4231-97dc-2a5e3f2c0d0b
hilbert.py
Kitsunetic/space-filling-pytorch
space_filling_pytorch/functional/hilbert.py
0de955ad1036973ee7506c5a0124c208acec722d
0
@triton.jit def _encode_hilbert_unpadded_kernel(xyz_ptr, batch_idx_ptr, code_ptr, space_size, x_offset, y_offset, z_offset, str_xyz_n, str_xyz_c, N, BLK: tl.constexpr, ASSIGN_BATCH_INDEX: tl.constexpr): pid = tl.program_id(0) offs_n = pid * BLK + tl.arange(0, BLK) mask = offs_n < N xyz_ptrs = xy...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/Kitsunetic/space-filling-pytorch/blob/0de955ad1036973ee7506c5a0124c208acec722d/space_filling_pytorch/functional/hilbert.py
2c6c706f-18a5-446c-bc50-dd5319c23177
triton_fused_local_attn_rerope.py
LouChao98/vqtree
ops/triton_fused_local_attn_rerope.py
27a53274df7a804bce27dffcce5f5be73f64b6f3
0
@triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args['BLOCK_M'] == 0, 'EVEN_N': lambda args: args['seqlen_k'] % args['BLOCK_N'] == 0}) @triton.jit def _fwd_kernel(Q1, Q2, K1, K2, V, Out, L, softmax_scale, stride_qb, stride_qh, stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Softmax" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "Apache" ]
https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_local_attn_rerope.py
295647f9-b805-4b3f-8f9a-72124ff188fd
relu.py
daemyung/practice-triton
relu.py
27f727726f1507c8380a1c11751d851c7c4a07ce
0
@staticmethod @triton.jit def backward(grad_input_ptr, grad_output_ptr, input_ptr, size, block_size: tl.constexpr): pid = tl.program_id(0) offset = pid * block_size grad_input_block_ptr = tl.make_block_ptr(grad_input_ptr, shape=(size,), strides=(1,), offsets=(offset,), block_shape=(block_size,),...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced", "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/daemyung/practice-triton/blob/27f727726f1507c8380a1c11751d851c7c4a07ce/relu.py
98f78d67-d8c7-4106-a9bc-6716d5cd0889
sb_varlen_fwd.py
shawntan/stickbreaking-attention
stickbreaking_attention/sb_varlen/sb_varlen_fwd.py
8dd32ad5e58f0ee0232fd4782dc53d354ff8d283
0
@triton.jit def compute_block(q, k, qk_scale, neg_log_acc, M_blk_idxs, N_blk_idxs, cm, on_band: tl.constexpr, ALLOW_TF32: tl.constexpr, backward: tl.constexpr, attend_current: tl.constexpr=False, use_cumsum: tl.constexpr=False, is_compiling: tl.constexpr=False): qk = tl.dot(q, tl.trans(k), allow_tf32=AL...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Activation Functions" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/shawntan/stickbreaking-attention/blob/8dd32ad5e58f0ee0232fd4782dc53d354ff8d283/stickbreaking_attention/sb_varlen/sb_varlen_fwd.py
c3784dfb-4b90-4a4f-9175-4a69cc1f915d
fused_attn.py
thunlp/Delta-CoMe
quant/fused_attn.py
646a1fbf3443295c4b04aba27334c6bc5aa3df4f
0
@triton.jit def rotate_half_kernel(qk_seq_ptr, position_ids_ptr, qk_seq_stride, position_ids_batch_stride, seq_len, HEAD_DIM: tl.constexpr, BLOCK_HEIGHT: tl.constexpr, BLOCK_WIDTH: tl.constexpr, INV_BASE: tl.constexpr): HALF_HEAD: tl.constexpr = HEAD_DIM // 2 STEPS_PER_ROW: tl.constexpr = HALF_HEAD...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/thunlp/Delta-CoMe/blob/646a1fbf3443295c4b04aba27334c6bc5aa3df4f/quant/fused_attn.py
297bd9f8-dbf4-4cd4-b87b-6208c25245d1
pointwise.py
ServiceNow/Fast-LLM
fast_llm/functional/triton/pointwise.py
8b46289079da67cba99628448a6b6083dac083cf
0
@triton.jit def triton_copy_kernel(input_ptr, out_ptr, numel: tl.constexpr, block_size: tl.constexpr): block_start = tl.program_id(axis=0).to(tl.int64) * block_size offsets = block_start + tl.arange(0, block_size) mask = offsets < numel input_ = tl.load(input_ptr + offsets, mask=mask) tl.store(o...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced", "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Memory-Bound", "High Throughput" ] }
[ "Apache" ]
https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/pointwise.py
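triton_copy_kernel above is the canonical blocked elementwise pattern: one program per block of contiguous elements, masked at the tail. A self-contained sketch of the same pattern together with the host-side launch (illustrative names, not Fast-LLM's actual wrapper):

import torch
import triton
import triton.language as tl

@triton.jit
def _copy_sketch_kernel(in_ptr, out_ptr, numel, BLOCK: tl.constexpr):
    # Same blocked-elementwise pattern as the entry above: each program handles BLOCK elements.
    offs = tl.program_id(0).to(tl.int64) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < numel
    tl.store(out_ptr + offs, tl.load(in_ptr + offs, mask=mask), mask=mask)

def copy_sketch(x: torch.Tensor, block: int = 1024) -> torch.Tensor:
    out = torch.empty_like(x)
    grid = (triton.cdiv(x.numel(), block),)  # one program per block of `block` elements
    _copy_sketch_kernel[grid](x, out, x.numel(), BLOCK=block)
    return out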
c7b00225-9961-4819-bb21-d098a0681a35
RzLinearBackward.py
apd10/RzLinear
python/rz_linear/impl/RzLinearBackward.py
eb56657b2de0a97f398f88af421b0fbcbc5469c9
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 256, 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8), triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128, 'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8), triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 256, 'BLOCK_SI...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Persistent Kernels" ], "Performance Objective": [ "Memory-Bound", "Compute Bound" ] }
[ "MIT" ]
https://github.com/apd10/RzLinear/blob/eb56657b2de0a97f398f88af421b0fbcbc5469c9/python/rz_linear/impl/RzLinearBackward.py
b43d035a-53a0-4ffb-8bee-abc8a227f8b9
ops.py
shawntan/scattermoe
scattermoe/kernels/ops.py
63b76a2f5f28c052fb4cd7c34479a54158354052
0
@triton.autotune(configs=_scatter2scatter_configs(), key=['M', 'N', 'K']) @triton.heuristics({'NO_K_MASK': lambda args: args['K'] % args['BLOCK_K'] == 0, 'NO_N_MASK': lambda args: args['N'] % args['BLOCK_N'] == 0}) @triton.jit def _scatter2scatter(X_ptr, stride_xm, stride_xk, W_ptr, stride_we, stride_wk, stride...
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Memory-Bound", "Compute Bound" ] }
[ "Apache" ]
https://github.com/shawntan/scattermoe/blob/63b76a2f5f28c052fb4cd7c34479a54158354052/scattermoe/kernels/ops.py
98ed13e6-3d06-4b48-bf3b-55ccbee71cfb
dw_conv.py
neuro-ml/kerops
kerops/kernels/dw_conv.py
735336775e825d5cb06b8850d25423661b12d1ac
0
@triton.jit def _DWConv_wgrad_cl3d_impl(grad_ptr, input_ptr, weight_grad_ptr, H, W, D, H_stride, W_stride, ACCTYPE: tl.constexpr, channels: tl.constexpr, D_block: tl.constexpr, WD_grid): H_cell = tl.program_id(0) W_D_cell = tl.program_id(1) D_gridsize = tl.cdiv(D, D_block) W_cell = W_D_cell // D...
{ "Data Type": [ "fp16" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/neuro-ml/kerops/blob/735336775e825d5cb06b8850d25423661b12d1ac/kerops/kernels/dw_conv.py
2f63fe4e-4b75-45aa-b01f-c5b1eed17423
main_triton.py
dwgan/GraphMST
main_triton.py
4d65ed0f108d339e3e4cfff25085a39adc6a48a2
0
@triton.jit
def find_kernel(parent, u, ret_ptr, BLOCK_SIZE: tl.constexpr):
    pu = tl.load(parent + u)
    while pu != u:
        u = pu
        pu = tl.load(parent + u)
    ret_ptr[u % BLOCK_SIZE] = pu
{ "Data Type": [], "Functionality": [], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/dwgan/GraphMST/blob/4d65ed0f108d339e3e4cfff25085a39adc6a48a2/main_triton.py
fee89749-a0a9-4316-87b5-75545363f010
wy_fast.py
sustcsonglin/flash-linear-attention
fla/ops/delta_rule/wy_fast.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for num_warps in [1, 2, 4, 8, 16]], key=['BK']) @triton.jit def fwd_prepare_wy_repr_kernel_chunk32(k, beta, A, offsets, indices, T: tl.constexpr, H: tl.constexpr, K: tl...
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/wy_fast.py
3ccfe00c-b0c4-4828-873a-fd5b2174ea1b
kernels.py
pytorch-labs/tritonbench
tritonbench/operators/jagged_mean/kernels.py
3a5dccb159834968567a2e45e561dc1aeaa8f8a8
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_RAGGED': b_r, 'BLOCK_SIZE_M': b_m}, num_warps=w, num_stages=s) for b_r, b_m, w, s in itertools.product(BLOCK_SIZES_RAGGED, BLOCK_SIZES_M, NUM_WARPS, NUM_STAGES)], key=['M']) @triton.jit def triton_jagged_mean_kernel_variable_length_loop_sum_then_buffer( ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Persistent Kernels" ], "Performance Objective": [ "High Throughput" ] }
[ "BSD" ]
https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/jagged_mean/kernels.py
58fb14a4-ee5b-47db-b1e7-de8b3fd737be
fused_moe.py
Charlie-XIAO/sparse-vllm
vllm/model_executor/layers/fused_moe/fused_moe.py
d228909a30b0c245c35417fb7d2acdf9a3690042
0
@triton.jit def fused_moe_kernel(a_ptr, b_ptr, c_ptr, a_scale_ptr, b_scale_ptr, topk_weights_ptr, sorted_token_ids_ptr, expert_ids_ptr, num_tokens_post_padded_ptr, N, K, EM, num_valid_tokens, stride_am, stride_ak, stride_be, stride_bk, stride_bn, stride_cm, stride_cn, stride_bse, stride_bsn, BLOCK_SIZE_...
{ "Data Type": [ "int8", "fp16" ], "Functionality": [ "Matrix Multiplication", "Top-K Selection" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "Apache" ]
https://github.com/Charlie-XIAO/sparse-vllm/blob/d228909a30b0c245c35417fb7d2acdf9a3690042/vllm/model_executor/layers/fused_moe/fused_moe.py
b915901f-25e7-4e07-86fb-3ce11a600e0e
test_triton_varargs.py
facebookresearch/xformers
tests/test_triton_varargs.py
a2f37f8c5f4e3ae0d3459a92e42cd1aeb45b03bc
0
@triton.jit def kernel(x_ptrs: 'VAR_ARGS_ARRAY', y_ptrs: 'VAR_ARGS_ARRAY', numel, BLOCK_SIZE: tl.constexpr): pid = tl.program_id(axis=0) offsets = BLOCK_SIZE * pid + tl.arange(0, BLOCK_SIZE) mask = offsets < numel for i in range(len(x_ptrs)): x_ptr = x_ptrs[i] y_ptr = y_ptrs[i] ...
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "BSD" ]
https://github.com/facebookresearch/xformers/blob/a2f37f8c5f4e3ae0d3459a92e42cd1aeb45b03bc/tests/test_triton_varargs.py
b5a86ee1-c573-4ef0-b121-3f8c69923b2c
triton_fused_attention.py
pytorch-labs/tritonbench
tritonbench/kernels/triton_fused_attention.py
3a5dccb159834968567a2e45e561dc1aeaa8f8a8
0
@triton.autotune(list(filter(keep, configsWS)), key=['N_CTX']) @triton.jit def _attn_fwd_ws(Q, K, V, sm_scale, M, Out, desc_q, desc_k, desc_v, desc_o, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, str...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "BSD" ]
https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/kernels/triton_fused_attention.py
ac3a79c4-8892-4afd-bc3a-b1376778c60f
parallel_scan.py
chengkai-liu/RecBLR
parallel_scan.py
66e520c26e28c05a5425ba2e81c9169b7e0176e2
0
@triton.jit
def pack64(a, b):
    tl.static_assert(a.dtype == tl.float32)
    tl.static_assert(b.dtype == tl.float32)
    a = a.to(dtype=tl.uint32, bitcast=True).to(tl.uint64)
    a = a << 32
    b = b.to(dtype=tl.uint32, bitcast=True).to(tl.uint64)
    return a | b
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Register Intensive" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/chengkai-liu/RecBLR/blob/66e520c26e28c05a5425ba2e81c9169b7e0176e2/parallel_scan.py
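pack64 is the inverse of unpack64 from the same file (see the earlier parallel_scan.py entry): two fp32 lanes are bitcast into one uint64 so a single associative scan can carry a (gate, value) pair. A round-trip sketch, assuming both jitted helpers above are in scope:

import triton
import triton.language as tl

@triton.jit
def _pack_roundtrip_sketch(a_ptr, b_ptr, out_a_ptr, out_b_ptr, n, BLOCK: tl.constexpr):
    # Illustrative only: pack two fp32 values into a uint64 and recover them bit-exactly.
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n
    a = tl.load(a_ptr + offs, mask=mask, other=0.0)
    b = tl.load(b_ptr + offs, mask=mask, other=0.0)
    packed = pack64(a, b)       # a in the high 32 bits, b in the low 32 bits
    a2, b2 = unpack64(packed)   # bitcast back to two fp32 values
    tl.store(out_a_ptr + offs, a2, mask=mask)
    tl.store(out_b_ptr + offs, b2, mask=mask)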
aeb1d88e-f7a3-4baf-ad05-a447c96fd287
nll_loss_kernels.py
BobMcDear/attorch
attorch/nll_loss_kernels.py
da06cb6236bb47195e33fe3986ed21c675ed94cc
0
@triton.autotune(configs=warps_kernel_configs(), key=['batch_dim', 'spatial_dim']) @triton.heuristics({'BLOCK_SIZE_BATCH': BLOCK_SIZE_BATCH_heuristic, 'BLOCK_SIZE_SPATIAL': lambda args: next_power_of_2(args['spatial_dim'])}) @triton.jit def nll_loss_forward_kernel(input_pointer, target_pointer, weight_pointer, ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/nll_loss_kernels.py
f9da9e73-4afd-45c7-a28d-2725468622a1
paged_attn.py
AlibabaPAI/FLASHNN
flashnn/triton_kernels/paged_attn.py
528a9301587f5fb135b25d973a87ba0a40a703a7
0
@triton.autotune(configs=[triton.Config({}, num_warps=warps) for warps in [4, 8, 16]], key=['QUERY_GROUP_SIZE', 'HEAD_SIZE', 'NUM_PARTITIONS', 'PARTITION_SIZE']) @triton.jit def _paged_attn_w_mma_v2_reduce_kernel(out_ptr, m_i_ptr, l_i_ptr, tmp_out_ptr, context_lens_ptr, max_num_partitions, stride_o0, strid...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Softmax" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "Apache" ]
https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/paged_attn.py
fbbb8e30-59c7-4345-a0af-c5932ca05a42
hello_triton.py
gmgu/study-triton
1_hello_triton/hello_triton.py
3a9a24fd3f1de3e7465535ffe72f6deac8a419bd
0
@triton.jit
def hello_kernel():
    print('Hello Triton Kernel!')
{ "Data Type": [], "Functionality": [], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "Apache" ]
https://github.com/gmgu/study-triton/blob/3a9a24fd3f1de3e7465535ffe72f6deac8a419bd/1_hello_triton/hello_triton.py
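For completeness, a jitted function like hello_kernel only runs when launched over a grid; a minimal launch looks like the following (the import path is a hypothetical guess based on the file name, and a CUDA-capable GPU is assumed):

from hello_triton import hello_kernel  # hypothetical import of the entry above

# A @triton.jit function never runs as a plain Python call; one program is enough here.
hello_kernel[(1,)]()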
89417c21-0b2b-4b0f-bb94-3113c88d8895
adam.py
ServiceNow/Fast-LLM
fast_llm/functional/triton/adam.py
8b46289079da67cba99628448a6b6083dac083cf
0
@triton.jit def triton_adam_kernel(params_ptr, grads_ptr, exp_avgs_ptr, exp_avg_sqs_ptr, noop_flag_ptr, scale_ptr, step_size, beta1, beta2, bias_correction, decay_factor, epsilon, numel: tl.constexpr, block_size: tl.constexpr): noop_flag = tl.load(noop_flag_ptr) if noop_flag != 0: return sca...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput...
[ "Apache" ]
https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/adam.py
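For orientation, the elementwise update such a fused Adam kernel applies per parameter is the textbook Adam step. The plain-Python sketch below maps the kernel's argument names onto it; exactly how Fast-LLM folds bias_correction, decay_factor, and the loaded scale into the step is an assumption, not read from the truncated code.

def adam_step_sketch(param, grad, exp_avg, exp_avg_sq,
                     step_size, beta1, beta2, bias_correction, decay_factor, epsilon):
    # Textbook Adam on one scalar; the kernel applies the same math to whole blocks of elements.
    exp_avg = beta1 * exp_avg + (1.0 - beta1) * grad
    exp_avg_sq = beta2 * exp_avg_sq + (1.0 - beta2) * grad * grad
    update = exp_avg / (exp_avg_sq ** 0.5 / bias_correction + epsilon)  # assumed use of bias_correction
    param = param * decay_factor - step_size * update                   # assumed decoupled weight decay
    return param, exp_avg, exp_avg_sq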
b21323e3-f171-4003-9eda-bd4fcfee5aff
flash_attention.py
falkaer/multi-scale-music
seq/flash_attention.py
a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d
0
@triton.jit
def make_bounds(offs_m, offs_n, M, N, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr):
    if EVEN_M:
        mask = offs_n[None, :] < N
    elif EVEN_N:
        mask = offs_m[:, None] < M
    else:
        mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
    return mask
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/falkaer/multi-scale-music/blob/a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d/seq/flash_attention.py
5f6f1215-91df-4856-935d-ad21674c7526
rwkv_log.py
berlino/seq_icl
src/models/sequence/rnn/scan_triton/rwkv_log.py
9b9223d15348b5a415fb453ed988ed5f7ab9fbdc
0
@triton.jit
def logaddexp(a, b):
    max_ab = tl.maximum(a, b)
    return max_ab + tl.log(tl.exp(a - max_ab) + tl.exp(b - max_ab))
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "Apache" ]
https://github.com/berlino/seq_icl/blob/9b9223d15348b5a415fb453ed988ed5f7ab9fbdc/src/models/sequence/rnn/scan_triton/rwkv_log.py
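The kernel-side logaddexp above uses the usual max-shift trick so exp never sees a large positive argument; a plain-Python reference makes the stabilization explicit:

import math

def logaddexp_ref(a, b):
    # Same stabilization as the kernel: factor out max(a, b) so exp() sees only non-positive inputs.
    m = max(a, b)
    return m + math.log(math.exp(a - m) + math.exp(b - m))

# Without the shift, exp(1000.0) overflows; with it the result stays finite:
assert logaddexp_ref(1000.0, 1000.0) == 1000.0 + math.log(2.0)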
aeaaa009-521f-43fd-884c-f286d78d2d44
fused_linear_cross_entropy.py
sustcsonglin/flash-linear-attention
fla/modules/fused_linear_cross_entropy.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def cross_entropy_kernel(logits, lse, target, loss, total, ignore_index, label_smoothing: tl.constexpr, logit_scale: tl.constexpr, reduction: tl.constexpr, V: tl.constexpr, BV: tl.constexpr): """ This kernel computes both cross entropy loss and the gradient of the input. We only conside...
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "Memory-Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/fused_linear_cross_entropy.py
0259cfec-6015-444e-944d-75eaa64eb07f
y_4.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/direct/y_4.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit def fourth_order_fwd(coord_ptr: tl.tensor, output_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl.constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr, output_stride: tl.constexpr): coord_stride = 3 block_id = tl.program_id(0) coord_striding = tl.arange(0, block_siz...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_4.py
99554c85-0a2d-42e8-ab1a-65744f560890
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/gla/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({}, num_warps=2), triton.Config({}, num_warps=4), triton.Config({}, num_warps=8)], key=['BC']) @triton.jit def chunk_gla_fwd_A_kernel_intra_sub_intra_merge(A, A2,...
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gla/chunk.py
58b3463b-151f-4a0e-bc35-134133839e16
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/abc/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def chunk_abc_fwd_kernel_V(q, v, z, h, o, A, s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, s_h_h, s_h_t, s_h_d, scale, T: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr): i_v, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)...
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/abc/chunk.py
466944e0-8df6-43af-8e5a-f9a4513cce97
wy_fast.py
sustcsonglin/flash-linear-attention
fla/ops/gated_delta_rule/wy_fast.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for num_warps in [2, 4, 8]], key=['BT', 'BK', 'BV']) @triton.jit def fwd_recompute_w_u_kernel(k, v, beta, w, u, Aw, Au, offsets, indices, T: tl.constexpr, H: tl.constexp...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gated_delta_rule/wy_fast.py
b7a78b7c-edba-48ab-88b8-0d8b4fa84948
normalization.py
ServiceNow/Fast-LLM
fast_llm/functional/triton/normalization.py
8b46289079da67cba99628448a6b6083dac083cf
0
@triton.jit def triton_normalization_backward_kernel_2(grad_weight_partial_ptr, grad_bias_partial_ptr, grad_weight_ptr, grad_bias_ptr, m, n_cols, has_bias: tl.constexpr, accumulate_grad: tl.constexpr, block_size_m: tl.constexpr, block_size_n: tl.constexpr): pid = tl.program_id(0) cols = pid * block...
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "Apache" ]
https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/normalization.py
26e4d111-f4aa-445c-91fa-fcdaff284554
k_layer_norm.py
cpuhrsch/torchfused
torchfused/triton/k_layer_norm.py
6c40ed160dcecbe7825f268f7c86bccd359e0ebf
0
@triton.jit def _layer_norm_no_affine_bwd(DX, DY, Y, V, stride, N, **META): row = tl.program_id(0) cols = tl.arange(0, META['BLOCK_SIZE_N']) y_ptrs = Y + row * stride + cols dy_ptrs = DY + row * stride + cols y = tl.load(y_ptrs, mask=cols < N, other=0).to(tl.float32) dy = tl.load(dy_ptrs, mask=c...
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "BSD" ]
https://github.com/cpuhrsch/torchfused/blob/6c40ed160dcecbe7825f268f7c86bccd359e0ebf/torchfused/triton/k_layer_norm.py
8cfb5a84-099f-41cc-9d64-f30e70e6e39b
quantization.py
neuro-ml/kerops
kerops/kernels/quantization.py
735336775e825d5cb06b8850d25423661b12d1ac
0
@triton.jit def _QuantUint8Window_impl(input_ptr, output_ptr, numel, window, BLOCK_SIZE: tl.constexpr): tid = tl.program_id(0) input_ptr += tid * BLOCK_SIZE output_ptr += tid * BLOCK_SIZE offset = tl.arange(0, BLOCK_SIZE) mask = offset < numel - tid * BLOCK_SIZE input = tl.load(input_ptr + o...
{ "Data Type": [ "uint8" ], "Functionality": [ "Quantization" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/neuro-ml/kerops/blob/735336775e825d5cb06b8850d25423661b12d1ac/kerops/kernels/quantization.py
ecb649cc-99a1-47b3-abec-69aff2b74328
kernel_benchmark.py
ruikangliu/FlatQuant
benchmarks/kernel_benchmark.py
9d3032065f1688cb3f71ebc8166df6d91440e871
0
@triton.autotune(configs=[triton.Config({}, num_stages=2, num_warps=4), triton.Config({}, num_stages=2, num_warps=2), triton.Config({}, num_stages=3, num_warps=4), triton.Config({}, num_stages=3, num_warps=2), triton.Config({}, num_stages=4, num_warps=4), triton.Config({}, num_stages=4, num_warps=2)], ...
{ "Data Type": [ "fp32", "fp16", "int8" ], "Functionality": [ "Matrix Multiplication", "Quantization" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/ruikangliu/FlatQuant/blob/9d3032065f1688cb3f71ebc8166df6d91440e871/benchmarks/kernel_benchmark.py
06a01956-3562-48af-87d8-0ba21f8d29e7
fused_moe_a8w8.py
AlibabaPAI/FLASHNN
flashnn/triton_kernels/fused_moe_a8w8.py
528a9301587f5fb135b25d973a87ba0a40a703a7
0
@triton.jit def _fused_moe_a8w8_kernel(A, B, C, alpha_row_ptr, alpha_col_ptr, topk_weights_ptr, sorted_token_ids_ptr, expert_ids_ptr, num_tokens_post_padded_ptr, N, K, EM, num_valid_tokens, stride_am, stride_ak, stride_be, stride_bn, stride_bk, stride_cm, stride_cn, stride_scale_be, stride_scale_bn, BLO...
{ "Data Type": [ "fp16" ], "Functionality": [ "Matrix Multiplication", "Top-K Selection" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "Apache" ]
https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/fused_moe_a8w8.py
18bdbd68-3013-4acb-9efe-c2827d61c4ee
y_7.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/direct/y_7.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit def seventh_order_bwd(coord_ptr: tl.tensor, coord_grad_ptr: tl.tensor, sph_grad_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl.constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr, output_stride: tl.constexpr): block_id = tl.program_id(0) coord_stride = 3 coord...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_7.py
cf434cf1-eaa7-43a8-af57-ed65644e78a1
tritonFun.py
microsoft/Givens-Orthogonal-Backprop
rotMat/triton/tritonFun.py
3040fa287aacbf07be56eb12ddd7c513f7800191
0
@triton.jit def _forward_kernel(c_ptr, s_ptr, u_ptr, col_stride, row_stride, **meta): n, n_tilde, dead_index, d_max, tournament_step, BLOCK_SIZE = meta['N'], meta['N_TILDE'], meta['DEAD_INDEX'], meta['D_MAX'], meta['STEP'], meta['BLOCK_SIZE'] pid_x = tl.program_id(axis=0) temp = n_tilde - ...
{ "Data Type": [], "Functionality": [], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/microsoft/Givens-Orthogonal-Backprop/blob/3040fa287aacbf07be56eb12ddd7c513f7800191/rotMat/triton/tritonFun.py
ba747cf4-08d3-4408-bf74-1154ad010718
triton_chunk.py
NX-AI/xlstm-jax
xlstm_jax/models/xlstm_pytorch/blocks/mlstm/backend/triton_chunk.py
6615e620ba4ecdbe4fd9cc4e9a5a313b133e84a7
0
@triton.jit def chunk_mlstm_fwd_kernel_h(q, k, v, C, n, m, m_total, i, f, h, norm, s_qk_h, s_qk_t, s_qk_d, s_vh_h, s_vh_t, s_vh_d, s_C_h, s_C_t, s_n_h, scale, H: tl.constexpr, T: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, NT: tl.constexpr): ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Recurrent Neural Networks", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "H...
[ "Apache", "BSD" ]
https://github.com/NX-AI/xlstm-jax/blob/6615e620ba4ecdbe4fd9cc4e9a5a313b133e84a7/xlstm_jax/models/xlstm_pytorch/blocks/mlstm/backend/triton_chunk.py
185656ec-3bff-4006-8c3e-b0f32117c386
triton_sll.py
pytorch/FBGEMM
fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit def array_jagged_bmm_kernel(a_ptr, b_ptr, c_ptr, a_offsets_ptr, b_offsets_ptr, c_offsets_ptr, D, stride_bk, stride_bn, stride_cm, stride_cn, transpose, max_seq_len, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, allow_tf32: tl.constexpr): pid_batch =...
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Blocked Access", "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
022c4cc0-b308-49be-b921-b32509712645
empty.py
triton-lang/triton
python/examples/empty.py
a2b398e0bb1b120f31cf386d6ae3261c3ab84207
0
@triton.jit
def kernel(X, stride_xm, stride_xn, BLOCK: tl.constexpr):
    pass
{ "Data Type": [], "Functionality": [], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/examples/empty.py
e008b2d4-e5d7-4904-adbb-d6d877357da0
gemm_a16w8.py
AlibabaPAI/FLASHNN
flashnn/triton_kernels/gemm_a16w8.py
528a9301587f5fb135b25d973a87ba0a40a703a7
0
@triton.jit def _triton_gemm_a16w8_per_channel_kernel(A, B, C, scale_b, bias, zero_points, M, N, K, stride_am, stride_ak, stride_bn, stride_bk, stride_cm, stride_cn, stride_zpk, stride_zpn, stride_scalek, stride_scalen, add_bias: tl.constexpr, add_zero_points: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_...
{ "Data Type": [ "int8" ], "Functionality": [ "Matrix Multiplication", "Quantization" ], "Memory Access Pattern": [ "Strided Access", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Boun...
[ "Apache" ]
https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/gemm_a16w8.py
0193e573-d1a1-4efc-bba9-db1a33827ad0
z_order.py
Kitsunetic/space-filling-pytorch
space_filling_pytorch/functional/z_order.py
0de955ad1036973ee7506c5a0124c208acec722d
0
@triton.jit def _encode_z_unpadded_kernel(xyz_ptr, batch_idx_ptr, code_ptr, space_size, x_offset, y_offset, z_offset, str_xyz_n, str_xyz_c, N, BLK: tl.constexpr, ASSIGN_BATCH_INDEX: tl.constexpr): pid = tl.program_id(0) offs_n = pid * BLK + tl.arange(0, BLK) mask = offs_n < N xyz_ptrs = xyz_ptr...
{ "Data Type": [], "Functionality": [], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/Kitsunetic/space-filling-pytorch/blob/0de955ad1036973ee7506c5a0124c208acec722d/space_filling_pytorch/functional/z_order.py
86d2fa4b-e481-4a17-9385-cbf6f0389011
triton_sll.py
pytorch/FBGEMM
fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit def jagged_dense_flash_attention_fwd_kernel(q_ptr, k_ptr, v_ptr, ab_ptr, o_ptr, lse_ptr, jagged_offsets_ptr, max_seq_len, stride_ql, stride_qd, stride_kb, stride_kd, stride_kt, stride_vn, stride_vd, stride_ab_b, stride_ab_n, stride_ab_t, stride_ob, stride_ot, stride_od, D: tl.constexpr, T: ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access", "Blocked Access" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
25c4864d-4f48-498d-bd92-b326c89bc547
math.py
BobMcDear/attorch
attorch/math.py
da06cb6236bb47195e33fe3986ed21c675ed94cc
0
@triton.jit def nll_loss(input, size, reduction: tl.constexpr): """ Measures the negative log likelihood loss given log-probabilities of target class. Args: input: Input containing predicted log-probabilities corresponding to target class. The input can have arbitrary shape. siz...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "Low Latency", "Single Instance" ] }
[ "MIT" ]
https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/math.py
f9b8875f-1555-4a1e-a553-4f8289afd403
triton_fused_local_attn.py
LouChao98/vqtree
ops/triton_fused_local_attn.py
27a53274df7a804bce27dffcce5f5be73f64b6f3
0
@triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args['BLOCK_M'] == 0, 'EVEN_N': lambda args: args['seqlen_k'] % args['BLOCK_N'] == 0}) @triton.jit def _fwd_kernel(Q, K, V, Out, softmax_scale, stride_qb, stride_qh, stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride_vh, stride_v...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication", "Softmax" ], "Memory Access Pattern": [ "Tiled", "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Co...
[ "Apache" ]
https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_local_attn.py
d220be62-ce65-4110-8341-90c6b7412373
scatter_reduce.py
pyg-team/pyg-lib
pyg_lib/ops/scatter_reduce.py
bdd392a7093c5016f42ec7ae1945ca77dbdd97db
0
@triton.jit def _fused_scatter_reduce_forward_kernel(inputs_ptr, index_ptr, out_ptr, num_feats, num_reductions, numel, REDUCE0, REDUCE1, REDUCE2, REDUCE3, BLOCK_SIZE: tl.constexpr): pid = tl.program_id(axis=0) block_start = pid * BLOCK_SIZE offsets = block_start + tl.arange(0, BLOCK_SIZE) mask =...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Memory-Bound" ] }
[ "MIT" ]
https://github.com/pyg-team/pyg-lib/blob/bdd392a7093c5016f42ec7ae1945ca77dbdd97db/pyg_lib/ops/scatter_reduce.py
04221a8d-0acd-483c-9226-c62cc41c67fe
layer_norm.py
chengzeyi/stable-fast
src/sfast/triton/ops/layer_norm.py
3a6f35c7045f8f6812515957ca62ef37260ff080
0
@triton.jit def _layer_norm_fwd_fused(X, Y, W, B, Mean, Rstd, stride: tl.constexpr, N: tl.constexpr, eps, BLOCK_SIZE: tl.constexpr): row = tl.program_id(0) Y += row * stride X += row * stride if BLOCK_SIZE >= N: cols = tl.arange(0, BLOCK_SIZE) x = tl.load(X + cols, mask=cols < N).to(...
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "Memory-Bound" ] }
[ "MIT" ]
https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/layer_norm.py
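_layer_norm_fwd_fused above is truncated; the per-row pattern it follows (load a row, compute mean and rstd, normalize, scale and shift) looks roughly like the minimal sketch below, which assumes the whole row fits in one block and is not the repository's kernel:

import triton
import triton.language as tl

@triton.jit
def _layer_norm_row_sketch(x_ptr, y_ptr, w_ptr, b_ptr, stride, N, eps, BLOCK: tl.constexpr):
    # One program normalizes one row; the real kernel also handles rows larger than BLOCK.
    row = tl.program_id(0)
    cols = tl.arange(0, BLOCK)
    mask = cols < N
    x = tl.load(x_ptr + row * stride + cols, mask=mask, other=0.0).to(tl.float32)
    mean = tl.sum(x, axis=0) / N
    xc = tl.where(mask, x - mean, 0.0)
    var = tl.sum(xc * xc, axis=0) / N
    rstd = 1.0 / tl.sqrt(var + eps)
    w = tl.load(w_ptr + cols, mask=mask, other=1.0)
    b = tl.load(b_ptr + cols, mask=mask, other=0.0)
    tl.store(y_ptr + row * stride + cols, xc * rstd * w + b, mask=mask)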
747983d8-2740-48f1-a5b4-0b2b362c5601
qkv_concat.py
ai-compiler-study/triton-kernels
triton_kernels/ops/qkv_concat.py
2308e5e9d965059fe2d19b4d535debac4970b69e
0
@triton.jit def triton_qkv_concat(txt_qkv, img_qkv, out_q_ptr, out_k_ptr, out_v_ptr, seq_len, num_heads, head_dim, hidden_dim, seq_txt_len, stride_txt_a, stride_txt_b, stride_img_a, stride_img_b, stride_output_a, stride_output_b, stride_output_c, XBLOCK: tl.constexpr): pid = tl.program_id(0) xoffset...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/ai-compiler-study/triton-kernels/blob/2308e5e9d965059fe2d19b4d535debac4970b69e/triton_kernels/ops/qkv_concat.py
5d836130-3356-4d7b-9797-41b5b20290c8
fp8_matmul.py
drisspg/transformer_nuggets
transformer_nuggets/fp8/fp8_matmul.py
a4c66bbeebaa479ad8b6ed82d7efbafa41b17260
0
@triton.jit def apply_scaling(accumulator, a_scale, b_scale, ROW_WISE_SCALING: tl.constexpr, offs_cm, offs_cn, M, N, stride_a_scale_m, stride_b_scale_n): if ROW_WISE_SCALING: a_scales = tl.load(a_scale + offs_cm * stride_a_scale_m, mask=offs_cm < M, other=0.0) b_scales = tl.load(b_...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "BSD" ]
https://github.com/drisspg/transformer_nuggets/blob/a4c66bbeebaa479ad8b6ed82d7efbafa41b17260/transformer_nuggets/fp8/fp8_matmul.py
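apply_scaling above chooses between row/column-wise and tensor-wise scaling of the fp32 accumulator of an fp8 matmul; a small PyTorch reference of the two modes (illustrative, not the repository's code):

import torch

def apply_scaling_ref(acc: torch.Tensor, a_scale: torch.Tensor, b_scale: torch.Tensor,
                      row_wise: bool) -> torch.Tensor:
    # Row-wise: per-row scales of A and per-column scales of B broadcast over the (M, N) accumulator.
    # Tensor-wise: a single scale per operand multiplies the whole accumulator.
    if row_wise:
        return acc * a_scale[:, None] * b_scale[None, :]
    return acc * a_scale * b_scale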
da11f109-9ed7-4901-b57c-0a34d0fda019
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/gsa/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.jit def chunk_gsa_bwd_k_kernel_intra_dvg(v, g, o, A, do, dv, dg, offsets, indices, T: tl.constexpr, HQ: tl.constexpr, H: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BC: tl.constexpr, BV: tl.constexpr, NC: tl.constex...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled", "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops", "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput",...
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gsa/chunk.py
8dbbd600-4b51-4d60-9081-c24273378c71
y_0.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/direct/y_0.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit def zeroth_order_fwd(coord_ptr: tl.tensor, output_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl.constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr, output_stride: tl.constexpr): block_id = tl.program_id(0) output_striding = tl.arange(0, block_size) * output_stride ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Low Latency" ] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_0.py
98296604-5ff6-454d-b957-63e6735444c9
fused_chunk.py
sustcsonglin/flash-linear-attention
fla/ops/gla/fused_chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def fused_chunk_gla_bwd_kernel(q, k, v, g, do, dq, dk, dv, h0, s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, scale, B: tl.constexpr, H: tl.constexpr, T: tl.constexpr, K: tl.constexpr, V: tl.constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, USE_INITIAL_STATE: tl.constexpr, CHECK: t...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled", "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gla/fused_chunk.py
601dc13f-efe7-4189-8251-9423cd04334d
foward.py
Forkxz/TritonDeepLearningKernel
kernel/dropconnect/foward.py
add54b6318e8fa5fdbf8c7b47659de9fceaa5691
0
@triton.jit def dropconnect_fwd_kernel(x_ptr, w_ptr, y_ptr, seed, M, K, N, stride_xm, stride_xk, stride_wk, stride_wn, stride_ym, stride_yn, stride_dm, stride_dk, stride_dn, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, ALLOWTF32: tl.constexpr): pid_m = tl.program_...
{ "Data Type": [ "fp32", "int8" ], "Functionality": [ "Elementwise Operations", "Quantization" ], "Memory Access Pattern": [ "Tiled", "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Compute Boun...
[ "MIT" ]
https://github.com/Forkxz/TritonDeepLearningKernel/blob/add54b6318e8fa5fdbf8c7b47659de9fceaa5691/kernel/dropconnect/foward.py
eceee47a-7f5a-4c5d-a084-f79da7308114
bwd_inner_dq.py
ROCm/aotriton
tritonsrc/bwd_inner_dq.py
016f733e8ff746450e066f78bed68709ccd93e60
0
@triton.jit def bwd_inner_dq(dq, qk_scale, bias_scale, DB_block_ptr, store_db, q, kt_ptrs, k_stride, vt_ptrs, v_stride, B_block_ptr, do, Di, l_i, seqlen_q, seqlen_k, head_dim, start_q, lo, hi, dropout_p, dropout_scale, philox_seed, batch_philox_offset, max_seqlen_k, BLOCK_M: tl.constexpr, BLOCK_DMODEL: ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access", "Tiled Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/tritonsrc/bwd_inner_dq.py
774b2df7-3308-4be6-9859-ced86bcaa8fc
gemm_a16w4.py
AlibabaPAI/FLASHNN
flashnn/triton_kernels/gemm_a16w4.py
528a9301587f5fb135b25d973a87ba0a40a703a7
0
@triton.jit def _triton_gemm_a16w4_per_channel_kernel(A, B, C, scale_b, bias, zero_points, M, N, K, rescale_m, rescale_n, rescale_k, stride_am, stride_ak, stride_bn, stride_bk, stride_cm, stride_cn, stride_zpk, stride_zpn, stride_scalek, stride_scalen, add_bias: tl.constexpr, add_zero_points: tl.constex...
{ "Data Type": [ "int8", "fp32" ], "Functionality": [ "Matrix Multiplication", "Quantization" ], "Memory Access Pattern": [ "Tiled", "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops", "Thread-Block Mappings" ], "Performance Objective": [ "High Th...
[ "Apache" ]
https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/gemm_a16w4.py
32a86171-95db-478d-aace-264ce85654bc
chunk_h_parallel.py
sustcsonglin/flash-linear-attention
fla/ops/common/chunk_h_parallel.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'STORE_INITIAL_STATE_GRADIENT': lambda args: args['dh0' ] is not None, 'USE_FINAL_STATE_GRADIENT': lambda args: args['dht'] is not None, 'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps= num_warps, num_stage...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access", "Tiled Access" ], "Parallelization Strategy": [ "Grid-Stride Loops", "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bo...
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/common/chunk_h_parallel.py
10461b26-96c5-475b-b85e-1317b069b740
mlstm_scan.py
LukasBluebaum/xLSTM-Triton-CUDA-Implementation
mlstm_scan.py
6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b
0
@triton.jit def scan_op(x1, y1, x2, y2): z1 = x2 * x1 z2 = x2 * y1 + y2 return z1, z2
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/LukasBluebaum/xLSTM-Triton-CUDA-Implementation/blob/6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b/mlstm_scan.py
e72ea98e-3167-44ec-923d-f412cbff14b8
naive_associative_rnn_scan.py
TushaarGVS/linear-rnn
linear_rnn/triton/naive_associative_rnn_scan.py
48320589b73154484be7d09a144923a2b9e56b85
0
@triton.jit def _associative_scan_op(a_l, x_l, a_r, x_r): return a_r * a_l, a_r * x_l + x_r
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "Apache" ]
https://github.com/TushaarGVS/linear-rnn/blob/48320589b73154484be7d09a144923a2b9e56b85/linear_rnn/triton/naive_associative_rnn_scan.py
489248a3-bb60-40d3-a95f-13874bdd6bf8
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/gated_delta_rule/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for num_warps in [2, 4]], key=['BT', 'BK', 'BV']) @triton.jit def chunk_gated_delta_rule_bwd_kernel_dqkw(q, k, v, w, g, h, do, dh, dq, dk, dv, dw, dg, offsets, indices, ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "Memory-Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gated_delta_rule/chunk.py
ba1125e7-41b5-4aee-9a38-9f0cb8548a00
math.py
BobMcDear/attorch
attorch/math.py
da06cb6236bb47195e33fe3986ed21c675ed94cc
0
@triton.jit def accum_linear(accum, input1, input2, fp16: tl.constexpr, tf32: tl.constexpr ): """ Accumulates matrix multiplications of input tensors for linear functions. Args: accum: Accumulator holding aggregation of matrix multiplications. The accumulator must be of shape [BLOCK...
{ "Data Type": [ "fp16" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/math.py
cecb82d3-5254-4b2d-9ca3-137d666bad12
tuned_bwd.py
ROCm/aotriton
tritonsrc/tuned_bwd.py
016f733e8ff746450e066f78bed68709ccd93e60
0
@triton.autotune(configs=TRITON_CONFIG_LIST_BWD, key=['BLOCK_DMODEL', 'max_seqlen_q', 'max_seqlen_k']) @triton.jit def tuned_bwd_kernel_dk_dv(Q, K, V, B, sm_scale, Out, DO, DK, DV, L, D, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "Memory-Bound" ] }
[ "MIT" ]
https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/tritonsrc/tuned_bwd.py
0bb116ff-c023-4106-b0ff-399e5628a32e
layernorm.py
sustcsonglin/flash-linear-attention
fla/modules/layernorm.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'RECOMPUTE_OUTPUT': lambda args: args['Y'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({}, num_warps=2), triton.Config({}, num_warps=4), triton.Config({}, num_warps=8), triton.Config({}, num_warps=16), triton.Config({}, num_warps=32)], key=['N', ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Backpropagation" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/layernorm.py
bbfcb74d-d097-408e-89dc-20d7bdf56c4b
09-experimental-tma-matrix-multiplication.py
hgl71964/SIP
benchmarks/09-experimental-tma-matrix-multiplication.py
767ed720d4bd5cee21670b125b62c434258c532b
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages =7, num_warps=4)], key=['M', 'N', 'K']) @triton.jit def matmul_kernel(a_ptr, b_ptr, z_ptr, M, N, K, stride_am, stride_ak, stride_bk, stride_bn, stride_zm, stride_zn, BLOCK_SI...
{ "Data Type": [ "fp16" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "MIT" ]
https://github.com/hgl71964/SIP/blob/767ed720d4bd5cee21670b125b62c434258c532b/benchmarks/09-experimental-tma-matrix-multiplication.py
6901e5d7-7826-4fb5-b04f-6731b4bdf651
lstm_bw.py
NX-AI/flashrnn
flashrnn/flashrnn/triton_fused/lstm_bw.py
3fca666a81c8740af4878d7bc5e2a51900e4fe14
0
@triton.jit def _backward_sequence_kernel(delta_states_all_outside, delta_states_last_outside, R, states_all, gates_all, delta_states_initial, delta_Wx, delta_R, delta_b, T: tl.constexpr, NS: tl.constexpr, B: tl.constexpr, NH: tl.constexpr, DH: tl.constexpr, NGI: tl.constexpr, NGR: tl.constexpr, siz_B: ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Recurrent Neural Networks", "Backpropagation" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "Memory-Bound" ] }
[ "MIT", "BSD" ]
https://github.com/NX-AI/flashrnn/blob/3fca666a81c8740af4878d7bc5e2a51900e4fe14/flashrnn/flashrnn/triton_fused/lstm_bw.py
67e19a21-a90d-4dc1-acd6-5034e67c1d5c
FleetAttention_triton.py
Computational-Machine-Intelligence/LeetDecoding
leetDecoding/methods/FleetAttention_triton.py
1b545c2f5bacc155255250d1f70ac9484744559a
0
@triton.jit def FleetAttention_kernel(B_ptr, C_ptr, V_ptr, ans_ptr, seqlen: tl. constexpr, dim: tl.constexpr, rank: tl.constexpr, stride_vbh: tl. constexpr, stride_bbh: tl.constexpr, dim_BLOCK: tl.constexpr): rank_idx = tl.program_id(axis=0) bz = tl.program_id(axis=1) dim_block_idx = tl.program_id(a...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ]...
[ "MIT" ]
https://github.com/Computational-Machine-Intelligence/LeetDecoding/blob/1b545c2f5bacc155255250d1f70ac9484744559a/leetDecoding/methods/FleetAttention_triton.py
1352a7a5-ebd7-4fa6-b706-8efe63475295
fwd_kernel.py
ROCm/aotriton
test/fwd_kernel.py
016f733e8ff746450e066f78bed68709ccd93e60
0
@triton.jit def attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, start_m, seqlen_q, seqlen_k, dropout_p, philox_seed, batch_philox_offset, encoded_softmax_block_ptr, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl. constexpr, BLOCK_N: tl.constexpr, STAGE: tl.constexpr, offs_m: tl. constexpr, offs_n: t...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Softmax" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/test/fwd_kernel.py
1310b43a-9ced-4062-b861-4e7f7c35f090
cumsum.py
sustcsonglin/flash-linear-attention
fla/ops/utils/cumsum.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({'BS': BS}, num_warps=num_warps) for BS in [16, 32, 64] for num_warps in [2, 4, 8]], key=['S', 'BT']) @triton.jit def chunk_local_cumsum_vector_kernel(s, o, offsets, indices, T: tl. constexpr, H...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled", "Blocked Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/utils/cumsum.py
5f884ba7-afb8-4c7b-b7f8-8c0bdc99e57f
gemm_streamk_benchmark.py
intel/intel-xpu-backend-for-triton
benchmarks/triton_kernels_benchmark/gemm_streamk_benchmark.py
6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=2, num_warps=32)], key=['M', 'N', 'K']) @triton.jit def first_wave(a_ptr, b_ptr, c_ptr, M: tl.constexpr, N: tl.constexpr, K: tl .constexpr, stride_am: tl...
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Blocked Access" ], "Parallelization Strategy": [ "Persistent Kernels" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_streamk_benchmark.py
76e5f84e-91fd-40d6-b973-5c5374ec218d
triton_fused_attn.py
LouChao98/vqtree
ops/triton_fused_attn.py
27a53274df7a804bce27dffcce5f5be73f64b6f3
0
@triton.heuristics({'EVEN_M': lambda args: args['seqlen_q'] % args[ 'BLOCK_M'] == 0, 'EVEN_N': lambda args: args['seqlen_k'] % args[ 'BLOCK_N'] == 0}) @triton.jit def _fwd_kernel(Q, K, V, Out, softmax_scale, stride_qb, stride_qh, stride_qm, stride_kb, stride_kh, stride_kn, stride_vb, stride_vh, stride_v...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Softmax" ], "Memory Access Pattern": [ "Tiled", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "Apache" ]
https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_attn.py
fbe7dd6a-e642-49e2-85ae-30c92f1c7dd6
gemm_preop_exp_benchmark.py
intel/intel-xpu-backend-for-triton
benchmarks/triton_kernels_benchmark/gemm_preop_exp_benchmark.py
6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=2, num_warps=32), triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=3...
{ "Data Type": [ "bf16" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Blocked Access", "Transposed Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ...
[ "MIT" ]
https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_preop_exp_benchmark.py
8fbd1eee-ae83-4f46-9244-6b0b3bf3bee6
softmax_online_v1.py
iclementine/optimize_softmax
softmax_online_v1.py
6ddeee3481dd5e63f4a30b946c417e97bc4494bf
0
@triton.jit def softmax_kernel_online_v1(output_ptr, input_ptr, M, N, TILE_N: tl.constexpr ): pid_m = tl.program_id(0) m = tl.full((), value=-float('inf'), dtype=output_ptr.dtype.element_ty) z = tl.full((), value=0, dtype=output_ptr.dtype.element_ty) for start_n in range(0, N, TILE_N): n_off...
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "BSD" ]
https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/softmax_online_v1.py
1370ad45-ebfb-4938-8f50-14f7af621077
fused_recurrent.py
sustcsonglin/flash-linear-attention
fla/ops/linear_attn/fused_recurrent.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def fused_recurrent_linear_attn_fwd_kernel(q, k, v, o, h0, ht, s_k_h, s_v_h, scale, B, H, T, K: tl.constexpr, V: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, USE_INITIAL_STATE: tl.constexpr, STORE_FINAL_STATE: tl. constexpr): i_v, i_k, i_bh = tl.program_id(0), tl.program_id(1), tl.progr...
{ "Data Type": [ "fp32" ], "Functionality": [ "Recurrent Neural Networks", "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute B...
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/linear_attn/fused_recurrent.py
8bddc46d-3ecf-45eb-88d1-ea5dd55dfb8c
fp8_gemm.py
pytorch/FBGEMM
fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit def _kernel_quantize_fp8_block(A, A_scale, A_fp8, scale_ub, M, K, stride_am, stride_ak, stride_om, stride_ok, stride_a_scale_m, stride_a_scale_k, TL_FP8_DTYPE: tl.constexpr, MAX_FP8: tl.constexpr, EPS: tl.constexpr, CLAMP_MAX: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_K: tl.constexpr ) ->No...
{ "Data Type": [ "int8" ], "Functionality": [ "Quantization" ], "Memory Access Pattern": [ "Tiled", "Blocked Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Memory-Bound", "Compute Bound" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py
26a51175-e022-44f8-9ec3-2d92fe596611
parallel.py
sustcsonglin/flash-linear-attention
fla/ops/retention/parallel.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def parallel_retention_bwd_kernel_dq(i_bh, i_t, i_k, i_v, i_h, k, v, do, dq, scale, B: tl.constexpr, H: tl.constexpr, T: tl.constexpr, K: tl. constexpr, V: tl.constexpr, BT: tl.constexpr, BS: tl.constexpr, BK: tl. constexpr, BV: tl.constexpr): p_k = tl.make_block_ptr(k + i_bh * T * K, (T, K)...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Coalesced", "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Memory-Bound", "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/retention/parallel.py
96564198-2f7d-486f-97bf-e992926034e9
paged_attn_v2.py
AlibabaPAI/FLASHNN
flashnn/triton_kernels/paged_attn_v2.py
528a9301587f5fb135b25d973a87ba0a40a703a7
0
@triton.jit def _single_query_cached_kv_attention_v2(exp_sums, max_logits, out, q, k_cache, v_cache, head_mapping, scale, block_tables, seq_lens, partiton_size, max_num_blocks_per_seq, alibi_slopes, stride_qm, stride_qn, stride_om, stride_on, stride_ok, stride_km, stride_kn, stride_kk, stride_exp_m, str...
{ "Data Type": [ "fp16" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "Apache" ]
https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/paged_attn_v2.py
b70199cf-0316-4688-b762-de62481ca266
bwd_split_kernel.py
ROCm/aotriton
test/bwd_split_kernel.py
016f733e8ff746450e066f78bed68709ccd93e60
0
@triton.jit def bwd_kernel_dq(Q, K, V, sm_scale, Out, DO, DQ, L, D, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, seqlen_q, seqlen_k, dropout_p, philox_seed, philox_offset_base, BLOCK_M: tl. constexpr, BLOCK_DMODEL: tl...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Backpropagation", "Matrix Multiplication", "Softmax", "Elementwise Operations" ], "Memory Access Pattern": [ "Blocked Access", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], ...
[ "MIT" ]
https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/test/bwd_split_kernel.py
01fdb3bd-4c8f-4387-a609-7db94baec8e2
geglu.py
Kitsunetic/kitsu
kitsu/nn/geglu.py
826967a493c89753ac2cf1e28b52b79998fc9076
0
@triton.jit def cosh(x): return (tl.exp(x) + tl.exp(-x)) * 0.5
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions", "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced", "Non-Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Low Latency" ] }
[ "MIT" ]
https://github.com/Kitsunetic/kitsu/blob/826967a493c89753ac2cf1e28b52b79998fc9076/kitsu/nn/geglu.py
5abc2eaa-cbaa-4445-b0ea-d0fd89b0f5ac
chunk_fuse.py
elephantmipt/rebased_minimal
flash_linear_attention/fla/ops/triton/abc/chunk_fuse.py
e7b945509972fab9f9c1c7be431abf7d6bf62c95
0
@triton.jit def chunk_abc_bwd_kernel_dq(k, rk, ck, dq, ds, s_qk_h, s_qk_t, s_qk_d, s_sk_h, s_sk_t, s_sk_m, T, BT: tl.constexpr, BK: tl.constexpr, BM: tl. constexpr, DK: tl.constexpr, DM: tl.constexpr, NT: tl.constexpr): i_k, i_m, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) n_bh = tl.num_...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Matrix Multiplication", "Elementwise Operations" ], "Memory Access Pattern": [ "Blocked Access", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throug...
[ "Apache" ]
https://github.com/elephantmipt/rebased_minimal/blob/e7b945509972fab9f9c1c7be431abf7d6bf62c95/flash_linear_attention/fla/ops/triton/abc/chunk_fuse.py
4eae1f4c-27a6-472e-83b1-3c5bb1e82ddf
bwd_preprocess.py
ROCm/aotriton
test/bwd_preprocess.py
016f733e8ff746450e066f78bed68709ccd93e60
0
@triton.jit def bwd_preprocess(Out, DO, Delta, stride_oz, stride_oh, stride_om, stride_on, stride_doz, stride_doh, stride_dom, stride_don, seqlen_q, BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr): off_m = tl.program_id(0) * BLOCK_M off_h = tl.program_id(1) off_z = tl.program_id(2) num_h = tl.num_p...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Elementwise Operations" ], "Memory Access Pattern": [ "Blocked Access", "Coalesced", "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", ...
[ "MIT" ]
https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/test/bwd_preprocess.py
b3fcb89f-85fb-4bab-969a-d45fd9c5cb9e
ln_linear_triton.py
ethansmith2000/fused-layer-norm
ln_linear_triton.py
84fe243a829364acdcfd7cd70b699db04838af0f
0
@triton.jit def _layer_norm_fwd_fused(X_ptr, Y_ptr, W_ptr, B_ptr, Mean_ptr, RSTD_ptr, stride, n_cols, eps, BLOCK_SIZE: tl.constexpr): row_idx = tl.program_id(0) col_offsets = tl.arange(0, BLOCK_SIZE) mask = col_offsets < n_cols Y_ptr += row_idx * stride X_ptr += row_idx * stride Mean_ptr += ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Elementwise Operations" ], "Memory Access Pattern": [ "Blocked Access", "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Compute Bound"...
[ "MIT" ]
https://github.com/ethansmith2000/fused-layer-norm/blob/84fe243a829364acdcfd7cd70b699db04838af0f/ln_linear_triton.py
be6a1d21-b2db-4870-b6b3-b6ac680f6a72
mlstm_scan.py
LukasBluebaum/xLSTM-Triton-CUDA-Implementation
mlstm_scan.py
6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b
0
@triton.jit def mlstm_triton_scan(Q, K, V, F, I, O, F_REDUCED, C, N, M, H, NH: tl. constexpr, S: tl.constexpr, D: tl.constexpr, SB: tl.constexpr, VB: tl. constexpr): bh_id = tl.program_id(0) sb_id = tl.program_id(1) vb_id = tl.program_id(2) num_sequence_blocks = tl.num_programs(1) batch_id =...
{ "Data Type": [ "fp32" ], "Functionality": [ "Recurrent Neural Networks", "Attention Mechanisms", "Matrix Multiplication", "Elementwise Operations" ], "Memory Access Pattern": [ "Blocked Access", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Perf...
[ "MIT" ]
https://github.com/LukasBluebaum/xLSTM-Triton-CUDA-Implementation/blob/6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b/mlstm_scan.py
8e56e989-ac16-4909-9f0e-4ab0f6c13dae
math.py
BobMcDear/attorch
attorch/math.py
da06cb6236bb47195e33fe3986ed21c675ed94cc
0
@triton.jit def glu(input1, input2, param, act_func: tl.constexpr): """ Applies the gated linear unit with an arbitrary activation function to the input. Args: input1: First half of input to gate. The first half must be of the same shape as the second half. input2: Second ha...
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions", "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced", "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Compute Boun...
[ "MIT" ]
https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/math.py
fdd2e0b1-dd78-4f2b-8862-5b13f1fc1f97
k_layer_norm.py
cpuhrsch/torchfused
torchfused/triton/k_layer_norm.py
6c40ed160dcecbe7825f268f7c86bccd359e0ebf
0
@triton.jit def _affine(W, B, N, x, META): cols = tl.arange(0, META['BLOCK_SIZE_N']) w = tl.load(W + cols, mask=cols < N, other=1.0) zero = 0.0 zero = zero.to(w.dtype) w = tl.where(cols < N, w, zero) b = tl.load(B + cols, mask=cols < N, other=0.0) b = tl.where(cols < N, b, zero) y = x * ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced", "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "BSD" ]
https://github.com/cpuhrsch/torchfused/blob/6c40ed160dcecbe7825f268f7c86bccd359e0ebf/torchfused/triton/k_layer_norm.py