
    piB&                        d dl Z d dlmZ d dlZd dlmZ d dlmZ ddlm	Z	m
Z ddlmZ ddlmZmZmZmZ dd	lmZmZmZmZmZmZ dd
lmZ ddlmZmZmZ erddl	m Z   e jB                  e"      Z#ejH                  jJ                  Z%ed        Z& ede&dd      Z' eejP                  d      Z) eejP                  dde%jP                  jT                        Z+ eejX                  de%jX                  jZ                        Z. ej^                  e%jP                        dddd       Z0 ej^                  e%jX                        ddddd       Z1y)    N)TYPE_CHECKING)counters)CKGemmTemplate   )irlowering)MMKernelInputs)autotune_select_algorithmExternKernelChoiceSymbolicGridFnTritonTemplate)_use_cutlass_for_opuse_aten_gemm_kernelsuse_ck_gemm_templateuse_cpp_bmm_templateuse_cutlass_templateuse_triton_template)V   )_is_static_problemis_batch_stride_largest_or_zeromm_args)ChoiceCallerc                :     |||d          |||d         z  | dfS )NBLOCK_MBLOCK_Nr    )bmnmetacdivs        \/opt/services/ai/voice_agent/venv/lib/python3.12/site-packages/torch/_inductor/kernel/bmm.pybmm_gridr$   $   s*    DO$tAtI'??AFF    bmma	  
{{def_kernel("A", "B")}}
    M = {{size("A", -2)}}
    N = {{size("B", -1)}}
    K = {{size("A", -1)}}

    stride_aq = {{stride("A", 0)}}
    stride_am = {{stride("A", 1)}}
    stride_ak = {{stride("A", 2)}}

    stride_bq = {{stride("B", 0)}}
    stride_bk = {{stride("B", 1)}}
    stride_bn = {{stride("B", 2)}}

    # based on triton.ops.matmul
    pid = tl.program_id(0)
    grid_m = (M + BLOCK_M - 1) // BLOCK_M
    grid_n = (N + BLOCK_N - 1) // BLOCK_N

    # re-order program ID for better L2 performance
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    pid_m = group_id * GROUP_M + (pid % group_size)
    pid_n = (pid % width) // (group_size)
    tl.assume(pid_m >= 0)
    tl.assume(pid_n >= 0)

    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    if (stride_am == 1 and stride_ak == M) or (stride_am == K and stride_ak == 1):
        ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
    else:
        ram = rm % M
    if (stride_bk == 1 and stride_bn == K) or (stride_bk == N and stride_bn == 1):
        rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
    else:
        rbn = rn % N

    rk = tl.arange(0, BLOCK_K)

    idx_q = tl.program_id(1)  # batch dimension for BMM
    A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak + idx_q*stride_aq)
    B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn + idx_q*stride_bq)

    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
    for k in range(K, 0, -BLOCK_K):
        if EVEN_K:
            a = tl.load(A)
            b = tl.load(B)
        else:
            a = tl.load(A, mask=rk[None, :] < k, other=0.)
            b = tl.load(B, mask=rk[:, None] < k, other=0.)
        acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
        A += BLOCK_K * stride_ak
        B += BLOCK_K * stride_bk

    # rematerialize rm and rn to save registers
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    idx_q = tl.program_id(1)  # batch dimension for BMM
    idx_m = rm[:, None]
    idx_n = rn[None, :]
    mask = (idx_m < M) & (idx_n < N)

    # inductor generates a suffix
    {{store_output(("idx_q", "idx_m", "idx_n"), "acc", "mask")}}
T)namegridsource"cache_codegen_enabled_for_templatezat::bmm_outzat::_bmm_out_dtype_cuda	bmm_dtype)r'   op_overloadzat::baddbmm_out)r,   layoutc          
         t        d | |fD              r| j                         d   dk(  s|j                         d   dk(  rWt        j                  | d      } t        j                  |d      }t        j                  t        j
                  | |      d      S d }d fd} ||       r0t        j                  j                  j                  d	   } || |      }  ||      r0t        j                  j                  j                  d   } |||      }t        | |||
      \  }}	}
}} }d}t        | |g      }| j                         d	   }t        d   d| d| d|	 d|
 xx   dz  cc<   t        j                  d|||	|
| j                         |j                         |       t         }i }|r.| j#                         j$                  dk(  sJ d       t&        }d|i}g }t)               r?|j+                  t        j,                  j/                  |||g||j0                  |i             t3        |d      rK||| j                         k(  r6|j+                  t        j,                  j/                  ||t4        g|             t7        |      \  }}t9        | ||      }|rB|r@t;        |||	|
      r2t=        |      r'ddlm } |jC                  |||jE                                tG        || |      r'ddl$m%} |jM                  |||jE                                tO        |||	|
      r%tQ        jR                  |||jE                                tU        |||jE                         |      S )z`
    Lowering for autotuning aten.bmm with different backends (Aten, Triton, CUTLASS, etc.)
    c              3   V   K   | ]!  }|j                         j                  d k(   # yw)cpuN)
get_devicetype).0xs     r#   	<genexpr>ztuned_bmm.<locals>.<genexpr>   s!     
>A1<<>%'
>s   ')r   r   )axisc                     t        j                  |       syt        j                  | d      \  }}t        |t         j                        S )NTF)freeze)r   is_storage_and_layoutas_storage_and_layout
isinstanceFlexibleLayout)t_r.   s      r#   is_valid_to_require_contiguousz1tuned_bmm.<locals>.is_valid_to_require_contiguous   s<    ++A.005AIAvfb&7&788r%   c                     |d   dk(  xr | d   dk(  xs |d   | d   k\  xs |d   dk(  xr | d   dk(  xs |d   | d   k\  S )Nr7   r   r   )sizesstridess     r#    is_preferred_layout_as_bmm_inputz3tuned_bmm.<locals>.is_preferred_layout_as_bmm_input   sf     q QeBi1n&PuRy8PU"+"Sb	Q(R'"+r:RUr%   c                     |j                   d   j                         }|j                   d   j                         } ||      st        j                  j                  |       } | S )Nval)r!   sizestrider   ExternKernelrequire_contiguous)r?   meta_trD   rE   rF   s       r#   may_require_contiguousz)tuned_bmm.<locals>.may_require_contiguous   sT    KK&++-Ekk%(//1G3E7COO66q9Hr%   r   )r.   	out_dtyper&   aten_mm_infoz	aten.bmm_r@   zZTuned aten.bmm: batch=%s, m=%s, n=%s, k=%s, mat1_dtype=%s, mat2_dtype=%s, output_layout=%scudaz$out_dtype is only supported for CUDArO   Fcheck_max_autotune)CUTLASS3xGemmTemplate)CppBmmTemplate)+allget_sizeL	unsqueezesum_mulr   graphcurrent_nodeargsr   r	   r   loginfo	get_dtypeaten_bmmr2   r3   aten_bmm_dtyper   extendchoicesget_mm_configsuidr   bmm_templater   r   r   r   codegen.cuda.gemm_templaterT   add_cutlass_gemm_choicesnodesr   codegen.cpp_bmm_templaterU   add_choicesr   r   add_ck_gemm_choicesr
   )mat1mat2rO   r.   rA   rN   	meta_mat1	meta_mat2r   r    kr'   kernel_inputs
batch_sizeaten_handleraten_extra_kwargsre   r@   
is_nonzerobatch_stride_largest_or_zerorT   rU   rF   s                         @r#   	tuned_bmmrz      s2   
 
>$
>>==?1"dmmoa&8A&=;;tR(D;;tQ'D66!%%d+!44	9	U	 *$/,,11!4I)$	:D)$/,,11!4I)$	:D #*d6Y#Aq!VT4 D #D$<0M #J^yAaS!AaSABaGBHHd				 (0L %%/W1WW/%()4"$GII$$!!#45	
 6e<Y$..*:: 	II$$]F\NDQ	
 'v.MAz#B4v#V $ Aq1%F66V]002	
 FD$/=""!	
 FAq!,**7FM<O<O<QR$T7M4G4G4I6RRr%   )alphabetar.   c                   t        ||| |      \  }}}}}}} t        | ||gt        ||            }	|j                         d   }
t        d   d|
 d| d| d| xx   dz  cc<   t
        j                  d	|
||||j                         |j                         | j                         |	       d
}g }t               r6|j                  t        j                  j                  |	|t        g|             t        |d      r6|j                  t        j                  j                  |	|t        g|             t!        |||	j#                         |      S )z_
    Lowering for autotuning aten.mm with different backends (Aten, Triton, CUTLASS, etc.)
    r-   )r{   r|   )scalarsr   rP   zaten.baddbmm_r@   r   zkTuned aten.baddbmm: batch_size=%s, m=%s, n=%s, k=%s, mat1_dtype=%s, mat2_dtype=%s, inp=%s, output_layout=%sbaddbmmFrR   )r   r	   dictrW   r   r_   r`   ra   r   rd   r   re   rf   aten_baddbmmr   rh   r
   rk   )inpro   rp   r{   r|   r.   r   r    rs   rt   ru   r'   re   s                r#   tuned_baddbmmr      sI    (/tT3v'N$Aq!VT4 #	dD4e$#?M
 #J^}ZL!AaS!EF!KFHHu			
 D"$GII$$]F\NDQ	
 6e<II$$		
 %T7M4G4G4I6RRr%   )N)2loggingtypingr   torchtorch._dynamo.utilsr   7torch._inductor.codegen.rocm.ck_universal_gemm_templater    r   r   rX   rt   r	   select_algorithmr
   r   r   r   utilsr   r   r   r   r   r   virtualizedr   	mm_commonr   r   r   r   	getLogger__name__r_   opsatenr$   rh   r&   rb   	dtype_outrc   r   outr   register_loweringrz   r   r   r%   r#   <module>r      sP       ( R   *    S S !g!yy~~ G G 		CH (,OHT eii7#	II	""	 "	MM$$,,2B2B
 TXXuSD uS uSp T\\",-Ad ,S #,Sr%   