from typing import ClassVar, Literal, Union

import torch
import torch.nn as nn
from torch.ao.nn.intrinsic import _FusedModule
from torch.nn.common_types import _size_1_t, _size_2_t, _size_3_t
from torch.nn.modules.utils import _pair, _single, _triple


__all__ = ["Conv1d", "Conv2d", "Conv3d"]


class _ConvNd(nn.modules.conv._ConvNd):
    _FLOAT_MODULE: ClassVar[type[nn.modules.conv._ConvNd]]

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: tuple[int, ...],
        stride: tuple[int, ...],
        padding: Union[str, tuple[int, ...]],
        dilation: tuple[int, ...],
        transposed: bool,
        output_padding: tuple[int, ...],
        groups: int,
        bias: bool,
        padding_mode: Literal["zeros", "reflect", "replicate", "circular"],
        qconfig=None,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        nn.modules.conv._ConvNd.__init__(
            self,
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            transposed,
            output_padding,
            groups,
            bias,
            padding_mode,
            **factory_kwargs,
        )
        assert qconfig, "qconfig must be provided for QAT module"
        self.qconfig = qconfig
        # The qconfig's weight entry is a FakeQuantize factory; the resulting module
        # simulates quantized weights during training.
        self.weight_fake_quant = qconfig.weight(factory_kwargs=factory_kwargs)

    def forward(self, input):
        return self._conv_forward(
            input, self.weight_fake_quant(self.weight), self.bias
        )

    @staticmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        r"""Create a qat module from a float module

        Args:
           `mod`: a float module, either produced by torch.ao.quantization utilities
           or directly from user
        """
        assert type(mod) == cls._FLOAT_MODULE, (
            "qat."
            + cls.__name__
            + ".from_float only works for "
            + cls._FLOAT_MODULE.__name__
        )
        assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
        assert mod.qconfig, "Input float module must have a valid qconfig"
        if issubclass(type(mod), _FusedModule):
            # For fused modules (e.g. ConvReLU), the conv is the first submodule.
            mod = mod[0]
        qconfig = mod.qconfig
        qat_conv = cls(
            mod.in_channels,
            mod.out_channels,
            mod.kernel_size,
            stride=mod.stride,
            padding=mod.padding,
            dilation=mod.dilation,
            groups=mod.groups,
            bias=mod.bias is not None,
            padding_mode=mod.padding_mode,
            qconfig=qconfig,
        )
        qat_conv.weight = mod.weight
        qat_conv.bias = mod.bias
        return qat_conv

    def to_float(self):
        """This works for both single qat conv, and the qat conv - relu modules
        to convert the qat module to a floating point module
        """
        cls = type(self)
        conv = cls._FLOAT_CONV_MODULE(  # type: ignore[attr-defined, operator]
            self.in_channels,
            self.out_channels,
            self.kernel_size,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
            self.bias is not None,
            self.padding_mode,
        )
        conv.weight = torch.nn.Parameter(self.weight.detach())
        if self.bias is not None:
            conv.bias = torch.nn.Parameter(self.bias.detach())
        # For fused conv-relu modules, rebuild the corresponding float fused module.
        if issubclass(cls, _FusedModule):
            modules = [conv]
            assert hasattr(cls, "_FLOAT_RELU_MODULE")
            relu = cls._FLOAT_RELU_MODULE()  # type: ignore[attr-defined]
            modules.append(relu)
            fused = cls._FLOAT_MODULE(*modules)  # type: ignore[arg-type, attr-defined, operator]
            fused.train(self.training)
            return fused
        else:
            return conv
                  Zeeej
                        e	d<   ej
                  Z
eeej
                        e	d<   	 	 	 	 	 	 	 	 	 ddedededed	eeef   d
edededed   ddf fdZed fd	       Z xZS )r   aZ  
    A Conv1d module attached with FakeQuantize modules for weight,
    used for quantization aware training.

    We adopt the same interface as :class:`~torch.nn.Conv1d`

    Similar to :class:`~torch.nn.Conv2d`, with FakeQuantize modules initialized to
    default.

    Attributes:
        weight_fake_quant: fake quant module for weight
    r   rF   Nr   r   r   r   r   r   r   r   r   r   r#   c                     t        |      }t        |      }t        |t              r|n
t        |      }t        |      }t        |   ||||||dt        d      |||	|
||       y NFr   )r   r   r   r   r   r   r   r   r,   r%   r&   )r   
isinstancerV   superr+   r/   r   r   r   r   r   r   r   r   r   r,   r%   r&   kernel_size_stride_padding_	dilation_	__class__s                    r0   r+   zConv1d.__init__   w     {+&/(#67GG<LH%	"1:% 	 	
r1   c                 (    t         |   | ||      S N)rA   r^   rC   r?   r@   rA   rd   s      r0   rC   zConv1d.from_float   "    w!1K " 
 	
r1   	   r   rl   rl   Tr   NNNrP   )r<   rQ   rR   __doc__r(   r   r   r   r;   rS   rF   rT   r   r   rV   rW   r   r+   classmethodrC   __classcell__rd   s   @r0   r   r   |   s     02yyM8DO,846IIbii1= )*MT"
"
 "
 	"

 "
 sI~&"
 "
 "
 "
 IJ"
 
"
H 
 
r1   r   c                       e Zd ZU dZej
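
# Example (editor's sketch, assuming the default QAT qconfig helper from
# torch.ao.quantization): constructing a QAT Conv1d directly. Kept as a comment so
# importing this module stays side-effect free.
#
#   import torch
#   from torch.ao.quantization import get_default_qat_qconfig
#
#   qconfig = get_default_qat_qconfig("fbgemm")
#   qat_conv = Conv1d(16, 33, kernel_size=3, stride=2, qconfig=qconfig)
#   y = qat_conv(torch.randn(20, 16, 50))   # float output, weights fake-quantized
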
                  Zeeej
                        e	d<   ej
                  Z
eeej
                        e	d<   	 	 	 	 	 	 	 	 	 ddedededed	eeef   d
edededed   ddf fdZd Zed fd	       Z xZS )r   a  
    A Conv2d module attached with FakeQuantize modules for weight,
    used for quantization aware training.

    We adopt the same interface as `torch.nn.Conv2d`, please see
    https://pytorch.org/docs/stable/nn.html?highlight=conv2d#torch.nn.Conv2d
    for documentation.

    Similar to `torch.nn.Conv2d`, with FakeQuantize modules initialized to
    default.

    Attributes:
        weight_fake_quant: fake quant module for weight
    r   rF   Nr   r   r   r   r   r   r   r   r   r   r#   c                     t        |      }t        |      }t        |t              r|n
t        |      }t        |      }t        |   ||||||dt        d      |||	|
||       y r\   )r
   r]   rV   r^   r+   r_   s                    r0   r+   zConv2d.__init__   su     [)-(#67E'N(O	 8% 	 	
r1   c                 n    | j                  || j                  | j                        | j                        S r3   r4   r6   s     r0   r8   zConv2d.forward   r9   r1   c                 (    t         |   | ||      S rg   rh   ri   s      r0   rC   zConv2d.from_float   rj   r1   rk   rP   )r<   rQ   rR   rm   r(   r   r   r   r;   rS   rF   rT   r   r   rV   rW   r   r+   r8   rn   rC   ro   rp   s   @r0   r   r           02yyM8DO,846IIbii1= )*MT"
"
 "
 	"

 "
 sI~&"
 "
 "
 "
 IJ"
 
"
HY 
 
r1   r   c                       e Zd ZU dZej
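
# Example (editor's sketch): converting an existing float Conv2d into its QAT
# counterpart via ``from_float``. In practice torch.ao.quantization.prepare_qat performs
# this swap for every qualifying module in a model; the manual call below only
# illustrates the contract (the float module must carry a ``qconfig``).
#
#   import torch
#   from torch.ao.quantization import get_default_qat_qconfig
#
#   float_conv = torch.nn.Conv2d(3, 16, kernel_size=3, padding=1)
#   float_conv.qconfig = get_default_qat_qconfig("fbgemm")
#   qat_conv = Conv2d.from_float(float_conv)   # weight/bias are shared with float_conv
#   out = qat_conv(torch.randn(1, 3, 32, 32))
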
                  Zeeej
                        e	d<   ej
                  Z
eeej
                        e	d<   	 	 	 	 	 	 	 	 	 ddedededed	eeef   d
edededed   ddf fdZd Zed fd	       Z xZS )r   a  
    A Conv3d module attached with FakeQuantize modules for weight,
    used for quantization aware training.

    We adopt the same interface as `torch.nn.Conv3d`, please see
    https://pytorch.org/docs/stable/nn.html?highlight=conv3d#torch.nn.Conv3d
    for documentation.

    Similar to `torch.nn.Conv3d`, with FakeQuantize modules initialized to
    default.

    Attributes:
        weight_fake_quant: fake quant module for weight
    r   rF   Nr   r   r   r   r   r   r   r   r   r   r#   c                     t        |      }t        |      }t        |t              r|n
t        |      }t        |      }t        |   ||||||dt        d      |||	|
||       y r\   )r   r]   rV   r^   r+   r_   s                    r0   r+   zConv3d.__init__  re   r1   c                 n    | j                  || j                  | j                        | j                        S r3   r4   r6   s     r0   r8   zConv3d.forward0  r9   r1   c                 (    t         |   | ||      S rg   rh   ri   s      r0   rC   zConv3d.from_float3  rj   r1   rk   rP   )r<   rQ   rR   rm   r(   r   r   r   r;   rS   rF   rT   r	   r   rV   rW   r   r+   r8   rn   rC   ro   rp   s   @r0   r   r      ru   r1   r   )typingr   r   r   rG   torch.nnr(   torch.ao.nn.intrinsicr   torch.nn.common_typesr   r   r	   torch.nn.modules.utilsr
   r   r   __all__r)   r*   r   r   r   r   rY   r1   r0   <module>r      s|    + +   . A A : : )kbjjoo%% k\9
Wbii 9
x>
Wbii >
B>
Wbii >
r1   