
    d5                        d dl Z d dlmZmZmZmZmZ ddlmZm	Z	m
Z
mZ ddlmZmZ  e	            rd dlZ e
            rd dlZ ej        e          Z G d d          Z eed	           G d
 de                      ZdS )    N)AnyDictListOptionalUnion   )add_end_docstringsis_tf_availableis_torch_availablelogging   )PIPELINE_INIT_ARGSPipelinec                   f    e Zd ZdZ	 ddedej        fdZd Zddede	fd	Z
d
 ZdefdZd Zd ZdS )Conversationa\  
    Utility class containing a conversation and its history. This class is meant to be used as an input to the
    [`ConversationalPipeline`]. The conversation contains several utility functions to manage the addition of new user
    inputs and generated model responses. A conversation needs to contain an unprocessed user input before being passed
    to the [`ConversationalPipeline`]. This user input is either created when the class is instantiated, or by calling
    `conversation.add_user_input("input")` after a conversation turn.

    Arguments:
        text (`str`, *optional*):
            The initial user input to start the conversation. If not provided, a user input needs to be provided
            manually using the [`~Conversation.add_user_input`] method before the conversation can begin.
        conversation_id (`uuid.UUID`, *optional*):
            Unique identifier for the conversation. If not provided, a random UUID4 id will be assigned to the
            conversation.
        past_user_inputs (`List[str]`, *optional*):
            Past inputs from the user, if the conversation has a history. You don't need to pass them manually when
            using the pipeline interactively, but to recreate a history you need to set both `past_user_inputs` and
            `generated_responses` to equal-length lists of strings.
        generated_responses (`List[str]`, *optional*):
            Past responses from the model, if the conversation has a history. You don't need to pass them manually
            when using the pipeline interactively, but to recreate a history you need to set both `past_user_inputs`
            and `generated_responses` to equal-length lists of strings.

    Usage:

    ```python
    conversation = Conversation("Going to the movies tonight - any suggestions?")

    # Steps usually performed by the model when generating a response:
    # 1. Mark the user input as processed (moved to the history)
    conversation.mark_processed()
    # 2. Append a model response
    conversation.append_response("The Big Lebowski.")

    conversation.add_user_input("Is it good?")
    ```Ntextconversation_idc                 x    |st          j                    }|g }|g }|| _         || _        || _        || _        d S N)uuiduuid4past_user_inputsgenerated_responsesnew_user_input)selfr   r   r   r   s        Elib/python3.11/site-packages/transformers/pipelines/conversational.py__init__zConversation.__init__8   sT      	+"jllO#!&"$.	+;.A -1    c                     t          |t                    sdS | j        |j        k    rdS | j        |j        k    o| j        |j        k    o| j        |j        k    S )NFT)
isinstancer   r   r   r   r   )r   others     r   __eq__zConversation.__eq__G   sh    %.. 	59
""45#77 F%)??F(E,EE	
r   F	overwritec                     | j         rY|r/t                              d| j          d| d           || _         dS t                              d| j          d| d           dS || _         dS )a  
        Add a user input to the conversation for the next round. This populates the internal `new_user_input` field.

        Args:
            text (`str`): The user input for the next conversation round.
            overwrite (`bool`, *optional*, defaults to `False`):
                Whether or not existing and unprocessed user input should be overwritten when this function is called.
        """
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        """
        Mark the conversation as processed (moves the content of `new_user_input` to `past_user_inputs`) and empties
        the `new_user_input` field.
        """
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        """
        Append a response to the list of generated responses.

        Args:
            response (`str`): The model generated response.
        """
        self.generated_responses.append(response)

    def iter_texts(self):
        """
        Iterates over all blobs of the conversation.

        Returns: Iterator of (is_user, text_chunk) in chronological order of the conversation. `is_user` is a `bool`,
        `text_chunk` is a `str`.
        """
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        """
        Generates a string representation of the conversation.

        Return:
            `str`:

            Example: Conversation id: 7d15686b-dc94-49f2-9c4b-c9eac6a1f114 user >> Going to the movies tonight - any
            suggestions? bot >> The Big Lebowski
        """
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    """
    Multi-turn conversational pipeline.

    Example:

    ```python
    >>> from transformers import pipeline, Conversation

    >>> chatbot = pipeline(model="microsoft/DialoGPT-medium")
    >>> conversation = Conversation("Going to the movies tonight - any suggestions?")
    >>> conversation = chatbot(conversation)
    >>> conversation.generated_responses[-1]
    'The Big Lebowski'

    >>> conversation.add_user_input("Is it an action movie?")
    >>> conversation = chatbot(conversation)
    >>> conversation.generated_responses[-1]
    "It's a comedy."
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This conversational pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"conversational"`.

    The models that this pipeline can use are models that have been fine-tuned on a multi-turn conversational task,
    currently: *'microsoft/DialoGPT-small'*, *'microsoft/DialoGPT-medium'*, *'microsoft/DialoGPT-large'*. See the
    up-to-date list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=conversational).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        r"""
        Generate responses for the conversation(s) given as inputs.

        Args:
            conversations (a [`Conversation`] or a list of [`Conversation`]):
                Conversations to generate responses for.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
                Whether or not to clean up the potential extra spaces in the text output.
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./model#generative-models)).

        Returns:
            [`Conversation`] or a list of [`Conversation`]: Conversation(s) with updated generated responses for those
            containing a new user input.
        num_workersr   r   )rE   __call__r    listlen)r   rX   rZ   rK   outputsrL   s        r   r[   zConversationalPipeline.__call__   sY    * #%''"=TTkTVTTgt$$ 	W):):1:r       conversationreturnc                    t          |t                    st          d          |j        %t          dt	          |j                   d          t          | j        d          r| j                            |          }n| 	                    |          }| j
        dk    rt          j        |g          }n | j
        dk    rt          j        |g          }||dS )Nz6ConversationalPipeline, expects Conversation as inputszConversation with UUID zl does not contain new user input to process. Add user inputs with the conversation's `add_user_input` method_build_conversation_input_idspttf)	input_idsr`   )r    r   
ValueErrorr   typer   hasattrrF   rc   _legacy_parse_and_tokenize	frameworktorch
LongTensorre   constant)r   r`   rN   rf   s       r   
preprocessz!ConversationalPipeline.preprocess   s    ,55 	WUVVV&.R$|/@*A*A R R R   4>#BCC 	FDD\RRII 77EEI>T!!()55II^t##YK00I&EEEr   
   c           	         |                     d| j        j        j                  }|d         j        d         }||z
  |k     r]t
                              d| d| d| d           ||z
  }|d         d d | d f         |d<   d|v r|d         d d | d f         |d<   |                    d	          }||d<    | j        j        di ||}| j        j        j	        rd}	n|}	|d d |	d f         |d
S )NrP   rf   r   zConversation input is to long (z), trimming it to (z - )attention_maskr`   )
output_idsr`   rA   )
getmodelconfigrP   shaper%   r&   popgenerateis_encoder_decoder)
r   model_inputsrO   rS   rP   ntrimr`   rt   start_positions
             r   _forwardzConversationalPipeline._forward  sL   $((tz7H7STT
%+A.&**NNsQssS]ssbpsssttt.D(4[(A!!!dUVV)(LL%<//1=>N1OPQPQPQTXSXSYSYPY1Z-.#''77(2%(TZ(KK<K?KK
:/ 	NNN(NOO);<l[[[r   Tc                     |d         }| j                             |d         d|          }|d         }|                                 |                    |           |S )Nrt   r   T)skip_special_tokensrQ   r`   )rF   decoder*   r-   )r   model_outputsrQ   rt   answerr`   s         r   postprocessz"ConversationalPipeline.postprocess  sm    "<0
&&qM $)E ' 
 

 %^4##%%%$$V,,,r   c                    | j         j        }g }|                                D ]j\  }}|4|                    | j                             |d          |gz              ;|                    | j                             |d                     kt          |          | j         j        k    r|| j         j         d          }|S )NF)add_special_tokens)rF   eos_token_idr2   extendencoder]   model_max_length)r   r`   r   rf   r7   r   s         r   rj   z1ConversationalPipeline._legacy_parse_and_tokenize+  s    ~2	)4466 	X 	XMGT'  !6!6tPU!6!V!VZfYg!ghhhh  !6!6tPU!6!V!VWWWWy>>DN;;;!4>#B"B"D"DEIr   )NNN)r   )r_   )rp   )T)r:   r;   r<   r=   r   rW   r   r   r   r[   r   r>   r   ro   r   r   rj   __classcell__)rL   s   @r   rC   rC      s(        >@ @ @ @ @ _cE E E E, eL$|:L,L&M      4F F| FTXY\^aYaTb F F F F(\ \ \ \&
 
 
 
|         r   rC   )r   typingr   r   r   r   r   utilsr	   r
   r   r   baser   r   
tensorflowre   rl   
get_loggerr:   r%   r   rC   rA   r   r   <module>r      sS    3 3 3 3 3 3 3 3 3 3 3 3 3 3 T T T T T T T T T T T T . . . . . . . . ?  LLL 
	H	%	%E E E E E E E EP  S S S S SX S S S S Sr   