
from queue import Queue
from typing import TYPE_CHECKING, Optional


if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer


class BaseStreamer:
    """
    Base class from which `.generate()` streamers should inherit.
    """

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens"""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation"""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """
    Simple text streamer that prints the token(s) to stdout as soon as entire words are formed.

    <Tip warning={true}>

    The API for the streamer classes is still under development and may change in the future.

    </Tip>

    Parameters:
        tokenizer (`AutoTokenizer`):
            The tokenizer used to decode the tokens.
        skip_prompt (`bool`, *optional*, defaults to `False`):
            Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots.
        decode_kwargs (`dict`, *optional*):
            Additional keyword arguments to pass to the tokenizer's `decode` method.

    Examples:

        ```python
        >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

        >>> tok = AutoTokenizer.from_pretrained("gpt2")
        >>> model = AutoModelForCausalLM.from_pretrained("gpt2")
        >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
        >>> streamer = TextStreamer(tok)

        >>> # Despite returning the usual output, the streamer will also print the generated text to stdout.
        >>> _ = model.generate(**inputs, streamer=streamer, max_new_tokens=20)
        An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,
        ```
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """
        Receives tokens, decodes them, and prints them to stdout as soon as they form entire words.
        """
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decode the entire thing
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, print up to the last space character (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache and prints a newline to stdout."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    """
    Streamer that stores print-ready text in a queue, to be used by a downstream application as an iterator. This is
    useful for applications that benefit from accessing the generated text in a non-blocking way (e.g. in an interactive
    Gradio demo).

    <Tip warning={true}>

    The API for the streamer classes is still under development and may change in the future.

    </Tip>

    Parameters:
        tokenizer (`AutoTokenizer`):
            The tokenizer used to decode the tokens.
        skip_prompt (`bool`, *optional*, defaults to `False`):
            Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots.
        timeout (`float`, *optional*):
            The timeout for the text queue. If `None`, the queue will block indefinitely. Useful to handle exceptions
            in `.generate()`, when it is called in a separate thread.
        decode_kwargs (`dict`, *optional*):
            Additional keyword arguments to pass to the tokenizer's `decode` method.

    Examples:

        ```python
        >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
        >>> from threading import Thread

        >>> tok = AutoTokenizer.from_pretrained("gpt2")
        >>> model = AutoModelForCausalLM.from_pretrained("gpt2")
        >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
        >>> streamer = TextIteratorStreamer(tok)

        >>> # Run the generation in a separate thread, so that we can fetch the generated text in a non-blocking way.
        >>> generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=20)
        >>> thread = Thread(target=model.generate, kwargs=generation_kwargs)
        >>> thread.start()
        >>> generated_text = ""
        >>> for new_text in streamer:
        ...     generated_text += new_text
        >>> generated_text
        'An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,'
        ```
    """

    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
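

# ---------------------------------------------------------------------------
# Illustrative sketch: the natural customization point for streaming behaviour
# is `on_finalized_text`, which both subclasses above override. The
# `ListStreamer` class below is a hypothetical example (not part of the
# transformers API) that follows the same pattern, collecting the print-ready
# chunks in memory instead of writing them to stdout or a queue.
# ---------------------------------------------------------------------------
class ListStreamer(TextStreamer):
    """Hypothetical example streamer that accumulates the generated text in a list."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.chunks = []

    def on_finalized_text(self, text: str, stream_end: bool = False):
        # Store each finalized chunk; "".join(streamer.chunks) yields the full generation.
        if text:
            self.chunks.append(text)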