
"""
Introduction
============

This module implements the word2vec family of algorithms, using highly optimized C routines,
data streaming and Pythonic interfaces.

The word2vec algorithms include skip-gram and CBOW models, using either
hierarchical softmax or negative sampling: `Tomas Mikolov et al: Efficient Estimation of Word Representations
in Vector Space <https://arxiv.org/pdf/1301.3781.pdf>`_, `Tomas Mikolov et al: Distributed Representations of Words
and Phrases and their Compositionality <https://arxiv.org/abs/1310.4546>`_.

Other embeddings
================

There are more ways to train word vectors in Gensim than just Word2Vec.
See also :class:`~gensim.models.doc2vec.Doc2Vec`, :class:`~gensim.models.fasttext.FastText`.

The training algorithms were originally ported from the C package https://code.google.com/p/word2vec/
and extended with additional functionality and
`optimizations <https://rare-technologies.com/parallelizing-word2vec-in-python/>`_ over the years.

For a tutorial on Gensim word2vec, with an interactive web app trained on GoogleNews,
visit https://rare-technologies.com/word2vec-tutorial/.

Usage examples
==============

Initialize a model with e.g.:

.. sourcecode:: pycon

    >>> from gensim.test.utils import common_texts
    >>> from gensim.models import Word2Vec
    >>>
    >>> model = Word2Vec(sentences=common_texts, vector_size=100, window=5, min_count=1, workers=4)
    >>> model.save("word2vec.model")


**The training is streamed, so ``sentences`` can be an iterable**, reading input data
from the disk or network on-the-fly, without loading your entire corpus into RAM.

Note that the ``sentences`` iterable must be *restartable* (not just a generator), to allow the algorithm
to stream over your dataset multiple times. For some examples of streamed iterables,
see :class:`~gensim.models.word2vec.BrownCorpus`,
:class:`~gensim.models.word2vec.Text8Corpus` or :class:`~gensim.models.word2vec.LineSentence`.
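
A minimal sketch of such a restartable iterable, assuming a plain-text file with one document per line
(the class and filename below are illustrative, not part of Gensim):

.. sourcecode:: pycon

    >>> from gensim.utils import simple_preprocess
    >>>
    >>> class MyCorpus:
    ...     """Restartable iterable: re-opens the file on every new pass over the corpus."""
    ...     def __init__(self, path):
    ...         self.path = path
    ...     def __iter__(self):
    ...         with open(self.path, encoding='utf8') as fin:
    ...             for line in fin:
    ...                 yield simple_preprocess(line)  # list of tokens for one document
    >>>
    >>> # sentences = MyCorpus('my_corpus.txt')  # can be passed to Word2Vec like any other iterable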

If you save the model you can continue training it later:

.. sourcecode:: pycon

    >>> model = Word2Vec.load("word2vec.model")
    >>> model.train([["hello", "world"]], total_examples=1, epochs=1)
    (0, 2)

The trained word vectors are stored in a :class:`~gensim.models.keyedvectors.KeyedVectors` instance, as `model.wv`:

.. sourcecode:: pycon

    >>> vector = model.wv['computer']  # get numpy vector of a word
    >>> sims = model.wv.most_similar('computer', topn=10)  # get other similar words

The reason for separating the trained vectors into `KeyedVectors` is that if you don't
need the full model state any more (i.e. you don't need to continue training), the rest of the
model state can be discarded, keeping just the vectors and their keys.

This results in a much smaller and faster object that can be mmapped for lightning-fast
loading, and for sharing the vectors in RAM between processes:

.. sourcecode:: pycon

    >>> from gensim.models import KeyedVectors
    >>>
    >>> # Store just the words + their trained embeddings.
    >>> word_vectors = model.wv
    >>> word_vectors.save("word2vec.wordvectors")
    >>>
    >>> # Load back with memory-mapping = read-only, shared across processes.
    >>> wv = KeyedVectors.load("word2vec.wordvectors", mmap='r')
    >>>
    >>> vector = wv['computer']  # Get numpy vector of a word

Gensim can also load word vectors in the "word2vec C format", as a
:class:`~gensim.models.keyedvectors.KeyedVectors` instance:

.. sourcecode:: pycon

    >>> from gensim.test.utils import datapath
    >>>
    >>> # Load a word2vec model stored in the C *text* format.
    >>> wv_from_text = KeyedVectors.load_word2vec_format(datapath('word2vec_pre_kv_c'), binary=False)
    >>> # Load a word2vec model stored in the C *binary* format.
    >>> wv_from_bin = KeyedVectors.load_word2vec_format(datapath("euclidean_vectors.bin"), binary=True)

It is impossible to continue training the vectors loaded from the C format because the hidden weights,
vocabulary frequencies and the binary tree are missing. To continue training, you'll need the
full :class:`~gensim.models.word2vec.Word2Vec` object state, as stored by :meth:`~gensim.models.word2vec.Word2Vec.save`,
not just the :class:`~gensim.models.keyedvectors.KeyedVectors`.
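
In other words, save the full model whenever further training might be needed; the `KeyedVectors` export
is a query-only snapshot. A short sketch of that workflow (file names are illustrative):

.. sourcecode:: pycon

    >>> model.save("word2vec.model")            # full model: vocabulary stats, hidden weights, training state
    >>> model.wv.save("word2vec.wordvectors")   # vectors only: smaller, but cannot be trained further
    >>>
    >>> model = Word2Vec.load("word2vec.model")
    >>> model.train([["hello", "world"]], total_examples=1, epochs=1)  # training can continue
    (0, 2)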

You can perform various NLP tasks with a trained model. Some of the operations
are already built-in - see :mod:`gensim.models.keyedvectors`.

If you're finished training a model (i.e. no more updates, only querying),
you can switch to the :class:`~gensim.models.keyedvectors.KeyedVectors` instance:

.. sourcecode:: pycon

    >>> word_vectors = model.wv
    >>> del model

to trim unneeded model state, which uses much less RAM and allows fast loading and memory sharing (mmap).

Embeddings with multiword ngrams
================================

There is a :mod:`gensim.models.phrases` module which lets you automatically
detect phrases longer than one word, using collocation statistics.
Using phrases, you can learn a word2vec model where "words" are actually multiword expressions,
such as `new_york_times` or `financial_crisis`:

.. sourcecode:: pycon

    >>> from gensim.models import Phrases
    >>>
    >>> # Train a bigram detector.
    >>> bigram_transformer = Phrases(common_texts)
    >>>
    >>> # Apply the trained MWE detector to a corpus, using the result to train a Word2vec model.
    >>> model = Word2Vec(bigram_transformer[common_texts], min_count=1)
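
On a corpus large enough for real collocations, the detector rewrites frequent word pairs into single
underscore-joined tokens. A purely illustrative transformation of one tokenized sentence (the output shown
assumes a detector trained on a much larger corpus than ``common_texts``):

.. sourcecode:: pycon

    >>> bigram_transformer[["new", "york", "times"]]  # doctest: +SKIP
    ['new_york', 'times']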

Pretrained models
=================

Gensim comes with several pre-trained models, available from the
`Gensim-data repository <https://github.com/RaRe-Technologies/gensim-data>`_:

.. sourcecode:: pycon

    >>> import gensim.downloader
    >>> # Show all available models in gensim-data
    >>> print(list(gensim.downloader.info()['models'].keys()))
    ['fasttext-wiki-news-subwords-300',
     'conceptnet-numberbatch-17-06-300',
     'word2vec-ruscorpora-300',
     'word2vec-google-news-300',
     'glove-wiki-gigaword-50',
     'glove-wiki-gigaword-100',
     'glove-wiki-gigaword-200',
     'glove-wiki-gigaword-300',
     'glove-twitter-25',
     'glove-twitter-50',
     'glove-twitter-100',
     'glove-twitter-200',
     '__testing_word2vec-matrix-synopsis']
    >>>
    >>> # Download the "glove-twitter-25" embeddings
    >>> glove_vectors = gensim.downloader.load('glove-twitter-25')
    >>>
    >>> # Use the downloaded vectors as usual:
    >>> glove_vectors.most_similar('twitter')
    [('facebook', 0.948005199432373),
     ('tweet', 0.9403423070907593),
     ('fb', 0.9342358708381653),
     ('instagram', 0.9104824066162109),
     ('chat', 0.8964964747428894),
     ('hashtag', 0.8885937333106995),
     ('tweets', 0.8878158330917358),
     ('tl', 0.8778461217880249),
     ('link', 0.8778210878372192),
     ('internet', 0.8753897547721863)]

"""

from __future__ import division

import logging
import sys
import os
import heapq
import itertools
import threading
from timeit import default_timer
from collections import defaultdict, namedtuple
from collections.abc import Iterable
from types import GeneratorType
from queue import Queue, Empty

import numpy as np
from numpy import float32 as REAL

from gensim import utils, matutils
from gensim.utils import keep_vocab_item, call_on_class_only, deprecated
from gensim.models.keyedvectors import KeyedVectors, pseudorandom_weak_vector

logger = logging.getLogger(__name__)

try:
    # Optimized Cython routines; without them, training is not available.
    from gensim.models.word2vec_inner import (
        train_batch_sg, train_batch_cbow,
        score_sentence_sg, score_sentence_cbow,
        MAX_WORDS_IN_BATCH, FAST_VERSION,
    )
except ImportError:
    raise utils.NO_CYTHON

try:
    from gensim.models.word2vec_corpusfile import train_epoch_sg, train_epoch_cbow, CORPUSFILE_VERSION
except ImportError:
    # corpus_file-based training needs the compiled extension; fall back to stubs that raise at call time.
    CORPUSFILE_VERSION = -1

    def train_epoch_sg(
            model, corpus_file, offset, _cython_vocab, _cur_epoch, _expected_examples, _expected_words,
            _work, _neu1, compute_loss):
        raise RuntimeError("Training with corpus_file argument is not supported")

    def train_epoch_cbow(
            model, corpus_file, offset, _cython_vocab, _cur_epoch, _expected_examples, _expected_words,
            _work, _neu1, compute_loss):
        raise RuntimeError("Training with corpus_file argument is not supported")


class Word2Vec(utils.SaveLoad):
    def __init__(
            self, sentences=None, corpus_file=None, vector_size=100, alpha=0.025, window=5, min_count=5,
            max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001,
            sg=0, hs=0, negative=5, ns_exponent=0.75, cbow_mean=1, hashfxn=hash, epochs=5, null_word=0,
            trim_rule=None, sorted_vocab=1, batch_words=MAX_WORDS_IN_BATCH, compute_loss=False, callbacks=(),
            comment=None, max_final_vocab=None, shrink_windows=True,
    ):
        """Train, use and evaluate neural networks described in https://code.google.com/p/word2vec/.

        Once you're finished training a model (=no more updates, only querying)
        store and use only the :class:`~gensim.models.keyedvectors.KeyedVectors` instance in ``self.wv``
        to reduce memory.

        The full model can be stored/loaded via its :meth:`~gensim.models.word2vec.Word2Vec.save` and
        :meth:`~gensim.models.word2vec.Word2Vec.load` methods.

        The trained word vectors can also be stored/loaded from a format compatible with the
        original word2vec implementation via `self.wv.save_word2vec_format`
        and :meth:`gensim.models.keyedvectors.KeyedVectors.load_word2vec_format`.

        Parameters
        ----------
        sentences : iterable of iterables, optional
            The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
            consider an iterable that streams the sentences directly from disk/network.
            See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
            or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
            See also the `tutorial on data streaming in Python
            <https://rare-technologies.com/data-streaming-in-python-generators-iterators-iterables/>`_.
            If you don't supply `sentences`, the model is left uninitialized -- use if you plan to initialize it
            in some other way.
        corpus_file : str, optional
            Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
            You may use this argument instead of `sentences` to get performance boost. Only one of `sentences` or
            `corpus_file` arguments need to be passed (or none of them, in that case, the model is left uninitialized).
        vector_size : int, optional
            Dimensionality of the word vectors.
        window : int, optional
            Maximum distance between the current and predicted word within a sentence.
        min_count : int, optional
            Ignores all words with total frequency lower than this.
        workers : int, optional
            Use these many worker threads to train the model (=faster training with multicore machines).
        sg : {0, 1}, optional
            Training algorithm: 1 for skip-gram; otherwise CBOW.
        hs : {0, 1}, optional
            If 1, hierarchical softmax will be used for model training.
            If 0, and `negative` is non-zero, negative sampling will be used.
        negative : int, optional
            If > 0, negative sampling will be used, the int for negative specifies how many "noise words"
            should be drawn (usually between 5-20).
            If set to 0, no negative sampling is used.
        ns_exponent : float, optional
            The exponent used to shape the negative sampling distribution. A value of 1.0 samples exactly in proportion
            to the frequencies, 0.0 samples all words equally, while a negative value samples low-frequency words more
            than high-frequency words. The popular default value of 0.75 was chosen by the original Word2Vec paper.
            More recently, in https://arxiv.org/abs/1804.04212, Caselles-Dupré, Lesaint, & Royo-Letelier suggest that
            other values may perform better for recommendation applications.
        cbow_mean : {0, 1}, optional
            If 0, use the sum of the context word vectors. If 1, use the mean, only applies when cbow is used.
        alpha : float, optional
            The initial learning rate.
        min_alpha : float, optional
            Learning rate will linearly drop to `min_alpha` as training progresses.
        seed : int, optional
            Seed for the random number generator. Initial vectors for each word are seeded with a hash of
            the concatenation of word + `str(seed)`. Note that for a fully deterministically-reproducible run,
            you must also limit the model to a single worker thread (`workers=1`), to eliminate ordering jitter
            from OS thread scheduling. (In Python 3, reproducibility between interpreter launches also requires
            use of the `PYTHONHASHSEED` environment variable to control hash randomization).
        max_vocab_size : int, optional
            Limits the RAM during vocabulary building; if there are more unique
            words than this, then prune the infrequent ones. Every 10 million word types need about 1GB of RAM.
            Set to `None` for no limit.
        max_final_vocab : int, optional
            Limits the vocab to a target vocab size by automatically picking a matching min_count. If the specified
            min_count is more than the calculated min_count, the specified min_count will be used.
            Set to `None` if not required.
        sample : float, optional
            The threshold for configuring which higher-frequency words are randomly downsampled,
            useful range is (0, 1e-5).
        hashfxn : function, optional
            Hash function to use to randomly initialize weights, for increased training reproducibility.
        epochs : int, optional
            Number of iterations (epochs) over the corpus. (Formerly: `iter`)
        trim_rule : function, optional
            Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
            be trimmed away, or handled using the default (discard if word count < min_count).
            Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
            or a callable that accepts parameters (word, count, min_count) and returns either
            :attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
            The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part of the
            model.

            The input parameters are of the following types:
                * `word` (str) - the word we are examining
                * `count` (int) - the word's frequency count in the corpus
                * `min_count` (int) - the minimum count threshold.
        sorted_vocab : {0, 1}, optional
            If 1, sort the vocabulary by descending frequency before assigning word indexes.
            See :meth:`~gensim.models.keyedvectors.KeyedVectors.sort_by_descending_frequency()`.
        batch_words : int, optional
            Target size (in words) for batches of examples passed to worker threads (and
            thus cython routines). (Larger batches will be passed if individual
            texts are longer than 10000 words, but the standard cython code truncates to that maximum.)
        compute_loss: bool, optional
            If True, computes and stores loss value which can be retrieved using
            :meth:`~gensim.models.word2vec.Word2Vec.get_latest_training_loss`.
        callbacks : iterable of :class:`~gensim.models.callbacks.CallbackAny2Vec`, optional
            Sequence of callbacks to be executed at specific stages during training.
        shrink_windows : bool, optional
            New in 4.1. Experimental.
            If True, the effective window size is uniformly sampled from  [1, `window`]
            for each target word during training, to match the original word2vec algorithm's
            approximate weighting of context words by distance. Otherwise, the effective
            window size is always fixed to `window` words to either side.

        Examples
        --------
        Initialize and train a :class:`~gensim.models.word2vec.Word2Vec` model

        .. sourcecode:: pycon

            >>> from gensim.models import Word2Vec
            >>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
            >>> model = Word2Vec(sentences, min_count=1)
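
        The defaults above give a CBOW model with negative sampling; a skip-gram model with hierarchical
        softmax, for example, only needs the corresponding flags flipped (parameter values here are
        illustrative, not tuned recommendations):

        .. sourcecode:: pycon

            >>> model_sg = Word2Vec(sentences, sg=1, hs=1, negative=0, vector_size=50, window=3, min_count=1)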

        Attributes
        ----------
        wv : :class:`~gensim.models.keyedvectors.KeyedVectors`
            This object essentially contains the mapping between words and embeddings. After training, it can be used
            directly to query those embeddings in various ways. See the module level docstring for examples.

        """

    def build_vocab(
            self, corpus_iterable=None, corpus_file=None, update=False, progress_per=10000,
            keep_raw_vocab=False, trim_rule=None, **kwargs):
        """Build vocabulary from a sequence of sentences (can be a once-only generator stream).

        Parameters
        ----------
        corpus_iterable : iterable of list of str
            Can be simply a list of lists of tokens, but for larger corpora,
            consider an iterable that streams the sentences directly from disk/network.
            See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
            or :class:`~gensim.models.word2vec.LineSentence` module for such examples.
        corpus_file : str, optional
            Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
            You may use this argument instead of `sentences` to get performance boost. Only one of `sentences` or
            `corpus_file` arguments need to be passed (not both of them).
        update : bool
            If true, the new words in `sentences` will be added to model's vocab.
        progress_per : int, optional
            Indicates how many words to process before showing/updating the progress.
        keep_raw_vocab : bool, optional
            If False, the raw vocabulary will be deleted after the scaling is done to free up RAM.
        trim_rule : function, optional
            Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
            be trimmed away, or handled using the default (discard if word count < min_count).
            Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
            or a callable that accepts parameters (word, count, min_count) and returns either
            :attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
            The rule, if given, is only used to prune vocabulary during current method call and is not stored as part
            of the model.

            The input parameters are of the following types:
                * `word` (str) - the word we are examining
                * `count` (int) - the word's frequency count in the corpus
                * `min_count` (int) - the minimum count threshold.

        **kwargs : object
            Keyword arguments propagated to `self.prepare_vocab`.

        r7   r@   )rA   r%   progress_perrC   )updatekeep_raw_vocabrC   num_retained_words
vocab_sizememoryr   ru   )r   rC   Nr:   )	rt   
scan_vocabrb   rc   prepare_vocabestimate_memoryprepare_weightsry   rz   )r{   rA   r%   r   r   r   rC   kwargsrE   rb   report_valuess              r.   ru   zWord2Vec.build_vocab  s    R 	!!/{cd!eee$(OO+S_kt %4 %v %v!\("-**w&clwwpvww"&"6"6-PdBe"6"f"fhF+++  vY XXXXXr0   c                    t                               d           |}t                               dt          |          t          |                                                     |pd| _        || _        |                     |||          }|                     |d                   |d<   | 	                    |           d	S )
a  Build vocabulary from a dictionary of word frequencies.

        Parameters
        ----------
        word_freq : dict of (str, int)
            A mapping from a word in the vocabulary to its frequency count.
        keep_raw_vocab : bool, optional
            If False, delete the raw vocabulary after the scaling is done to free up RAM.
        corpus_count : int, optional
            Even if no corpus is provided, this argument can set corpus_count explicitly.
        trim_rule : function, optional
            Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
            be trimmed away, or handled using the default (discard if word count < min_count).
            Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
            or a callable that accepts parameters (word, count, min_count) and returns either
            :attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
            The rule, if given, is only used to prune vocabulary during current method call and is not stored as part
            of the model.

            The input parameters are of the following types:
                * `word` (str) - the word we are examining
                * `count` (int) - the word's frequency count in the corpus
                * `min_count` (int) - the minimum count threshold.

        update : bool, optional
            If true, the new provided words in `word_freq` dict will be added to model's vocab.

        z$Processing provided word frequenciesz:collected %i unique word types, with total frequency of %ir   )r   rC   r   r   r   r   r   N)
rw   infolensumvaluesrb   rk   r   r   r   )r{   	word_freqr   rb   rC   r   rk   r   s           r.   build_vocab_from_freqzWord2Vec.build_vocab_from_freq  s    > 	:;;; 	H	NNC	 0 0 2 233	
 	
 	
 )-A" **.T]fl*mm"&"6"6-PdBe"6"f"fhF+++++r0   c           	      6   d}d}d}t          t                    }d}t          |          D ]\  }}	|sBt          |	t                    r(t
                              dt          |	                     |dz  }||z  dk    r*t
                              d||t          |                     |	D ]}
||
xx         dz  cc<   |t          |	          z  }| j
        r4t          |          | j
        k    rt          j        |||           |dz  }|dz   }|| _        ||fS )Nr   r   r7   zoEach 'sentences' item should be a list of words (usually unicode strings). First item here is instead plain %s.zDPROGRESS: at sentence #%i, processed %i words, keeping %i word typesrC   )r   rL   	enumerate
isinstancerz   rw   rx   typer   r   re   r   prune_vocabrk   )r{   r|   r   rC   sentence_norE   
min_reducevocabchecked_string_typessentencewordrb   s               r.   _scan_vocabzWord2Vec._scan_vocab%  sY   
C   %.y%9%9 	  	 !K' *h,, NN?X  
 %)$\)Q. Zc%jj   ! ! !dq 3x==(K"  s5zzD4G'G  !%yIIIIa
"QL((r0   c                     t                               d           |rt          |          }|                     |||          \  }}t                               dt	          | j                  ||           ||fS )Nz%collecting all words and their countszFcollected %i word types from a corpus of %i raw words and %i sentences)rw   r   LineSentencer   r   rk   )r{   rA   r%   r   rN   rC   rE   rb   s           r.   r   zWord2Vec.scan_vocabE  s~    ;<<< 	8*;77O$($4$4_lT]$^$^!\Tl	
 	
 	

 L((r0   c                     |p j         }|p j        }dx}}| _         j        t	           j                                         fdd          }	d}
 j        t          |	          k     r j        |	 j                          dz   }
t          |
|           _         	                    dd j         d	| d
|
 d j                    |st                              d           dg }}|s&g  j        _        | _         | _        i  j        _         j                                        D ]\  }}t!          || j        |          rb|                    |           ||z  }|sEt           j        j                   j        j        |<    j        j                            |           |dz  }||z  }|s6 j        j        D ])} j                            |d j        |                    *t          |          |z   }t          |          dz  t          |d          z  } 	                    dd j         dt          |           d|dd| d| d           ||z   }|dz  t          |d          z  } 	                    dd j         d| d|dd| d| d           nBt                              d           dx}}g }g } j                                        D ]\  }}t!          || j        |          r j                            |          r|                    |           ||z  }|s	 U|                    |           ||z  }|sEt           j        j                   j        j        |<    j        j                            |           |dz  }||z  }|s j                            dgt+          d          g            j        j        D ]S} j                            |d j                            |d           j                            |d          z              Tt          |          t          |          z   |z   }t          |          dz  t          |d          z  }t          |          dz  t          |d          z  } 	                    ddt          |           d|dd| dt          |           d|dd| d           ||z   }||z   }|s|}n6|d k     r||z  }n*t1          |d!t3          j        d"          z   z  d#z            }d$\  }}|D ]}} j        |         }t3          j        ||z            dz   ||z  z  }|d k     r|dz  }|||z  z  }nd }||z  }|s1 j                            |d%t3          j        |d&z                       ~|sH|sFt                              d't           j                             t9          t0                     _        t                              d(||            	                    dd)| d|d*z  t          |d          z  d+d,| d           |||t1          |          t          |          d-} j        r                                   j        r|s j                                           j!        r "                                  j#        r $                                 |S ).a$  Apply vocabulary settings for `min_count` (discarding less-frequent words)
        and `sample` (controlling the downsampling of more-frequent words).

        Calling with `dry_run=True` will only simulate the provided settings and
        report the size of the retained vocabulary, effective corpus length, and
        estimated memory requirements. Results are both printed via logging and
        returned as a dict.

        Delete the raw vocabulary after the scaling is done to free up RAM,
        unless `keep_raw_vocab` is set.

        r   Nc                     j         |          S N)rk   )r   r{   s    r.   <lambda>z(Word2Vec.prepare_vocab.<locals>.<lambda>m  s    $.Y]J^ r0   T)keyreverser7   r   zmax_final_vocab=z and min_count=z resulted in calc_min_count=z, effective_min_count=msgzCreating a fresh vocabularyr   countr4   zeffective_min_count=z	 retains z unique words (z.2fz% of original z, drops )z leaves z word corpus (z"Updating model with new vocabulary)attrstypeszadded z new unique words (z) and increased the count of z pre-existing words (      ?r8   r5      r   r   
sample_intl    z.deleting the raw counts dictionary of %i itemsz*sample=%g downsamples %i most-common wordszdownsampling leaves estimated       Y@.1fz%% of prior )drop_uniqueretain_totaldownsample_uniquedownsample_totalr   )%rf   rg   effective_min_countrd   sortedrk   keysr   maxry   rw   r   r<   index_to_keykey_to_indexitemsr   appendset_vecattrhas_index_forallocate_vecattrsr   get_vecattrgetrL   rY   sqrtuint32r   ri   add_null_wordrh   sort_by_descending_frequencyr\   create_binary_treer]   make_cum_table)r{   r   r   rC   rf   rg   dry_run
drop_totalr   rh   calc_min_countr   retain_wordsr   voriginal_unique_totalretain_unique_pctoriginal_total
retain_pct	new_totalpre_exist_total	new_wordspre_exist_wordspre_exist_unique_pctnew_unique_pctthreshold_countr   r   wword_probabilityr   s   `                              r.   r   zWord2Vec.prepare_vocabS  s     /	&4;#$$
[ $-   	!$."5"5"7"7=^=^=^=^hlmmmLN#c,&7&77 X!%T=Q0R!SVW!W'*>9'E'ED$$$jt'; j jI j j)7j jOSOgj j %     R	7KK5666)*B,L *')$!*$')$>//11 	$ 	$a"4D,DPYZZZ $ ''--- A%L" :589M5N5N,T2,33D9991$K!OJJ M G0 M MDG''gt~d7KLLLL$'$5$5$C! #L 1 1C 7#>SUV:W:W W$$q4+C q qcR^N_N_ q q/pq qCXq qbmq q q %    *J6N%+c.!.D.DDJ$$\4+C \ \\ \ \"[\ \6D\ \NX\ \ \ %     KK<===*++II O>//11 $ $a"4D,DPYZZZ $w,,T22 
>'..t444'1,& ! !((...!Q	& >9<TW=Q9R9RDG06 G077===1$K!OJJ y))	$q'')KKK G0 y yDG''gtw7J7J4QX7Y7Y\`\j\n\nosuv\w\w7wxxxx$'$8$83y>>$IK$W!#&#7#7##=DY[\@]@]#]  ^^c1C8Mq4Q4QQN$$lS^^ l lb l l,l lKNK_K_l l+?kl lShl l l %    %6L$6L  	A*OOc\ 	A$|3OO "&A

N";a"?@@O.2++ 
	` 
	`Aq!A "O(; < <q @_WXEXY#% &!Q&! $4q$88  #&  A%  `##A|RY?OS\?]5^5^___ 	.~ 	.KKH#dnJ]J]^^^(--DN@&J[\\\  e1A e e$u,s</C/CCde eUae e e 	! 	
 	
 	
 '[l #$4 5 5SQ]M^M^
 

 > 	!     	3V 	3G002227 	&##%%%= 	"!!!r0   c                    |pt          | j                  }|pi }|| j        rdndz  |d<   || j        z  t	          j        t                    j        z  |d<   | j        r,|| j        z  t	          j        t                    j        z  |d<   | j	        r,|| j        z  t	          j        t                    j        z  |d<   t          |                                          |d<   t                              d|| j        |d                    |S )	a7  Estimate required memory for a model using current settings and provided vocabulary size.

        Parameters
        ----------
        vocab_size : int, optional
            Number of unique tokens in the vocabulary
        report : dict of (str, int), optional
            A dictionary from string representations of the model's memory consuming members to their size in bytes.

        Returns
        -------
        dict of (str, int)
            A dictionary from string representations of the model's memory consuming members to their size in bytes.

        i  i  r   vectorssyn1syn1negtotalzBestimated required memory for %i words and %i dimensions: %i bytes)r   r<   r\   rM   rY   r>   rn   itemsizer?   r]   r   r   rw   r   )r{   r   reports      r.   r   zWord2Vec.estimate_memory  s       /3tw<<
2$tw(?C@w&)99BHTNN<SSy7 	U'$*::RXd^^=TTF6N= 	X *T-= =@W WF9fmmoo..wP(&/	
 	
 	
 r0   c                     d}t          | j                  | j        j        |<   | j        j                            |           | j                            |dd           d S )N r   r7   )r   r<   r   r   r   r   )r{   r   s     r.   r   zWord2Vec.add_null_word+  sW    %(\\T"##D)))D'1-----r0   c                 .    t          | j                   dS )a  Create a `binary Huffman tree <https://en.wikipedia.org/wiki/Huffman_coding>`_ using stored vocabulary
        word counts. Frequent words will have shorter binary codes.
        Called internally from :meth:`~gensim.models.word2vec.Word2VecVocab.build_vocab`.

        N)_assign_binary_codesr<   r{   s    r.   r   zWord2Vec.create_binary_tree1  s     	TW%%%%%r0   c                 :   t          | j        j                  }t          j        |t          j                  | _        d}t          |          D ]7}| j                            |d          }||t          | j
                  z  z  }8d}t          |          D ]T}| j                            |d          }||t          | j
                  z  z  }t          ||z  |z            | j        |<   Ut          | j                  dk    r| j        d         |k    sJ dS dS )a  Create a cumulative-distribution table using stored vocabulary word counts for
        drawing random words in the negative-sampling training routines.

        To draw a word index, choose a random integer up to the maximum value in the table (cum_table[-1]),
        then finding that integer's sorted insertion point (as if by `bisect_left` or `ndarray.searchsorted()`).
        That insertion point is the drawn index, coming up in proportion equal to the increment at that slot.

        r=           r   r   r   N)r   r<   r   rY   zerosr   rj   ranger   rS   r^   round)r{   domainr   train_words_pow
word_indexr   
cumulatives          r.   r   zWord2Vec.make_cum_table9  s,    -..
*BI>>>
++ 	> 	>JG''
G<<EueD,<&=&===OO

++ 	V 	VJG''
G<<E%t'7!8!888J).zO/Kf/T)U)UDN:&&t~" 	0>"%////	0 	0//r0   c                 ^    |s|                                   dS |                                  dS )zBBuild tables and model weights based on final vocabulary settings.N)init_weightsupdate_weights)r{   r   s     r.   r   zWord2Vec.prepare_weightsQ  s<      	"!!!!!r0   zBUse gensim.models.keyedvectors.pseudorandom_weak_vector() directlyc                 0    t          ||| j                  S )N)seed_stringrp   )r   rp   )r{   r   rM   s      r.   seeded_vectorzWord2Vec.seeded_vectorY  s    'VZVbccccr0   c                 ~   t                               d           | j                            | j                   | j        r9t          j        t          | j                  | j	        ft                    | _        | j        r;t          j        t          | j                  | j	        ft                    | _        dS dS )z_Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary.zresetting layer weightsrq   r=   N)rw   r   r<   resize_vectorsrq   r\   rY   r   r   r?   rn   r   r]   r   r   s    r.   r   zWord2Vec.init_weights]  s    -...DI...7 	O#dg,,0@!ANNNDI= 	R8S\\43C$DDQQQDLLL	R 	Rr0   c                 X   t                               d           t          | j        j                  st          d          t          | j        j                  }| j                            | j                   t          | j        j                  |z
  }| j        r@t          j
        | j        t          j        || j        ft                    g          | _        | j        rDt          j        || j        ft                    }t          j
        | j        |g          | _        dS dS )zTCopy all the existing weights, and reset the weights for the newly added vocabulary.zupdating layer weightszYou cannot do an online vocabulary-update of a model which has no prior vocabulary. First build the vocabulary of your model with a corpus before doing an online update.r   r=   N)rw   r   r   r<   r   r"   r   rq   r\   rY   vstackr   r   r?   rn   r]   r   )r{   preresize_countgained_vocabpads       r.   r   zWord2Vec.update_weightsg  s   ,---47?## 	h   dgo..DI...47?++o=7 	g	49bhdFV7W_c.d.d.d"effDI= 	:(L$*:;4HHHC9dlC%899DLLL	: 	:r0   zGensim 4.0.0 implemented internal optimizations that make calls to init_sims() unnecessary. init_sims() is now obsoleted and will be completely removed in future versions. See https://github.com/RaRe-Technologies/gensim/wiki/Migrating-from-Gensim-3.x-to-4c                 <    | j                             |           dS )a  
        Precompute L2-normalized vectors. Obsoleted.

        If you need a single unit-normalized vector for some key, call
        :meth:`~gensim.models.keyedvectors.KeyedVectors.get_vector` instead:
        ``word2vec_model.wv.get_vector(key, norm=True)``.

        To refresh norms after you performed some atypical out-of-band vector tampering,
        call `:meth:`~gensim.models.keyedvectors.KeyedVectors.fill_norms()` instead.

        Parameters
        ----------
        replace : bool
            If True, forget the original trained vectors and only keep the normalized ones.
            You lose information if you do this.

        )replaceN)r<   	init_sims)r{   r  s     r.   r  zWord2Vec.init_simsz  s#    . 	'*****r0   c	                     |\  }
}| j         r"t          | |||||||
|| j        
  
        \  }}}n!t          | |||||||
|| j        
  
        \  }}}|||fS r   )rR   r   r-   r   )r{   r%   	thread_idr&   cython_vocabthread_private_mem	cur_epochrD   rE   r   workneu1examplestally	raw_tallys                  r.   _do_train_epochzWord2Vec._do_train_epoch  s     (
d7 		)7k6<T49J* *&HeYY
 *:k6<T49J* *&HeY
 	))r0   c           	          |\  }}d}| j         r|t          | |||| j                  z  }n|t          | ||||| j                  z  }||                     |          fS )a  Train the model on a single batch of sentences.

        Parameters
        ----------
        sentences : iterable of list of str
            Corpus chunk to be used in this training batch.
        alpha : float
            The learning rate used in this batch.
        inits : (np.ndarray, np.ndarray)
            Each worker threads private work memory.

        Returns
        -------
        (int, int)
             2-tuple (effective word count after ignoring unknown words and sentence length trimming, total word count).

        r   )rR   r   r-   r   _raw_word_count)r{   r|   rT   initsr  r  r  s          r.   _do_train_jobzWord2Vec._do_train_job  su    $ 
d7 	]^D)UD$BSTTTEE%dIudD$J[\\\Ed**95555r0   c                     d| j         _        dS )z;Clear any cached values that training may have invalidated.N)r<   normsr   s    r.   _clear_post_trainzWord2Vec._clear_post_train  s    r0   r   r   c                    |p| j         | _         |p| j        | _        || _        |                     |||           |                     |||           |                     dd| j         dt          | j                   d| j	         d| j
         d| j         d	| j         d
| j         d| j         d| j                    || _        d| _        |D ]}|                    |            d}d}t'                      dz
  }d}t)          | j                  D ]x}|D ]}|                    |            | | j        |f||||	|
|d|\  }}}n | j        |f||||d|\  }}}||z  }||z  }||z  }|D ]}|                    |            yt'                      |z
  }|                     ||||           | xj        dz  c_        |                                  |D ]}|                    |            ||fS )a  Update the model's neural weights from a sequence of sentences.

        Notes
        -----
        To support linear learning-rate decay from (initial) `alpha` to `min_alpha`, and accurate
        progress-percentage logging, either `total_examples` (count of sentences) or `total_words` (count of
        raw words in sentences) **MUST** be provided. If `sentences` is the same corpus
        that was provided to :meth:`~gensim.models.word2vec.Word2Vec.build_vocab` earlier,
        you can simply use `total_examples=self.corpus_count`.

        Warnings
        --------
        To avoid common mistakes around the model's ability to do multiple training passes itself, an
        explicit `epochs` argument **MUST** be provided. In the common and recommended case
        where :meth:`~gensim.models.word2vec.Word2Vec.train` is only called once, you can set `epochs=self.epochs`.

        Parameters
        ----------
        corpus_iterable : iterable of list of str
            The ``corpus_iterable`` can be simply a list of lists of tokens, but for larger corpora,
            consider an iterable that streams the sentences directly from disk/network, to limit RAM usage.
            See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
            or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
            See also the `tutorial on data streaming in Python
            <https://rare-technologies.com/data-streaming-in-python-generators-iterators-iterables/>`_.
        corpus_file : str, optional
            Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
            You may use this argument instead of `sentences` to get performance boost. Only one of `sentences` or
            `corpus_file` arguments need to be passed (not both of them).
        total_examples : int
            Count of sentences.
        total_words : int
            Count of raw words in sentences.
        epochs : int
            Number of iterations (epochs) over the corpus.
        start_alpha : float, optional
            Initial learning rate. If supplied, replaces the starting `alpha` from the constructor,
            for this one call to`train()`.
            Use only if making multiple calls to `train()`, when you want to manage the alpha learning-rate yourself
            (not recommended).
        end_alpha : float, optional
            Final learning rate. Drops linearly from `start_alpha`.
            If supplied, this replaces the final `min_alpha` from the constructor, for this one call to `train()`.
            Use only if making multiple calls to `train()`, when you want to manage the alpha learning-rate yourself
            (not recommended).
        word_count : int, optional
            Count of words already trained. Set this to 0 for the usual
            case of training on all words in sentences.
        queue_factor : int, optional
            Multiplier for size of queue (number of workers * queue_factor).
        report_delay : float, optional
            Seconds to wait before reporting progress.
        compute_loss: bool, optional
            If True, computes and stores loss value which can be retrieved using
            :meth:`~gensim.models.word2vec.Word2Vec.get_latest_training_loss`.
        callbacks : iterable of :class:`~gensim.models.callbacks.CallbackAny2Vec`, optional
            Sequence of callbacks to be executed at specific stages during training.

        Examples
        --------
        .. sourcecode:: pycon

            >>> from gensim.models import Word2Vec
            >>> sentences = [["cat", "say", "meow"], ["dog", "say", "woof"]]
            >>>
            >>> model = Word2Vec(min_count=1)
            >>> model.build_vocab(sentences)  # prepare the model vocabulary
            >>> model.train(sentences, total_examples=model.corpus_count, epochs=model.epochs)  # train word vectors
            (1, 30)

        )rF   rD   rE   r@   rv   ztraining model with z workers on z vocabulary and z features, using sg=z hs=z sample=z
 negative=z window=z shrink_windows=r   r   r   h㈵>N)r
  rD   rE   queue_factorreport_delayrI   )r
  rD   rE   rI   r7   )rT   rU   rF   _check_training_sanityrt   ry   rN   r   r<   r?   rR   r\   rg   r]   rV   rX   r-   r`   on_train_beginr   r   on_epoch_begin_train_epoch_train_epoch_corpusfileon_epoch_end_log_train_endrO   r  on_train_end)r{   rA   r%   rD   rE   rF   rG   rH   
word_countr  r  r-   rI   r   callbacktrained_word_countraw_word_countstart	job_tallyr
  trained_word_count_epochraw_word_count_epochjob_tally_epochtotal_elapseds                           r.   rv   zWord2Vec.train  s&   Z !.DJ
"4dn##6.^i#jjj!!/{ci!jjj  ft| f fTW f f#f f9=f fFJgf fW[Wbf f Mf f37;f fPTPcf f 	! 	
 	
 	
 )%("! 	* 	*H##D))))')	t{++ 	, 	,I% . .''---- 3RcRVRc#S3/8 +,Ua'S3 S3 ,2S3 S3O(*>
 SoRVRnS3+4^al'S3 S3+1S3 S3O(*> "::22N(I% , ,%%d++++, &%/N,>yYYYA   ! 	( 	(H!!$''''!>11r0   c	                     |                                  }
 | j        |||||
|f||d|	\  }}}|                    |||f           |                    d           dS )a  Train the model on a `corpus_file` in LineSentence format.

        This function will be called in parallel by multiple workers (threads or processes) to make
        optimal use of multicore machines.

        Parameters
        ----------
        corpus_file : str
            Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
        thread_id : int
            Thread index starting from 0 to `number of workers - 1`.
        offset : int
            Offset (in bytes) in the `corpus_file` for particular worker.
        cython_vocab : :class:`~gensim.models.word2vec_inner.CythonVocab`
            Copy of the vocabulary in order to access it without GIL.
        progress_queue : Queue of (int, int, int)
            A queue of progress reports. Each report is represented as a tuple of these 3 elements:
                * Size of data chunk processed, for example number of sentences in the corpus chunk.
                * Effective word count used in training (after ignoring unknown words and trimming the sentence length).
                * Total word count used in training.
        **kwargs : object
            Additional key word parameters for the specific model inheriting from this class.

        )rD   rE   N)_get_thread_working_memr  put)r{   r%   r  r&   r  progress_queuer
  rD   rE   r   r	  r  r  r  s                 r.   _worker_loop_corpusfilez Word2Vec._worker_loop_corpusfileM  s    8 "99;;%9T%9FL:Li&N){&N &NFL&N &N"% 	HeY78884     r0   c                 V   |                                  }d}	 |                                }||                    d           nJ|\  }}|                     |||          \  }}	|                    t	          |          ||	f           |dz  }vt
                              d|           dS )a  Train the model, lifting batches of data from the queue.

        This function will be called in parallel by multiple workers (threads or processes) to make
        optimal use of multicore machines.

        Parameters
        ----------
        job_queue : Queue of (list of objects, float)
            A queue of jobs still to be processed. The worker will take up jobs from this queue.
            Each job is represented by a tuple where the first element is the corpus chunk to be processed and
            the second is the floating-point learning rate.
        progress_queue : Queue of (int, int, int)
            A queue of progress reports. Each report is represented as a tuple of these 3 elements:
                * Size of data chunk processed, for example number of sentences in the corpus chunk.
                * Effective word count used in training (after ignoring unknown words and trimming the sentence length).
                * Total word count used in training.

        r   TNr7   z!worker exiting, processed %i jobs)r/  r   r0  r  r   rw   debug)
r{   	job_queuer1  r	  jobs_processedjobdata_iterablerT   r  r  s
             r.   _worker_loopzWord2Vec._worker_loopr  s    & "99;;
	 --//C ""4(((#& M5#11-HZ[[E9M 2 2E9EFFFaN
	  	8.IIIIIr0   c                    g d}}d\  }}	|                      d|          }
d}t          |          D ]\  }}|                     |g          }||z   | j        k    r|                    |           ||z  }D|dz  }|                    ||
f           |r|	t          |          z  }	d|	z  |z  }n ||                     |          z  }d|z  |z  }|                      ||          }
|g|}}|r|dz  }|                    ||
f           |dk    r%| j        dk    rt          	                    d           t          | j                  D ]}|                    d           t                              d|           dS )	aw  Fill the jobs queue using the data found in the input stream.

        Each job is represented by a tuple where the first element is the corpus chunk to be processed and
        the second is a dictionary of parameters.

        Parameters
        ----------
        data_iterator : iterable of list of objects
            The input dataset. This will be split in chunks and these chunks will be pushed to the queue.
        job_queue : Queue of (list of object, float)
            A queue of jobs still to be processed. The worker will take up jobs from this queue.
            Each job is represented by a tuple where the first element is the corpus chunk to be processed and
            the second is the floating-point learning rate.
        cur_epoch : int, optional
            The current training epoch, needed to compute the training parameters for each job.
            For example in many implementations the learning rate would be dropping with the number of epochs.
        total_examples : int, optional
            Count of objects in the `data_iterator`. In the usual case this would correspond to the number of sentences
            in a corpus. Used to log progress.
        total_words : int, optional
            Count of total objects in `data_iterator`. In the usual case this would correspond to the number of raw
            words in a corpus. Used to log progress.
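
        For example, with the default ``batch_words=10000`` each queued job is a tuple of
        (chunk of sentences, learning rate for that point of the epoch); a sketch with
        illustrative values only:

        .. sourcecode:: pycon

            >>> job_queue.get()  # doctest: +SKIP
            ([['first', 'sentence'], ['second', 'sentence']], 0.0249)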

        r   r   r   r7   r   ztrain() called with an empty iterator (if not intended, be sure to provide a corpus that offers restartable iteration = an iterable).Nzjob loop exiting, total %i jobs)_get_next_alphar   r  rQ   r   r0  r   rO   rw   rx   r   rN   r4  )r{   data_iteratorr5  r
  rD   rE   	job_batch
batch_sizepushed_wordspushed_examples
next_alphajob_nodata_idxdatadata_lengthepoch_progress_s                    r.   _job_producerzWord2Vec._job_producer  s   2 !#A:	(,%o))#y99
'66 	< 	<NHd..v66K K'4+;; <  &&&k)

!y*5666 " F#s9~~5O%(?%:^%KNN !D$8$8$C$CCL%(<%7+%EN!11.)LL
 *.:		 	3aKFMM9j1222Q; 	4+q0 	NN`   t|$$ 	  	 AMM$6?????r0   c                    d\  }}	}
t                      dz
  d}}d}| j        }|dk    r|                                }|!|dz  }t                              d|           =|\  }}}|dz  }||z  }|	|z  }	|
|z  }
t                      |z
  }||k    r"|                     ||||||
||	|	  	         ||z   }|dk    t                      |z
  }|                     ||||
||	||           | xj        |z  c_        |	|
|fS )a^  Get the progress report for a single training epoch.

        Parameters
        ----------
        progress_queue : Queue of (int, int, int)
            A queue of progress reports. Each report is represented as a tuple of these 3 elements:
                * size of data chunk processed, for example number of sentences in the corpus chunk.
                * Effective word count used in training (after ignoring unknown words and trimming the sentence length).
                * Total word count used in training.
        job_queue : Queue of (list of object, float)
            A queue of jobs still to be processed. The worker will take up jobs from this queue.
            Each job is represented by a tuple where the first element is the corpus chunk to be processed and
            the second is the floating-point learning rate.
        cur_epoch : int, optional
            The current training epoch, needed to compute the training parameters for each job.
            For example in many implementations the learning rate would be dropping with the number of epochs.
        total_examples : int, optional
            Count of objects in the `data_iterator`. In the usual case this would correspond to the number of sentences
            in a corpus. Used to log progress.
        total_words : int, optional
            Count of total objects in `data_iterator`. In the usual case this would correspond to the number of raw
            words in a corpus. Used to log progress.
        report_delay : float, optional
            Number of seconds between two consecutive progress report messages in the logger.
        is_corpus_file_mode : bool, optional
            Whether training is file-based (corpus_file argument) or not.

        Returns
        -------
        (int, int, int)
            The epoch report consisting of three elements:
                * size of data chunk processed, for example number of sentences in the corpus chunk.
                * Effective word count used in training (after ignoring unknown words and trimming the sentence length).
                * Total word count used in training.

        """
        example_count, trained_word_count, raw_word_count = 0, 0, 0
        start, next_report = default_timer() - 0.00001, 1.0
        job_tally = 0
        unfinished_worker_count = self.workers

        while unfinished_worker_count > 0:
            report = progress_queue.get()  # blocks if workers too slow
            if report is None:  # a thread reporting that it finished
                unfinished_worker_count -= 1
                logger.info("worker thread finished; awaiting finish of %i more threads", unfinished_worker_count)
                continue
            examples, trained_words, raw_words = report
            job_tally += 1

            # update progress stats
            example_count += examples
            trained_word_count += trained_words  # only words in vocab & sampled
            raw_word_count += raw_words

            # log progress once every report_delay seconds
            elapsed = default_timer() - start
            if elapsed >= next_report:
                self._log_progress(
                    job_queue, progress_queue, cur_epoch, example_count, total_examples,
                    raw_word_count, total_words, trained_word_count, elapsed)
                next_report = elapsed + report_delay

        # all done; report the final stats
        elapsed = default_timer() - start
        self._log_epoch_end(
            cur_epoch, example_count, total_examples, raw_word_count, total_words,
            trained_word_count, elapsed, is_corpus_file_mode)
        self.total_train_time += elapsed
        return trained_word_count, raw_word_count, job_tally

    def _train_epoch_corpusfile(
            self, corpus_file, cur_epoch=0, total_examples=None, total_words=None, **kwargs):
        """Train the model for a single epoch.

        Parameters
        ----------
        corpus_file : str
            Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
        cur_epoch : int, optional
            The current training epoch, needed to compute the training parameters for each job.
            For example in many implementations the learning rate would be dropping with the number of epochs.
        total_examples : int, optional
            Count of objects in the `data_iterator`. In the usual case this would correspond to the number of sentences
            in a corpus, used to log progress.
        total_words : int
            Count of total objects in `data_iterator`. In the usual case this would correspond to the number of raw
            words in a corpus, used to log progress. Must be provided in order to seek in `corpus_file`.
        **kwargs : object
            Additional key word parameters for the specific model inheriting from this class.

        Returns
        -------
        (int, int, int)
            The training report for this epoch consisting of three elements:
                * Size of data chunk processed, for example number of sentences in the corpus chunk.
                * Effective word count used in training (after ignoring unknown words and trimming the sentence length).
                * Total word count used in training.

        """
        if not total_words:
            raise ValueError("total_words must be provided alongside corpus_file argument.")
        from gensim.models.word2vec_corpusfile import CythonVocab
        from gensim.models.fasttext import FastText
        cython_vocab = CythonVocab(self.wv, hs=self.hs, fasttext=isinstance(self, FastText))

        progress_queue = Queue()

        corpus_file_size = os.path.getsize(corpus_file)

        thread_kwargs = copy.copy(kwargs)
        thread_kwargs['cur_epoch'] = cur_epoch
        thread_kwargs['total_examples'] = total_examples
        thread_kwargs['total_words'] = total_words

        # each worker thread handles its own contiguous slice of the corpus file
        workers = [
            threading.Thread(
                target=self._worker_loop_corpusfile,
                args=(corpus_file, thread_id, corpus_file_size / self.workers * thread_id, cython_vocab, progress_queue),
                kwargs=thread_kwargs,
            ) for thread_id in range(self.workers)
        ]

        for thread in workers:
            thread.daemon = True
            thread.start()

        trained_word_count, raw_word_count, job_tally = self._log_epoch_progress(
            progress_queue=progress_queue, job_queue=None, cur_epoch=cur_epoch,
            total_examples=total_examples, total_words=total_words, is_corpus_file_mode=True)

        return trained_word_count, raw_word_count, job_tally

    def _train_epoch(
            self, data_iterable, cur_epoch=0, total_examples=None, total_words=None,
            queue_factor=2, report_delay=1.0, **kwargs):
        """Train the model for a single epoch.

        Parameters
        ----------
        data_iterable : iterable of list of object
            The input corpus. This will be split in chunks and these chunks will be pushed to the queue.
        cur_epoch : int, optional
            The current training epoch, needed to compute the training parameters for each job.
            For example in many implementations the learning rate would be dropping with the number of epochs.
        total_examples : int, optional
            Count of objects in the `data_iterator`. In the usual case this would correspond to the number of sentences
            in a corpus, used to log progress.
        total_words : int, optional
            Count of total objects in `data_iterator`. In the usual case this would correspond to the number of raw
            words in a corpus, used to log progress.
        queue_factor : int, optional
            Multiplier for size of queue -> size = number of workers * queue_factor.
        report_delay : float, optional
            Number of seconds between two consecutive progress report messages in the logger.

        Returns
        -------
        (int, int, int)
            The training report for this epoch consisting of three elements:
                * Size of data chunk processed, for example number of sentences in the corpus chunk.
                * Effective word count used in training (after ignoring unknown words and trimming the sentence length).
                * Total word count used in training.

        """
        job_queue = Queue(maxsize=queue_factor * self.workers)
        progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)

        # one consumer thread per worker, plus a single producer thread filling the job queue
        workers = [
            threading.Thread(
                target=self._worker_loop,
                args=(job_queue, progress_queue,))
            for _ in range(self.workers)
        ]
        workers.append(threading.Thread(
            target=self._job_producer,
            args=(data_iterable, job_queue),
            kwargs={'cur_epoch': cur_epoch, 'total_examples': total_examples, 'total_words': total_words}))

        for thread in workers:
            thread.daemon = True  # make interrupting the process with ctrl+c easier
            thread.start()

        trained_word_count, raw_word_count, job_tally = self._log_epoch_progress(
            progress_queue, job_queue, cur_epoch=cur_epoch, total_examples=total_examples,
            total_words=total_words, report_delay=report_delay, is_corpus_file_mode=False)

        return trained_word_count, raw_word_count, job_tally

    def _get_next_alpha(self, epoch_progress, cur_epoch):
        """Get the correct learning rate for the next iteration.

        Parameters
        ----------
        epoch_progress : float
            Ratio of finished work in the current epoch.
        cur_epoch : int
            Number of current iteration.
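
        For example, with the default ``alpha=0.025``, ``min_alpha=0.0001`` and ``epochs=5``,
        halfway through the third pass (``cur_epoch=2``, ``epoch_progress=0.5``) the rate decays
        linearly with overall progress (a worked example using the default settings):

        .. sourcecode:: pycon

            >>> progress = (2 + 0.5) / 5                            # overall progress, 0.5
            >>> next_alpha = 0.025 - (0.025 - 0.0001) * progress    # 0.01255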

        Returns
        -------
        float
            The learning rate to be used in the next training epoch.

        """
        start_alpha = self.alpha
        end_alpha = self.min_alpha
        progress = (cur_epoch + epoch_progress) / self.epochs
        next_alpha = start_alpha - (start_alpha - end_alpha) * progress
        next_alpha = max(end_alpha, next_alpha)
        self.min_alpha_yet_reached = next_alpha
        return next_alpha

    def _get_thread_working_mem(self):
        """Computes the memory used per worker thread.

        Returns
        -------
        (np.ndarray, np.ndarray)
            Each worker thread's private work memory.

        """
        work = matutils.zeros_aligned(self.layer1_size, dtype=REAL)  # per-thread private work memory
        neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
        return work, neu1

    def _raw_word_count(self, job):
        """Get the number of words in a given job.
        Parameters
        ----------
        job: iterable of list of str
            The corpus chunk processed in a single batch.

        Returns
        -------
        int
            Number of raw words in the corpus chunk.

        """
        return sum(len(sentence) for sentence in job)

    def _check_corpus_sanity(self, corpus_iterable=None, corpus_file=None, passes=1):
        """Checks whether the corpus parameters make sense."""
        if corpus_file is None and corpus_iterable is None:
            raise TypeError("Either one of corpus_file or corpus_iterable value must be provided")
        if corpus_file is not None and corpus_iterable is not None:
            raise TypeError("Both corpus_file and corpus_iterable must not be provided at the same time")
        if corpus_file is not None and not os.path.isfile(corpus_file):
            raise TypeError("Parameter corpus_file must be a valid path to a file, got %r instead" % corpus_file)
        if corpus_iterable is not None and not isinstance(corpus_iterable, Iterable):
            raise TypeError("The corpus_iterable must be an iterable of lists of strings, got %r instead" % corpus_iterable)
        if corpus_iterable is not None and isinstance(corpus_iterable, GeneratorType) and passes > 1:
            raise TypeError(
                f"Using a generator as corpus_iterable can't support {passes} passes. Try a re-iterable sequence.")
        if corpus_file is not None:
            _, corpus_ext = os.path.splitext(corpus_file)
            if corpus_ext.lower() in get_supported_extensions():
                raise TypeError(
                    f"Training from compressed files is not supported with the `corpus_path` argument. "
                    f"Please decompress {corpus_file} or use `corpus_iterable` instead."
                )

    def _check_training_sanity(self, epochs=0, total_examples=None, total_words=None, **kwargs):
        """Checks whether the training parameters make sense.
        Parameters
        ----------
        epochs : int
            Number of training epochs. A positive integer.
        total_examples : int, optional
            Number of documents in the corpus. Either `total_examples` or `total_words` **must** be supplied.
        total_words : int, optional
            Number of words in the corpus. Either `total_examples` or `total_words` **must** be supplied.
        **kwargs : object
            Unused. Present to preserve signature among base and inherited implementations.

        Raises
        ------
        RuntimeError
            If one of the required training pre/post processing steps have not been performed.
        ValueError
            If the combination of input parameters is inconsistent.

        z6Effective 'alpha' higher than previous training cyclesz9you must first build vocabulary before training the modelz5you must initialize vectors before training the modelNzYou must specify either total_examples or total_words, for proper learning-rate and progress calculations. If you've just built the vocabulary using the same corpus, using the count cached in the model is sufficient: total_examples=model.corpus_count.r   zRYou must specify an explicit epochs count. The usual value is epochs=model.epochs.)
rT   ra   rw   rx   r<   r   r"   r   r   rb  )r{   rF   rD   rE   r   s        r.   r  zWord2Vec._check_training_sanity  s    , :22 	UNNSTTTw# 	\Z[[[47?## 	XVWWW 	> 	Q    	sVq[ 	sqrrr	s 	sr0   c
           
      N   |rRt                               d|d|z  |z  ||	z  |dnt          j        |          t          j        |                     dS t                               d|d|z  |z  ||	z  |dnt          j        |          t          j        |                     dS )a  Callback used to log progress for long running jobs.

        Parameters
        ----------
        job_queue : Queue of (list of object, float)
            The queue of jobs still to be performed by workers. Each job is represented as a tuple containing
            the batch of data to be processed and the floating-point learning rate.
        progress_queue : Queue of (int, int, int)
            A queue of progress reports. Each report is represented as a tuple of these 3 elements:
                * size of data chunk processed, for example number of sentences in the corpus chunk.
                * Effective word count used in training (after ignoring unknown words and trimming the sentence length).
                * Total word count used in training.
        cur_epoch : int
            The current training iteration through the corpus.
        example_count : int
            Number of examples (could be sentences for example) processed until now.
        total_examples : int
            Number of all examples present in the input corpus.
        raw_word_count : int
            Number of words used in training until now.
        total_words : int
            Number of all words in the input corpus.
        trained_word_count : int
            Number of effective words used in training until now (after ignoring unknown words and trimming
            the sentence length).
        elapsed : int
            Elapsed time since the beginning of training in seconds.

        Notes
        -----
        If you train the model via `corpus_file` argument, there is no job_queue, so reported job_queue size will
        always be equal to -1.

        zPEPOCH %i - PROGRESS: at %.2f%% examples, %.0f words/s, in_qsize %i, out_qsize %ir   Nr   zMEPOCH %i - PROGRESS: at %.2f%% words, %.0f words/s, in_qsize %i, out_qsize %i)rw   r   r   qsize)
r{   r5  r1  r
  rM  rD   r'  rE   r&  rR  s
             r.   rJ  zWord2Vec._log_progress  s    L  	KKb5=0>ACUX_C_CU[-C-CU[Q_E`E`     KK_5>1K?ASV]A]CU[-C-CU[Q_E`E`    r0   c	           	          t                               d||||||z             |rdS |r#||k    rt                               d|||           |r%||k    r!t                               d|||           dS dS dS )a  Callback used to log the end of a training epoch.

        Parameters
        ----------
        cur_epoch : int
            The current training iteration through the corpus.
        example_count : int
            Number of examples (could be sentences for example) processed until now.
        total_examples : int
            Number of all examples present in the input corpus.
        raw_word_count : int
            Number of words used in training until now.
        total_words : int
            Number of all words in the input corpus.
        trained_word_count : int
            Number of effective words used in training until now (after ignoring unknown words and trimming
            the sentence length).
        elapsed : int
            Elapsed time since the beginning of training in seconds.
        is_corpus_file_mode : bool
            Whether training is file-based (corpus_file argument) or not.

        Warnings
        --------
        In case the corpus is changed while the epoch was running.

        zZEPOCH %i: training on %i raw words (%i effective words) took %.1fs, %.0f effective words/sNzGEPOCH %i: supplied example count (%i) did not equal expected count (%i)zHEPOCH %i: supplied raw word count (%i) did not equal expected count (%i))rw   r   rx   )	r{   r
  rM  rD   r'  rE   r&  rR  rL  s	            r.   rK  zWord2Vec._log_epoch_endJ  s    > 	h~'97DVY`D`	
 	
 	
  	F  	n= 	NNY[d~    	;.8 	NNZ\e    	 	 	 	r0   c                 X    |                      dd| d| d|dd||z  dd		           d
S )a  Callback to log the end of training.

        Parameters
        ----------
        raw_word_count : int
            Number of words used in the whole training.
        trained_word_count : int
            Number of effective words used in training (after ignoring unknown words and trimming the sentence length).
        total_elapsed : int
            Total time spent during training in seconds.
        job_tally : int
            Total number of jobs processed during training.

        rv   ztraining on z raw words (z effective words) took r   zs, z.0fz effective words/sr   N)ry   )r{   r'  r&  r-  r)  s        r.   r"  zWord2Vec._log_train_end~  s     	  e> e e7I e e!de e*<}*Lde e e 	! 	 	 	 	 	r0   g    .Ac           
      "    t                               d j        t           j                   j         j         j         j         j	                    j        j
        st          d           j        st          d           fdt                      d}}t          | j        z            t          |dz    j        z            fdt           j                  D             }|D ]}	d	|	_        |	                                 d
}
t#          j        t&                    d}d
}t)          t+          j        t)          |          |                    }	 	 t/          |          \  }}|dz
  |z  k    r.t                               d           |dz  }t3                      t                               d|                               |           n_# t2          $ rR t                               d||z
  dz              t           j                  D ]}                    d           d	}Y nw xY w	 ||dz   k     s|sh                    |          }|
|z  }
|dz  }t                      |z
  }||k    r't                               dd|
z  |
|z             ||z   }||dz   k     f|hn# t:          $ r Y nw xY wdt                      |z
  }d j        _        t                               d|
||
|z             d|
         S )a  Score the log probability for a sequence of sentences.
        This does not change the fitted model in any way (see :meth:`~gensim.models.word2vec.Word2Vec.train` for that).

        Gensim has currently only implemented score for the hierarchical softmax scheme,
        so you need to have run word2vec with `hs=1` and `negative=0` for this to work.

        Note that you should specify `total_sentences`; scoring stops with a warning once that many
        sentences have been processed, while setting the value far higher than needed wastes memory.

        See the `article by Matt Taddy: "Document Classification by Inversion of Distributed Language Representations"
        <https://arxiv.org/pdf/1504.07295.pdf>`_ and the
        `gensim demo <https://github.com/piskvorky/gensim/blob/develop/docs/notebooks/deepir.ipynb>`_ for examples of
        how to use such scores in document classification.
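
        A minimal usage sketch (``common_texts`` is the tiny corpus bundled with Gensim's tests;
        the model must have been trained with ``hs=1, negative=0`` for scoring to work):

        .. sourcecode:: pycon

            >>> from gensim.test.utils import common_texts
            >>> model = Word2Vec(common_texts, hs=1, negative=0, min_count=1)
            >>> log_probs = model.score([["system", "response", "time"]], total_sentences=1)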

        Parameters
        ----------
        sentences : iterable of list of str
            The `sentences` iterable can be simply a list of lists of tokens, but for larger corpora,
            consider an iterable that streams the sentences directly from disk/network.
            See :class:`~gensim.models.word2vec.BrownCorpus`, :class:`~gensim.models.word2vec.Text8Corpus`
            or :class:`~gensim.models.word2vec.LineSentence` in :mod:`~gensim.models.word2vec` module for such examples.
        total_sentences : int, optional
            Count of sentences.
        chunksize : int, optional
            Chunksize of jobs
        queue_factor : int, optional
            Multiplier for size of queue (number of workers * queue_factor).
        report_delay : float, optional
            Seconds to wait before reporting progress.

        zoscoring sentences with %i workers on %i vocabulary and %i features, using sg=%s hs=%s sample=%s and negative=%sz7you must first build vocabulary before scoring new datazWe have currently only implemented score for the hierarchical softmax scheme, so you need to have run word2vec with hs=1 and negative=0 for this to work.c                  f   t          j        dt                    } t          j        	j        t                    }	                                 }|dS d}|D ]B\  }}|k    r n6	j        rt          	||           }nt          	|| |          }|
|<   |dz  }C
                    |           u)zZCompute log probability for each sentence, lifting lists of sentences from the jobs queue.r7   r=   TNr   )rY   r   rn   r   rr  r?   r   rR   r   r   r0  )r  r  r7  nssentence_idr   scorer5  r1  r{   sentence_scorestotal_sentencess          r.   worker_loopz#Word2Vec.score.<locals>.worker_loop  s    8AT***D)$*:$GGGD'mmoo E-0  )K"o5 w P 1$$ G G 3D(D$ O O38OK0!GBB""2&&&'r0   r   rl  r7   c                 :    g | ]}t          j                   S ))rZ  )r\  r]  )r^  rG  r  s     r.   ra  z"Word2Vec.score.<locals>.<listcomp>  s'    UUUA9#;777UUUr0   Tr   r=   FzMterminating after %i sentences (set higher total_sentences if you want more).zputting job #%i in the queuez;reached end of input; waiting to finish %i outstanding jobsNz/PROGRESS: at %.2f%% sentences, %.0f sentences/sr   z1scoring %i sentences took %.1fs, %.0f sentences/s)rw   r   rN   r   r<   r?   rR   r\   rg   r]   r   r"   r   r	   r   ri  r(  r   rr  rn   r   r   groupernextrx   StopIterationr4  r0  r   r
   r  )r{   r|   r  	chunksizer  r  r(  rN  rN   rj  sentence_count	push_done	done_jobsjobs_sourcerB  r   rG  r  rR  r5  r1  r  r  s   ` `                @@@@r.   r  zWord2Vec.score  s   @ 	:L#dg,,(8$'47K		
 	
 	
 w# 	ZXYYYw 	^  
	' 	' 	' 	' 	' 	' 	' 	' 	'( +__c{,"=>>>	q(8DL'HIIIUUUUt|ATATUUU 	 	F FMLLNNNN"0MMM		i	.B.BI N NOO!	! $[ 1 1QJ)+o= *NNg'   aKF'//);VDDDe$$$$  ! ! !Y[adm[mpq[qrrrt|,, ( (AMM$'''' 				!
6A:. i '++I66B"b(NNI+oo5G+- =M!N2NW4L   '.&<  6A:. i     A!	F  //E)?G^g%=	
 	
 	
 //s'   'A<G$ $AI ?I A3J8 8
KK
   c                 P     j         st          d          t           j        d          rt           d          st          d           fd|D             }|st                              d           dS t          j         j        j        |         d	          }|r j	        r|t          |          z  }t          j        t          j        | j        j                            t          j                  z  t          j        |d
          } fd|D             S )a  Get the probability distribution of the center word given context words.

        Note this performs a CBOW-style propagation, even in SG models,
        and doesn't quite weight the surrounding words the same as in
        training -- so it's just one crude way of using a trained model
        as a predictor.

        Parameters
        ----------
        context_words_list : list of (str and/or int)
            List of context words, which may be words themselves (str)
            or their index in `self.wv.vectors` (int).
        topn : int, optional
            Return `topn` words and their probabilities.

        Returns
        -------
        list of (str, float)
            `topn` length list of tuples of (word, probability).
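
        A minimal usage sketch (``common_texts`` is the tiny corpus bundled with Gensim's tests;
        the exact words and probabilities returned depend on the random seed):

        .. sourcecode:: pycon

            >>> from gensim.test.utils import common_texts
            >>> model = Word2Vec(common_texts, negative=5, min_count=1)
            >>> predictions = model.predict_output_word(["system", "graph"], topn=3)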

        zWe have currently only implemented predict_output_word for the negative sampling scheme, so you need to have run word2vec with negative > 0 for this to work.r   r   z>Parameters required for predicting the output words not found.c                 V    g | ]%}|j         v j                             |          &S r:   )r<   	get_index)r^  r   r{   s     r.   ra  z0Word2Vec.predict_output_word.<locals>.<listcomp>1  s6    ZZZ!QRVRY\Z**1--ZZZr0   zHAll the input context words are out-of-vocabulary for the current model.Nr   )axisT)topnr   c                 D    g | ]}j         j        |         |         fS r:   )r<   r   )r^  index1prob_valuesr{   s     r.   ra  z0Word2Vec.predict_output_word.<locals>.<listcomp>@  s-    ^^^%f-{6/BC^^^r0   )r]   r"   rl   r<   rw   rx   rY   r   r   r_   r   expdotr   Tr   argsort)r{   context_words_listr  word2_indicesl1top_indicesr  s   `     @r.   predict_output_wordzWord2Vec.predict_output_word  sD   , } 	W  
 tw	** 	a'$	2J2J 	a_```ZZZZ7IZZZ 	NNefff4VDGOM2;;; 	%T^ 	%#m$$$B fRVB7788rvk***&{tLLL^^^^^R]^^^^r0   c                    t          | j                  | _        |j        j        | j        _        |j        j        | j        _        |j        j        | j        _        |j        | _        |j        | _        |                                  dS )a=  Borrow shareable pre-built structures from `other_model` and reset hidden layer weights.

        Structures copied are:
            * Vocabulary
            * Index to word mapping
            * Cumulative frequency table (used for negative sampling)
            * Cached corpus length

        Useful when testing multiple models on the same corpus in parallel. However, as the models
        then share all vocabulary-related structures other than vectors, neither should then
        expand their vocabulary (which could leave the other in an inconsistent, broken state).
        And, any changes to any per-word 'vecattr' will affect both models.


        Parameters
        ----------
        other_model : :class:`~gensim.models.word2vec.Word2Vec`
            Another model to copy the internal structures from.
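
        A minimal usage sketch (``common_texts`` is the tiny corpus bundled with Gensim's tests;
        both models are throwaway examples):

        .. sourcecode:: pycon

            >>> from gensim.test.utils import common_texts
            >>> reference = Word2Vec(common_texts, min_count=1)
            >>> other = Word2Vec(vector_size=reference.wv.vector_size, min_count=1)
            >>> other.reset_from(reference)  # reuse the vocabulary, then train `other` separately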

        N)	r   rM   r<   r   r   expandosrj   rb   r   )r{   other_models     r.   
reset_fromzWord2Vec.reset_fromB  sp    * t/00*~:*~:&>2$.'4r0   c                 ~    | j         j        dt          | j        j                  d| j        j        d| j        dS )zHuman readable representation of the model's state.

        Returns
        -------
        str
            Human readable representation of the model's state, including the vocabulary size, vector size
            and learning rate.

        z<vocab=z, vector_size=z, alpha=>)	__class____name__r   r<   r   rM   rT   r   s    r.   __str__zWord2Vec.__str___  sJ     N###S)=%>%>%>%>@S@S@SUYU_U_U_
 	
r0   c                 H     t          t          |           j        |i | dS )a%  Save the model.
        This saved model can be loaded again using :func:`~gensim.models.word2vec.Word2Vec.load`, which supports
        online training and getting vectors for vocabulary words.

        Parameters
        ----------
        fname : str
            Path to the file.
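
        A minimal usage sketch (the file name is a placeholder; any writable path works):

        .. sourcecode:: pycon

            >>> from gensim.test.utils import common_texts, get_tmpfile
            >>> model = Word2Vec(common_texts, min_count=1)
            >>> path = get_tmpfile("toy_word2vec.model")
            >>> model.save(path)
            >>> loaded = Word2Vec.load(path)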

        N)superr3   save)r{   r[  r   r  s      r.   r  zWord2Vec.savem  s-     	#h"D3F33333r0   c           	          t          |                              dg          }t          t          |                               |||||||          S )zFArrange any special handling for the `gensim.utils.SaveLoad` protocol.rj   )setunionr  r3   _save_specials)	r{   fname
separately	sep_limitignorepickle_protocolcompresssubnamer  s	           r.   r  zWord2Vec._save_specialsz  sT     V""K?33Xt$$33:y&/8WV V 	Vr0   )rethrowc                :   	  t          t          |           j        |i |}t          |t                    s1d}t	          dt          |          dt          |                     |S # t          $ r%}|r|t                              d           |d}~ww xY w)a  Load a previously saved :class:`~gensim.models.word2vec.Word2Vec` model.

        See Also
        --------
        :meth:`~gensim.models.word2vec.Word2Vec.save`
            Save model.

        Parameters
        ----------
        fname : str
            Path to the saved file.

        Returns
        -------
        :class:`~gensim.models.word2vec.Word2Vec`
            Loaded model.

        TzModel of type z can't be loaded by zModel load error. Was model saved using code from an older Gensim Version? Try loading older model using gensim-3.8.3, then re-saving, to restore compatibility with current code.N)	r  r3   rs   r   AttributeErrorr   rz   rw   error)clsr  r[  r   r$   aer  s         r.   rs   zWord2Vec.load  s    (	-E(C((-t>v>>EeX.. i$nQUV[Q\Q\Q\Q\^abe^f^f^f%ghhhL 	 	 	 LL34 4 4 H	s   A'A+ +
B5 BBc           	      2    t          t          |           j        |i | t          | d          sd| _        | j        r)t          | j        d          r|                                  t          | d          sd| _        t          | d          sd| _	        t          | j        d          s:t          | j        d          r%t          j        d	t          
          | j        _        t          | d          s*t          j                            | j                  | _        t          | d          sd| _        d| _        t          | d          s| j        | _        | `t          | d          sd| _        t          | d          r+dD ]&}t-          | |t/          | j        |                     '| `t          | d          r@dD ];}t          | j        |          r$t-          | |t/          | j        |                     <| `t          | d          s	d| _        dS dS )zXHandle special requirements of `.load()` protocol, usually up-converting older versions.r^   r9   r   rb   Nrc   ro   r   r7   r=   rZ   r   rO   r   rF   rd   
vocabulary)re   rf   rg   rh   ri   rk   
trainables)rp   r?   rq   r   r   rX   T)r  r3   _load_specialsrl   r^   r]   r<   r   rb   rc   rY   rm   rn   ro   rZ   default_rngrq   rO   rP   iterrF   rd   setattrgetattrr  r  rX   )r{   r[  r   ar  s       r.   r  zWord2Vec._load_specials  s=   ,h,d=f===t]++ 	$#D= 	"WTWn== 	"!!!t^,, 	% $Dt122 	+&*D#tw00 	;WTWi5P5P 	;$&GAT$:$:$:DG!tX&& 	@)//TY/??DKt]++ 	& D$%D!tX&& 	)DK	t.// 	(#'D 4&& 	 h > >a!!<!<====4&& 	 J B B4?A.. BD!WT_a%@%@AAAt-.. 	'"&D	' 	'r0   c                     | j         S )zGet current value of the training loss.

        Returns
        -------
        float
            Current training loss.

        )r`   r   s    r.   get_latest_training_lossz!Word2Vec.get_latest_training_loss  s     ))r0   )NNFr~   FN)FNNF)NNr~   NN)FFNNNF)NN)r   )F)NNNNNNNr   r   r   Fr:   )r   NN)NNr   NNr   N)r   NNr:   )r   NNr   r   r:   )NNr7   )r  )3r  
__module____qualname__hashr   r}   ru   r   r   r   r   r   r   r   r   r   r   r   r   r   r  r  r  r  rv   r2  r9  rH  rS  r   r  r;  r/  r  rt   r  rJ  rK  r"  rL   r  r  r  r  r  r  classmethodrs   r  r  __classcell__)r  s   @r.   r3   r3      sf        d5YZfg1a6Q4XYef8JY^jl$tM> M> M> M>` V[,01Y 1Y 1Y 1Yh ^c/, /, /, /,b) ) )@) ) ) ) AE16w w w wr   >. . .& & &0 0 0 00" " " " ZTUUd d VUdR R R: : :& Z	^ 
+ + + 
+, .2* * * *&6 6 64  
 \`FGLNG2 G2 G2 G2T [\-1#! #! #! #!J J  J  JDE@ E@ E@ E@P TXDHH= H= H= H=V ^`?= ?= ?= ?=D PT8:9= 9= 9= 9=v  0  6 6 6    .&s &s &s &sP3 3 3j2 2 2h  ( 03s3xx3UVef 0 0 0 0B-_ -_ -_ -_^  :
 
 
4 4 4 4 4V V V V V !&             [ D#' #' #' #' #'J	* 	* 	* 	* 	* 	* 	*r0   r3   c                       e Zd Zd Zd ZdS )BrownCorpusc                     || _         dS )zIterate over sentences from the `Brown corpus <https://en.wikipedia.org/wiki/Brown_Corpus>`_
        (part of `NLTK data <https://www.nltk.org/data.html>`_).

        N)dirname)r{   r  s     r.   r}   zBrownCorpus.__init__  s    
 r0   c              #     K   t          j        | j                  D ]}t           j                            | j        |          }t           j                            |          sGt          j        |d          5 }|D ]G}t          j        |          }d |	                                D             }d |D             }|sC|V  H	 d d d            n# 1 swxY w Y   d S )Nrbc                     g | ]=}t          |                    d                     dk    (|                    d           >S )/r   )r   split)r^  ts     r.   ra  z(BrownCorpus.__iter__.<locals>.<listcomp>  s@    !_!_!_1AGGTWLLHYHY]^H^!_!''#,,!_!_!_r0   c                     g | ]B\  }}|d d                                          !|                                d|d d         CS )Nr   r  )isalpharz  )r^  tokentags      r.   ra  z(BrownCorpus.__iter__.<locals>.<listcomp>  sU    rrrJE3`cdfefdf`g`o`o`q`qrs2A2ww?rrrr0   )
re  listdirr  rf  joinrx  r   open
to_unicoder  )r{   r  finline
token_tagswordss         r.   __iter__zBrownCorpus.__iter__  s,     Z-- 	  	 EGLLu55E7>>%(( E4(( 
 C 	  	 D +D11D "`!_

!_!_!_JrrR\rrrE  ! KKKK	 
  
  
  
  
  
  
  
  
  
  
  
  
  
  
 		  	 s   8ACC	C	N)r  r  r  r}   r  r:   r0   r.   r  r    s2                   r0   r  c                       e Zd ZefdZd ZdS )Text8Corpusc                 "    || _         || _        dS )zbIterate over sentences from the "text8" corpus, unzipped from http://mattmahoney.net/dc/text8.zip.N)r  max_sentence_length)r{   r  r  s      r.   r}   zText8Corpus.__init__  s    
#6   r0   c              #     K   g d}}t          j        | j        d          5 }	 ||                    d          z   }||k    rBt          j        |                                          }|                    |           |r|V  n|                    d          }|dk    rJt          j        |d |                                                   ||d                                          fng |f\  }}|                    |           t          |          | j
        k    r8|d | j
                 V  || j
        d          }t          |          | j
        k    83	 d d d            d S # 1 swxY w Y   d S )Nr0   r  Ti        r   )r   r  r  readr  r  extendrfindstripr   r  )r{   r   restr  textr  
last_tokens          r.   r  zText8Corpus.__iter__  s      S$Z
D)) 	CSCchhtnn,4< !,T2288::EOOE*** '&!ZZ--
=G1_]u/[j[0ABBHHJJ#JKK06688: :SUW[R\ t&&&(mmt'?? C"#<D$<#<===='(@(A(ABH (mmt'?? CC 	C 	C 	C 	C 	C 	C 	C 	C 	C 	C 	C 	C 	C 	C 	C 	C 	C 	Cs   D5E$$E(+E(Nr  r  r  r   r}   r  r:   r0   r.   r  r    s?        2D 7 7 7 7
C C C C Cr0   r  c                        e Zd ZedfdZd ZdS )r   Nc                 0    || _         || _        || _        dS )a  Iterate over a file that contains sentences: one line = one sentence.
        Words must be already preprocessed and separated by whitespace.

        Parameters
        ----------
        source : string or a file-like object
            Path to the file on disk, or an already-open file object (must support `seek(0)`).
        limit : int or None
            Clip the file to the first `limit` lines. Do no clipping if `limit is None` (the default).

        Examples
        --------
        .. sourcecode:: pycon

            >>> from gensim.test.utils import datapath
            >>> sentences = LineSentence(datapath('lee_background.cor'))
            >>> for sentence in sentences:
            ...     pass

        N)sourcer  limitr{   r  r  r  s       r.   r}   zLineSentence.__init__  s    * #6 


r0   c              #     K   	 | j                             d           t          j        | j         | j                  D ]n}t          j        |                                          }d}|t          |          k     r1|||| j	        z            V  || j	        z  }|t          |          k     1odS # t          $ r t          j        | j         d          5 }t          j        || j                  D ]n}t          j        |                                          }d}|t          |          k     r1|||| j	        z            V  || j	        z  }|t          |          k     1o	 ddd           Y dS # 1 swxY w Y   Y dS w xY w)z(Iterate through the lines in the source.r   r  N)r  seek	itertoolsislicer  r   r  r  r   r  r  r  )r{   r  ir  s       r.   r  zLineSentence.__iter__%  s     	6 KQ!(djAA 2 2'--3355#d))m 2q!d&>">>????11A #d))m 22 2  	6 	6 	6DK.. 6#%,S$*== 6 6D +D117799DAc$ii- 6"1a$*B&B#BCCCCT55 c$ii- 666 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6	6s7   B(B. .$E:B
E+E:+E/	/E:2E/	3E:9E:r  r:   r0   r.   r   r     s<        3ET    26 6 6 6 6r0   r   c                        e Zd ZedfdZd ZdS )PathLineSentencesNc                     | _         | _        | _        t          j                             j                   rBt                              d           t                              d            j         g _        nt          j        	                     j                   rt          j        
                     j         d           _         t                              d j                    t          j         j                    _         fd j        D              _         j                                         nt          d          t                              dd
                     j                             d	S )
aM  Like :class:`~gensim.models.word2vec.LineSentence`, but process all files in a directory
        in alphabetical order by filename.

        The directory must only contain files that can be read by :class:`gensim.models.word2vec.LineSentence`:
        .bz2, .gz, and text files. Any file not ending with .bz2 or .gz is assumed to be a text file.

        The format of files (either text, or compressed text files) in the path is one sentence = one line,
        with words already preprocessed and separated by whitespace.

        Warnings
        --------
        Does **not recurse** into subdirectories.

        Parameters
        ----------
        source : str
            Path to the directory.
        limit : int or None
            Read only the first `limit` lines from each file. Read all if limit is None (the default).

        z=single file given as source, rather than a directory of filesz=consider using models.word2vec.LineSentence for a single file zreading directory %sc                 $    g | ]}j         |z   S r:   )r  )r^  filenamer{   s     r.   ra  z.PathLineSentences.__init__.<locals>.<listcomp>_  s     XXX8h 6XXXr0   z"input is neither a file nor a pathz$files read into PathLineSentences:%s
N)r  r  r  re  rf  rx  rw   r4  input_filesisdirr  r   r  sortrb  r  s   `   r.   r}   zPathLineSentences.__init__=  s6   , #6 
7>>$+&& 	CLLXYYYLLXYYY $}DW]]4;'' 	C',,t{B77DKKK.<<<!z$+66DXXXXtGWXXXD!!####ABBB:DIIdFV<W<WXXXXXr0   c              #     K   | j         D ]}t                              d|           t          j        |d          5 }t          j        || j                  D ]n}t          j        |          	                                }d}|t          |          k     r1|||| j        z            V  || j        z  }|t          |          k     1o	 ddd           n# 1 swxY w Y   dS )ziterate through the fileszreading file %sr  r   N)r  rw   r   r   r  r  r  r  r  r  r   r  )r{   	file_namer  r  r  s        r.   r  zPathLineSentences.__iter__e  s2     ) 	6 	6IKK)9555It,, 6%,S$*== 6 6D +D117799DAc$ii- 6"1Q)A%A#ABBBBT55 c$ii- 666 6 6 6 6 6 6 6 6 6 6 6 6 6 6	6 	6s   B
CC	C	r  r:   r0   r.   r  r  <  sA        3ET &Y &Y &Y &YP
6 
6 
6 
6 
6r0   r  c                       e Zd ZdZdS )Word2VecVocabDObsolete class retained for now as load-compatibility state capture.Nr  r  r  __doc__r:   r0   r.   r  r  r          NNDr0   r  c                       e Zd ZdZdS )Word2VecTrainablesr  Nr  r:   r0   r.   r  r  w  r  r0   r  c                       e Zd Zd ZdS )Heapitemc                 "    | j         |j         k     S r   )r   )r{   others     r.   __lt__zHeapitem.__lt__}  s    zEK''r0   N)r  r  r  r  r:   r0   r.   r  r  |  s#        ( ( ( ( (r0   r  zcount, index, left, rightc                     t           fdt          t           j                            D                       }t	          j        |           t          t                     dz
            D ]m}t	          j        |          t	          j        |          }}t	          j        |t          |j	        |j	        z   |t                     z   ||                     n|S )Nc              3   d   K   | ]*}t                              |d           |dd          V  +dS )r   N)r  r   )r^  r  r<   s     r.   ru  z_build_heap.<locals>.<genexpr>  s?      ii733QdCCiiiiiir0   r7   )r   indexleftright)
listr   r   r   heapqheapifyheappopheappushr  r   )r<   heapr  min1min2s   `    r.   _build_heapr&    s    iiiiUSVWYWfSgSgMhMhiiiiiD	M$3r77Q; 
 
]4((%-*=*=d(dj!8CGGRV^bccc	
 	
 	
 	
 Kr0   c                    t                               dt          |                      t          |           }|st                               d           dS d}|d         g g fg}|rf|                                \  }}}|d         t          |           k     rT|d         }|                     |d|           |                     |d|           t          t          |          |          }nt          j        t          |          |j
        t          |           z
  gz   t          j                  }|                    |j        t          j        t          |          dgz   t          j                  |f           |                    |j        t          j        t          |          dgz   t          j                  |f           |ft                               d	|           dS )
a  
    Appends a binary code to each vocab term.

    Parameters
    ----------
    wv : KeyedVectors
        A collection of word-vectors.

    Sets the .code and .point attributes of each node.
    Each code is a numpy.array containing 0s and 1s.
    Each point is an integer.

    """
    logger.info("constructing a huffman tree from %i words", len(wv))
    heap = _build_heap(wv)
    if not heap:
        logger.info("built huffman tree with maximum node depth 0")
        return
    # recurse over the tree, assigning a binary code to each vocabulary word
    max_depth = 0
    stack = [(heap[0], [], [])]
    while stack:
        node, codes, points = stack.pop()
        if node[1] < len(wv):
            # leaf node => store its path from the root
            wv.set_vecattr(node[1], 'code', codes)
            wv.set_vecattr(node[1], 'point', points)
            max_depth = max(len(codes), max_depth)
        else:
            # inner node => continue recursion down both branches
            points = np.array(list(points) + [node.index - len(wv)], dtype=np.uint32)
            stack.append((node.left, np.array(list(codes) + [0], dtype=np.uint8), points))
            stack.append((node.right, np.array(list(codes) + [1], dtype=np.uint8), points))
    logger.info("built huffman tree with maximum node depth %i", max_depth)


if __name__ == "__main__":
    import argparse
    logging.basicConfig(
        format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s',
        level=logging.INFO,
    )
    logger.info("running %s", " ".join(sys.argv))

    # check and process cmdline input
    program = os.path.basename(sys.argv[0])
    if len(sys.argv) < 2:
        print(globals()['__doc__'] % locals())
        sys.exit(1)

    from gensim.models.word2vec import Word2Vec  # avoid referencing __main__ in pickle

    np.seterr(all='raise')  # don't ignore numpy errors

    parser = argparse.ArgumentParser()
    parser.add_argument("-train", help="Use text data from file TRAIN to train the model", required=True)
    parser.add_argument("-output", help="Use file OUTPUT to save the resulting word vectors")
    parser.add_argument("-window", help="Set max skip length WINDOW between words; default is 5", type=int, default=5)
    parser.add_argument("-size", help="Set size of word vectors; default is 100", type=int, default=100)
    parser.add_argument(
        "-sample",
        help="Set threshold for occurrence of words. "
             "Those that appear with higher frequency in the training data will be randomly down-sampled; "
             "default is 1e-3, useful range is (0, 1e-5)",
        type=float, default=1e-3,
    )
    parser.add_argument("-hs", help="Use Hierarchical Softmax; default is 0 (not used)", type=int, default=0, choices=[0, 1])
    parser.add_argument(
        "-negative", help="Number of negative examples; default is 5, common values are 3 - 10 (0 = not used)",
        type=int, default=5)
    parser.add_argument("-threads", help="Use THREADS threads (default 12)", type=int, default=12)
    parser.add_argument("-iter", help="Run more training iterations (default 5)", type=int, default=5)
    parser.add_argument(
        "-min_count", help="This will discard words that appear less than MIN_COUNT times; default is 5",
        type=int, default=5)
    parser.add_argument(
        "-cbow", help="Use the continuous bag of words model; default is 1 (use 0 for skip-gram model)",
        type=int, default=1, choices=[0, 1])
    parser.add_argument(
        "-binary", help="Save the resulting vectors in binary mode; default is 0 (off)",
        type=int, default=0, choices=[0, 1])
    parser.add_argument("-accuracy", help="Use questions from file ACCURACY to evaluate the model")

    args = parser.parse_args()

    if args.cbow == 0:
        skipgram = 1
    else:
        skipgram = 0

    corpus = LineSentence(args.train)

    model = Word2Vec(
        corpus, vector_size=args.size, min_count=args.min_count, workers=args.threads,
        window=args.window, sample=args.sample, sg=skipgram, hs=args.hs,
        negative=args.negative, cbow_mean=1, epochs=args.iter,
    )

    if args.output:
        outfile = args.output
        model.wv.save_word2vec_format(outfile, binary=args.binary)
    else:
        outfile = args.train
        model.save(outfile + '.model')
    if args.binary == 1:
        model.wv.save_word2vec_format(outfile + '.model.bin', binary=True)
    else:
        model.wv.save_word2vec_format(outfile + '.model.txt', binary=False)

    if args.accuracy:
        model.accuracy(args.accuracy)

    logger.info("finished running %s", program)