
\c           @   s  d  Z  d d l Z d d l Z d d l m Z m Z d d l m Z m	 Z	 d d l
 m Z m Z d d l
 m Z m Z d d	 l
 m Z d d
 l
 m Z m Z d d l m Z m Z m Z d d l m Z d d l m Z d d l m Z d d l m Z d d l m Z d d l  m! Z! m" Z" d d l# m$ Z$ m% Z% d d l m& Z& d d l m' Z' d d l# m( Z( d d l# m) Z) d d l# m* Z* d d l# m+ Z+ d d l# m, Z, d d l# m- Z- d d l# m. Z. d d l# m/ Z/ d d l0 m1 Z1 i d d 6d d 6d  d! 6d" d# 6d$ d% 6d& d' 6Z2 i d( d) 6d d* 6d d+ 6d  d, 6Z3 d- Z4 d. e5 f d/     YZ6 d0 e j7 e e e  f d1     YZ8 d2   Z9 e: d3  Z; d4 e j7 e e8 e  f d5     YZ< d6 e< f d7     YZ= d8 e8 e f d9     YZ> d: e> f d;     YZ? d S(<   sF   Classification and regression using Stochastic Gradient Descent (SGD).iN(   t   ABCMetat   abstractmethodi   (   t   Parallelt   delayed(   t   clonet   is_classifieri   (   t   LinearClassifierMixint   SparseCoefMixin(   t   make_dataset(   t   BaseEstimatort   RegressorMixin(   t   check_arrayt   check_random_statet	   check_X_y(   t   safe_sparse_dot(   t   _check_partial_fit_first_call(   t   check_is_fitted(   t   ConvergenceWarning(   t   six(   t   StratifiedShuffleSplitt   ShuffleSplit(   t	   plain_sgdt   average_sgd(   t   compute_class_weight(   t
   deprecated(   t   Hinge(   t   SquaredHinge(   t   Log(   t   ModifiedHuber(   t   SquaredLoss(   t   Huber(   t   EpsilonInsensitive(   t   SquaredEpsilonInsensitive(   t   _joblib_parallel_argst   constantt   optimali   t
   invscalingi   t   adaptivei   t   pa1i   t   pa2i    t   nonet   l2t   l1t
   elasticnetg?t   _ValidationScoreCallbackc           B   s#   e  Z d  Z d d  Z d   Z RS(   s5   Callback for early stopping based on validation scorec         C   sU   t  |  |  _ d |  j _ | d  k	 r6 | |  j _ n  | |  _ | |  _ | |  _ d  S(   Ni   (   R   t	   estimatort   t_t   Nonet   classes_t   X_valt   y_valt   sample_weight_val(   t   selfR-   R1   R2   R3   t   classes(    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt   __init__3   s    		c         C   sL   |  j  } | j d d  | _ t j |  | _ | j |  j |  j |  j	  S(   Ni   i(
   R-   t   reshapet   coef_t   npt
   atleast_1dt
   intercept_t   scoreR1   R2   R3   (   R4   t   coeft	   interceptt   est(    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt   __call__=   s    	N(   t   __name__t
   __module__t   __doc__R/   R6   R@   (    (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR,   0   s   	t   BaseSGDc           B   s   e  Z d  Z d d d d e d d e d d d d d d	 e d d
 e e d d  Z d   Z e d    Z	 e e d  Z
 d   Z d   Z d   Z d   Z d d d  Z d   Z d d  Z RS(   s1   Base class for SGD classification and regression.R)   g-C6?g      ?g333333?i    g?R#   g        g      ?i   c         C   s   | |  _  | |  _ | |  _ | |  _ | |  _ | |  _ | |  _ | |  _ |	 |  _ | |  _	 |
 |  _
 | |  _ | |  _ | |  _ | |  _ | |  _ | |  _ | |  _ | |  _ | |  _ | |  _ |  j d t  d  S(   Nt   set_max_iter(   t   losst   penaltyt   learning_ratet   epsilont   alphat   Ct   l1_ratiot   fit_interceptt   shufflet   random_statet   verboset   eta0t   power_tt   early_stoppingt   validation_fractiont   n_iter_no_changet
   warm_startt   averaget   n_itert   max_itert   tolt   _validate_paramst   False(   R4   RF   RG   RJ   RK   RL   RM   RY   RZ   RN   RP   RI   RO   RH   RQ   RR   RS   RT   RU   RV   RW   RX   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR6   G   s,    																					c         O   s-   t  t |   j | |   |  j d t  |  S(   NRE   (   t   superRD   t
   set_paramsR[   R\   (   R4   t   argst   kwargs(    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR^   g   s    c         C   s   d S(   s
   Fit model.N(    (   R4   t   Xt   y(    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt   fitl   s    c         C   s  t  |  j t  s! t d   n  t  |  j t  sB t d   n  |  j r` | r` t d   n  |  j d k	 r |  j d k r t d |  j   n  d |  j k o d k n s t d   n  |  j d k  r t d	   n  |  j	 d
 k  r t d   n  d |  j
 k  od k  n s,t d   n  |  j d k r\|  j d k r\t d   q\n  |  j d k r|  j d k rt d   n  |  j |  j  |  j |  j  |  j |  j k rt d |  j   n  | sd S|  j |  _ |  j d k	 rt j d t  |  j } d |  _ n |  j d k rh|  j d k rh| s_t j d t |   j t  n  d } nM |  j d k rt j d t |   j t  n  |  j d k	 r|  j n d } | |  _ d S(   s   Validate input params. s$   shuffle must be either True or Falses+   early_stopping must be either True or Falses/   early_stopping should be False with partial_fiti    s   max_iter must be > zero. Got %fg        g      ?s   l1_ratio must be in [0, 1]s   alpha must be >= 0i   s   n_iter_no_change must be >= 1s%   validation_fraction must be in ]0, 1[R"   R$   R%   s   eta0 must be > 0R#   sg   alpha must be > 0 since learning_rate is 'optimal'. alpha is used to compute the optimal learning rate.s   The loss %s is not supported. Nsa   n_iter parameter is deprecated in 0.19 and will be removed in 0.21. Use max_iter and tol instead.s   max_iter and tol parameters have been added in %s in 0.19. If both are left unset, they default to max_iter=5 and tol=None. If tol is not None, max_iter defaults to max_iter=1000. From 0.21, default max_iter will be 1000, and default tol will be 1e-3.i   s  max_iter and tol parameters have been added in %s in 0.19. If max_iter is set but tol is left unset, the default value for tol in 0.19 and 0.20 will be None (which is equivalent to -infinity, so it has no effect) but will change in 0.21 to 1e-3. Specify tol to silence this warning.i  (   R"   R$   R%   (   t
   isinstanceRN   t   boolt
   ValueErrorRS   RY   R/   RL   RJ   RU   RT   RH   RQ   t   _get_penalty_typeRG   t   _get_learning_rate_typeRF   t   loss_functionsRZ   t   _tolRX   t   warningst   warnt   DeprecationWarningt   typeRA   t   FutureWarningt	   _max_iter(   R4   RE   t   for_partial_fitRY   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR[   p   s^    		
	
c         C   ss   yK |  j  | } | d | d } } | d k r@ |  j f } n  | |   SWn! t k
 rn t d |   n Xd S(	   s7   Get concrete ``LossFunction`` object for str ``loss``. i    i   t   hubert   epsilon_insensitivet   squared_epsilon_insensitives   The loss %s is not supported. N(   Rr   Rs   Rt   (   Ri   RI   t   KeyErrorRf   (   R4   RF   t   loss_t
   loss_classR_   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt   _get_loss_function   s    	c         C   s4   y t  | SWn! t k
 r/ t d |   n Xd  S(   Ns#   learning rate %s is not supported. (   t   LEARNING_RATE_TYPESRu   Rf   (   R4   RH   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyRh      s
    c         C   sF   t  |  j   } y t | SWn! t k
 rA t d |   n Xd  S(   Ns   Penalty %s is not supported. (   t   strt   lowert   PENALTY_TYPESRu   Rf   (   R4   RG   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyRg      s
    c         C   sq   | d k r- t j | d t j d d } n t j | d t j d d } | j d | k rm t d   n  | S(   s   Set the sample weight array.t   dtypet   orderRK   i    s+   Shapes of X and sample_weight do not match.N(   R/   R9   t   onest   float64t   asarrayt   shapeRf   (   R4   t   sample_weightt	   n_samples(    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt   _validate_sample_weight   s    !	c         C   s}  | d k r | d
 k	 r] t j | d d } | j | | f k rQ t d   n  | |  _ n' t j | | f d t j d d |  _ | d
 k	 r t j | d d } | j | f k r t d   n  | |  _ qt j | d t j d d |  _ n| d
 k	 rYt j | d t j d d } | j	   } | j | f k rMt d   n  | |  _ n! t j | d t j d d |  _ | d
 k	 rt j | d t j } | j d k r| j d k rt d   n  | j
 d  |  _ n! t j d d t j d d |  _ |  j d	 k ry|  j |  _ |  j |  _ t j |  j j d t j d d |  _ t j |  j j d t j d d |  _ n  d
 S(   s4   Allocate mem for parameters; initialize if provided.i   R~   RK   s+   Provided ``coef_`` does not match dataset. R}   s/   Provided intercept_init does not match dataset.s*   Provided coef_init does not match dataset.i   i    N(   i   (    (   R/   R9   R   R   Rf   R8   t   zerosR   R;   t   ravelR7   RW   t   standard_coef_t   standard_intercept_t   average_coef_t   average_intercept_(   R4   t	   n_classest
   n_featurest	   coef_initt   intercept_init(    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt   _allocate_parameter_mem   sP    		!		c         C   s  | j  d } t j | d t j } |  j s2 | St |   rG t } n t } | d |  j d |  j	  } t
 | j t j d | j  d d f  |   \ } } | j  d d k s | j  d d k r t d | |  j | j  d | j  d f   n  d | | <| S(   sG  Split the dataset between training set and validation set.

        Parameters
        ----------
        y : array, shape (n_samples, )
            Target values.

        Returns
        -------
        validation_mask : array, shape (n_samples, )
            Equal to 1 on the validation set, 0 on the training set.
        i    R}   t	   test_sizeRO   R   i   s   Splitting %d samples into a train set and a validation set with validation_fraction=%r led to an empty set (%d and %d samples). Please either change validation_fraction, increase number of samples, or disable early_stopping.(   R   R9   R   t   uint8RS   R   R   R   RT   RO   t   nextt   splitRf   (   R4   Rb   R   t   validation_maskt   splitter_typet   cvt	   idx_traint   idx_val(    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt   _make_validation_split  s"    		7&
c         C   s2   |  j  s d  St |  | | | | | | d | S(   NR5   (   RS   R/   R,   (   R4   R   Ra   Rb   R   R5   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt   _make_validation_score_cbE  s
    	N(   RA   RB   RC   t   TrueR/   R\   R6   R^   R   Rc   R[   Rx   Rh   Rg   R   R   R   R   (    (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyRD   D   s&   				K				<	'c         C   s)  t  j | j d t  j d d } d | | |  j | k <d } d } t |  j  d k r |  j s |  j j	   } |  j
 d } q|  j j	   } |  j d } |  j j	   } |  j d } nZ |  j s |  j | } |  j
 | } n4 |  j | } |  j | } |  j | } |  j | } | | | | | f S(   se   Initialization for fit_binary.

    Returns y, coef, intercept, average_coef, average_intercept.
    R}   R~   RK   g      i    i   N(   R9   R   R   R   R0   R/   t   lenRW   R8   R   R;   R   R   R   R   (   R?   Rb   t   it   y_it   average_interceptt   average_coefR=   R>   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt   _prepare_fit_binaryO  s(    !		c         C   s  t  |  | |  \ } } } } } | j d | j d k oM |
 j d k n sX t  t | | |
  \ } } |  j |  j  } |  j |  } | d k r |  j |  } n  t	 j
 d d g d | j } |  j | | | |
 d | } t |  j  } | j d t	 j t	 j  j  } |  j d k	 r3|  j n t	 j } |  j st | | |  j | | | |  j | | |  j | t |  j  | | t |  j  t |  j  t |  j  | | |	 | |  j |  j  |  j! |  } n t" | | | | |  j | | | |  j | | |  j | t |  j  | | t |  j  t |  j  t |  j  | | |	 | |  j |  j  |  j! | |  j  \ } } } } } t# |  j$  d k r| |  j% d <n | |  j% | <| | | f } | S(   s\  Fit a single binary classifier.

    The i'th class is considered the "positive" class.

    Parameters
    ----------
    est : Estimator object
        The estimator to fit

    i : int
        Index of the positive class

    X : numpy array or sparse matrix of shape [n_samples,n_features]
        Training data

    y : numpy array of shape [n_samples, ]
        Target values

    alpha : float
        The regularization parameter

    C : float
        Maximum step size for passive aggressive

    learning_rate : string
        The learning rate. Accepted values are 'constant', 'optimal',
        'invscaling', 'pa1' and 'pa2'.

    max_iter : int
        The maximum number of iterations (epochs)

    pos_weight : float
        The weight of the positive class

    neg_weight : float
        The weight of the negative class

    sample_weight : numpy array of shape [n_samples, ]
        The weight of each sample

    validation_mask : numpy array of shape [n_samples, ] or None
        Precomputed validation mask in case _fit_binary is called in the
        context of a one-vs-rest reduction.
    i    ii   R}   R5   i   N(&   R   R   t   AssertionErrorR   Rg   RG   Rh   R/   R   R9   t   arrayR}   R   R   RO   t   randintt   iinfot   int32t   maxRZ   t   infRW   R   t   loss_function_RL   RS   t   intRU   RM   RP   RN   RQ   RR   R.   R   R   R0   R   (   R?   R   Ra   Rb   RJ   RK   RH   RY   t
   pos_weightt
   neg_weightR   R   R   R=   R>   R   R   t   datasett   intercept_decayt   penalty_typet   learning_rate_typeR5   t   validation_score_cbRO   t   seedRZ   t   resultt   standard_coeft   standard_interceptt   n_iter_(    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt
   fit_binaryo  sN    1!7!"			t   BaseSGDClassifierc           B   s4  e  Z i	 e d  f d 6e d  f d 6e d f d 6e f d 6e f d 6e f d 6e e f d 6e	 e f d	 6e
 e f d
 6Z e d d d d e d d e d e d d d d d e d d d e e d d   Z e e d  d     Z d   Z d d d d  Z d   Z d   Z d d d  Z d d d d  Z RS(   g      ?t   hinget   squared_hingeg        t
   perceptront   logt   modified_hubert   squared_lossRr   Rs   Rt   R)   g-C6?g333333?i    R#   g      ?g?i   c      )   C   s   t  t |   j d | d | d | d | d | d | d | d | d	 |	 d
 |
 d | d | d | d | d | d | d | d | d | d |  | |  _ | |  _ d  S(   NRF   RG   RJ   RL   RM   RY   RZ   RN   RP   RI   RO   RH   RQ   RR   RS   RT   RU   RV   RW   RX   (   R]   R   R6   t   class_weightt   n_jobs(   R4   RF   RG   RJ   RL   RM   RY   RZ   RN   RP   RI   R   RO   RH   RQ   RR   RS   RT   RU   R   RV   RW   RX   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR6     s    
	sr   Attribute loss_function was deprecated in version 0.19 and will be removed in 0.21. Use ``loss_function_`` insteadc         C   s   |  j  S(   N(   R   (   R4   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt   loss_function  s    c         C   s  t  | | d d t j d d d t \ } } | j \ } } t |  |  |  j j d } t |  j |  j |  |  _	 |  j
 |	 |  }	 t |  d d   d  k s |
 d  k	 r |  j | | |
 |  n9 | |  j j d k r t d	 | |  j j d f   n  |  j |  |  _ t |  d
  s)d |  _ n  | d k rf|  j | | d | d | d | d |	 d | nM | d k r|  j | | d | d | d | d |	 d | n t d |   |  S(   Nt   csrR}   R~   RK   t   accept_large_sparsei    R8   is6   Number of features %d does not match previous data %d.R.   g      ?i   RJ   RH   R   RY   s>   The number of classes has to be greater than one; got %d class(   R   R9   R   R\   R   R   R0   R   R   t   _expanded_class_weightR   t   getattrR/   R   R8   Rf   Rx   R   t   hasattrR.   t   _fit_multiclasst   _fit_binary(   R4   Ra   Rb   RJ   RK   RF   RH   RY   R5   R   R   R   R   R   R   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt   _partial_fit  s>    	$


c
         C   s  |  j    t |  d  r% d  |  _ n  t | | d d t j d d d t \ } } | j \ }
 } t j	 |  } |  j
 r t |  d  r | d  k r |  j } n  | d  k r |  j } q n d  |  _ d  |  _ |  j d k r	|  j |  _ |  j |  _ d  |  _ d  |  _ n  d	 |  _ |  j | | | | | | |  j | |	 | |  |  j d  k	 r|  j t j k r|  j |  j k rt j d
 t  n  |  S(   NR0   R   R}   R~   RK   R   R8   i    g      ?sh   Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit.(   R[   R   R/   R0   R   R9   R   R\   R   t   uniqueRV   R8   R;   RW   R   R   R   R   R.   R   Rp   Rj   R   R   Rk   Rl   R   (   R4   Ra   Rb   RJ   RK   RF   RH   R   R   R   R   R   R5   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt   _fit3  s6    
				"	
c         C   s  t  |  d | | | | | | |  j d |  j d |  \ } }	 }
 |  j |
 | j d 7_ |
 |  _ |  j d k r |  j |  j d k r |  j j d d  |  _ |  j	 |  _
 q|  j j d d  |  _ t j |	  |  _ |  j |  _
 n' | j d d  |  _ t j |	  |  _
 d S(   s$   Fit a binary classifier on X and y. i   i    iN(   R   R   R.   R   R   RW   R   R7   R8   R   R;   R   R9   R:   R   (   R4   Ra   Rb   RJ   RK   R   RH   RY   R=   R>   R   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR   ^  s     

	c      
      sR   j     t d  j d  j t d d             f	 d   t t  j   D  } d }	 x? t |  D]1 \ }
 \ } } } |  j	 |
 <t
 |	 |  }	 q W j |	  j d 7_ |	  _  j d k rN j  j d k r j  _  j  _	 qN j  _ t j  j	   _  j  _	 n  d	 S(
   s   Fit a multi-class classifier by combining binary classifiers

        Each binary classifier predicts one class versus all others. This
        strategy is called OvA (One versus All) or OvR (One versus Rest).
        R   RP   t   requiret	   sharedmemc         3   sL   |  ]B } t  t   |         j | d   d  Vq d S(   g      ?R   N(   R   R   R   (   t   .0R   (	   RK   Ra   RJ   RH   RY   R   R4   R   Rb   (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pys	   <genexpr>  s   g        i    g      ?N(   R   R   R   RP   R!   t   rangeR   R0   t	   enumerateR;   R   R.   R   R   RW   R   R8   R   R   R9   R:   R   (   R4   Ra   Rb   RJ   RK   RH   R   RY   R   R   R   t   _R>   t   n_iter_i(    (	   RK   Ra   RJ   RH   RY   R   R4   R   Rb   sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR   x  s$    	$"	c         C   s   |  j  d t  |  j d k r: t d j |  j    n  |  j | | d |  j d d d |  j d |  j d	 d
 d | d | d d d d 	S(   s  Perform one epoch of stochastic gradient descent on given samples.

        Internally, this method uses ``max_iter = 1``. Therefore, it is not
        guaranteed that a minimum of the cost function is reached after calling
        it once. Matters such as objective convergence and early stopping
        should be handled by the user.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Subset of the training data

        y : numpy array, shape (n_samples,)
            Subset of the target values

        classes : array, shape (n_classes,)
            Classes across all calls to partial_fit.
            Can be obtained by via `np.unique(y_all)`, where y_all is the
            target vector of the entire dataset.
            This argument is required for the first call to partial_fit
            and can be omitted in the subsequent calls.
            Note that y doesn't need to contain all labels in `classes`.

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed.

        Returns
        -------
        self : returns an instance of self.
        Rq   t   balancedsF  class_weight '{0}' is not supported for partial_fit. In order to use 'balanced' weights, use compute_class_weight('{0}', classes, y). In place of y you can us a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.RJ   RK   g      ?RF   RH   RY   i   R5   R   R   R   (   R   N(
   R[   R   R   Rf   t   formatR   RJ   RF   RH   R/   (   R4   Ra   Rb   R5   R   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt   partial_fit  s     	'c         C   sC   |  j  | | d |  j d d d |  j d |  j d | d | d | S(	   s  Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data

        y : numpy array, shape (n_samples,)
            Target values

        coef_init : array, shape (n_classes, n_features)
            The initial coefficients to warm-start the optimization.

        intercept_init : array, shape (n_classes,)
            The initial intercept to warm-start the optimization.

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed. These weights will
            be multiplied with class_weight (passed through the
            constructor) if class_weight is specified

        Returns
        -------
        self : returns an instance of self.
        RJ   RK   g      ?RF   RH   R   R   R   (   R   RJ   RF   RH   (   R4   Ra   Rb   R   R   R   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyRc     s    N(   RA   RB   R   R   R   R   R   R   t   DEFAULT_EPSILONR   R    Ri   R   R   R/   R\   R6   t   propertyR   R   R   R   R   R   R   Rc   (    (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR     s<   


				/*		&/t   SGDClassifierc           B   s   e  Z d  Z d d d d e d d e d e d d d d d e d	 d
 d e e d d  Z d   Z e	 d    Z
 d   Z e	 d    Z d   Z RS(   s&  Linear classifiers (SVM, logistic regression, a.o.) with SGD training.

    This estimator implements regularized linear models with stochastic
    gradient descent (SGD) learning: the gradient of the loss is estimated
    each sample at a time and the model is updated along the way with a
    decreasing strength schedule (aka learning rate). SGD allows minibatch
    (online/out-of-core) learning, see the partial_fit method.
    For best results using the default learning rate schedule, the data should
    have zero mean and unit variance.

    This implementation works with data represented as dense or sparse arrays
    of floating point values for the features. The model it fits can be
    controlled with the loss parameter; by default, it fits a linear support
    vector machine (SVM).

    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using either the squared euclidean norm
    L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
    parameter update crosses the 0.0 value because of the regularizer, the
    update is truncated to 0.0 to allow for learning sparse models and achieve
    online feature selection.

    Read more in the :ref:`User Guide <sgd>`.

    Parameters
    ----------
    loss : str, default: 'hinge'
        The loss function to be used. Defaults to 'hinge', which gives a
        linear SVM.

        The possible options are 'hinge', 'log', 'modified_huber',
        'squared_hinge', 'perceptron', or a regression loss: 'squared_loss',
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.

        The 'log' loss gives logistic regression, a probabilistic classifier.
        'modified_huber' is another smooth loss that brings tolerance to
        outliers as well as probability estimates.
        'squared_hinge' is like hinge but is quadratically penalized.
        'perceptron' is the linear loss used by the perceptron algorithm.
        The other losses are designed for regression but can be useful in
        classification as well; see SGDRegressor for a description.

    penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.

    alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001
        Also used to compute learning_rate when set to 'optimal'.

    l1_ratio : float
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Defaults to 0.15.

    fit_intercept : bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.

    max_iter : int, optional
        The maximum number of passes over the training data (aka epochs).
        It only impacts the behavior in the ``fit`` method, and not the
        `partial_fit`.
        Defaults to 5. Defaults to 1000 from 0.21, or if tol is not None.

        .. versionadded:: 0.19

    tol : float or None, optional
        The stopping criterion. If it is not None, the iterations will stop
        when (loss > previous_loss - tol). Defaults to None.
        Defaults to 1e-3 from 0.21.

        .. versionadded:: 0.19

    shuffle : bool, optional
        Whether or not the training data should be shuffled after each epoch.
        Defaults to True.

    verbose : integer, optional
        The verbosity level

    epsilon : float
        Epsilon in the epsilon-insensitive loss functions; only if `loss` is
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right.
        For epsilon-insensitive, any differences between the current prediction
        and the correct label are ignored if they are less than this threshold.

    n_jobs : int or None, optional (default=None)
        The number of CPUs to use to do the OVA (One Versus All, for
        multi-class problems) computation.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    random_state : int, RandomState instance or None, optional (default=None)
        The seed of the pseudo random number generator to use when shuffling
        the data.  If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`.

    learning_rate : string, optional
        The learning rate schedule:

        'constant':
            eta = eta0
        'optimal': [default]
            eta = 1.0 / (alpha * (t + t0))
            where t0 is chosen by a heuristic proposed by Leon Bottou.
        'invscaling':
            eta = eta0 / pow(t, power_t)
        'adaptive':
            eta = eta0, as long as the training keeps decreasing.
            Each time n_iter_no_change consecutive epochs fail to decrease the
            training loss by tol or fail to increase validation score by tol if
            early_stopping is True, the current learning rate is divided by 5.

    eta0 : double
        The initial learning rate for the 'constant', 'invscaling' or
        'adaptive' schedules. The default value is 0.0 as eta0 is not used by
        the default schedule 'optimal'.

    power_t : double
        The exponent for inverse scaling learning rate [default 0.5].

    early_stopping : bool, default=False
        Whether to use early stopping to terminate training when validation
        score is not improving. If set to True, it will automatically set aside
        a fraction of training data as validation and terminate training when
        validation score is not improving by at least tol for
        n_iter_no_change consecutive epochs.

        .. versionadded:: 0.20

    validation_fraction : float, default=0.1
        The proportion of training data to set aside as validation set for
        early stopping. Must be between 0 and 1.
        Only used if early_stopping is True.

        .. versionadded:: 0.20

    n_iter_no_change : int, default=5
        Number of iterations with no improvement to wait before early stopping.

        .. versionadded:: 0.20

    class_weight : dict, {class_label: weight} or "balanced" or None, optional
        Preset for the class_weight fit parameter.

        Weights associated with classes. If not given, all classes
        are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        See :term:`the Glossary <warm_start>`.

        Repeatedly calling fit or partial_fit when warm_start is True can
        result in a different solution than when calling fit a single time
        because of the way the data is shuffled.
        If a dynamic learning rate is used, the learning rate is adapted
        depending on the number of samples already seen. Calling ``fit`` resets
        this counter, while ``partial_fit`` will result in increasing the
        existing counter.

    average : bool or int, optional
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10
        samples.

    n_iter : int, optional
        The number of passes over the training data (aka epochs).
        Defaults to None. Deprecated, will be removed in 0.21.

        .. versionchanged:: 0.19
            Deprecated

    Attributes
    ----------
    coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,            n_features)
        Weights assigned to the features.

    intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
        Constants in decision function.

    n_iter_ : int
        The actual number of iterations to reach the stopping criterion.
        For multiclass fits, it is the maximum over every binary fit.

    loss_function_ : concrete ``LossFunction``

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> Y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.SGDClassifier(max_iter=1000, tol=1e-3)
    >>> clf.fit(X, Y)
    ... #doctest: +NORMALIZE_WHITESPACE
    SGDClassifier(alpha=0.0001, average=False, class_weight=None,
           early_stopping=False, epsilon=0.1, eta0=0.0, fit_intercept=True,
           l1_ratio=0.15, learning_rate='optimal', loss='hinge', max_iter=1000,
           n_iter=None, n_iter_no_change=5, n_jobs=None, penalty='l2',
           power_t=0.5, random_state=None, shuffle=True, tol=0.001,
           validation_fraction=0.1, verbose=0, warm_start=False)

    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    sklearn.svm.LinearSVC, LogisticRegression, Perceptron

    R   R)   g-C6?g333333?i    R#   g        g      ?g?i   c      -   C   s   t  t |   j d | d | d | d | d | d | d | d | d	 |	 d
 |
 d | d | d | d | d | d | d | d | d | d | d | d |  d  S(   NRF   RG   RJ   RL   RM   RY   RZ   RN   RP   RI   R   RO   RH   RQ   RR   RS   RT   RU   R   RV   RW   RX   (   R]   R   R6   (   R4   RF   RG   RJ   RL   RM   RY   RZ   RN   RP   RI   R   RO   RH   RQ   RR   RS   RT   RU   R   RV   RW   RX   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR6     s    c         C   s)   |  j  d k r% t d |  j    n  d  S(   NR   R   s3   probability estimates are not available for loss=%r(   R   R   (   RF   t   AttributeError(   R4   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt   _check_proba  s    c         C   s   |  j    |  j S(   sL  Probability estimates.

        This method is only available for log loss and modified Huber loss.

        Multiclass probability estimates are derived from binary (one-vs.-rest)
        estimates by simple normalization, as recommended by Zadrozny and
        Elkan.

        Binary probability estimates for loss="modified_huber" are given by
        (clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
        it is necessary to perform proper probability calibration by wrapping
        the classifier with
        :class:`sklearn.calibration.CalibratedClassifierCV` instead.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)

        Returns
        -------
        array, shape (n_samples, n_classes)
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in `self.classes_`.

        References
        ----------
        Zadrozny and Elkan, "Transforming classifier scores into multiclass
        probability estimates", SIGKDD'02,
        http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf

        The justification for the formula in the loss="modified_huber"
        case is in the appendix B in:
        http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
        (   R   t   _predict_proba(   R4   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt   predict_proba  s    $
c         C   s  t  |  d  |  j d k r) |  j |  S|  j d k rot |  j  d k } |  j |  } | r t j | j d d f  } | d  d   d f } n | } t j	 | d d |  | d 7} | d	 :} | r | d  d   d f c | 8<| } ny | j
 d
 d  } | d k } t j |  rKd | | d  d   f <t |  j  | | <n  | | j | j d d f  :} | St d |  j   d  S(   NR.   R   R   i   i    i   ig      ?g       @t   axissV   predict_(log_)proba only supported when loss='log' or loss='modified_huber' (%r given)(   R   RF   t   _predict_proba_lrR   R0   t   decision_functionR9   R   R   t   clipt   sumt   anyR7   t   NotImplementedError(   R4   Ra   t   binaryt   scorest   prob2t   probt   prob_sumt   all_zero(    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR     s2    

	 c         C   s   |  j    |  j S(   s  Log of probability estimates.

        This method is only available for log loss and modified Huber loss.

        When loss="modified_huber", probability estimates may be hard zeros
        and ones, so taking the logarithm is not possible.

        See ``predict_proba`` for details.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        Returns
        -------
        T : array-like, shape (n_samples, n_classes)
            Returns the log-probability of the sample for each class in the
            model, where classes are ordered as they are in
            `self.classes_`.
        (   R   t   _predict_log_proba(   R4   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt   predict_log_proba;  s    
c         C   s   t  j |  j |   S(   N(   R9   R   R   (   R4   Ra   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR   T  s    N(   RA   RB   RC   R   R/   R   R\   R6   R   R   R   R   R   R   (    (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR     s   					'	+t   BaseSGDRegressorc           B   s   e  Z i e f d  6e e f d 6e e f d 6e e f d 6Z e d  d d d e	 d d e	 d e d d d	 d
 e d d e e d d   Z d   Z d d  Z d d d d  Z d d d d  Z d   Z d   Z d   Z RS(   R   Rr   Rs   Rt   R)   g-C6?g333333?i    R$   g{Gz?g      ?g?i   c      )   C   s   t  t |   j d | d | d | d | d | d | d | d | d	 |	 d
 |
 d | d | d | d | d | d | d | d | d | d |  d  S(   NRF   RG   RJ   RL   RM   RY   RZ   RN   RP   RI   RO   RH   RQ   RR   RS   RT   RU   RV   RW   RX   (   R]   R   R6   (   R4   RF   RG   RJ   RL   RM   RY   RZ   RN   RP   RI   RO   RH   RQ   RR   RS   RT   RU   RV   RW   RX   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR6   b  s    c         C   sh  t  | | d d t d d d t j d t \ } } | j t j d t } | j \ } } |  j | |  } t |  d d   d  k r |  j	 d | |	 |
  n9 | |  j
 j d	 k r t d
 | |  j
 j d	 f   n  |  j d k rBt |  d d   d  k rBt j | d t j d d |  _ t j d d t j d d |  _ n  |  j | | | | | | | |  |  S(   NR   t   copyR~   RK   R}   R   R8   i   is6   Number of features %d does not match previous data %d.i    R   (   R   R\   R9   R   t   astypeR   R   R   R/   R   R8   Rf   RW   R   R   R   t   _fit_regressor(   R4   Ra   Rb   RJ   RK   RF   RH   RY   R   R   R   R   R   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR   t  s&    $
'	$
c         C   sV   |  j  d t  |  j | | |  j d d d |  j d |  j d d d | d	 d d
 d S(   s:  Perform one epoch of stochastic gradient descent on given samples.

        Internally, this method uses ``max_iter = 1``. Therefore, it is not
        guaranteed that a minimum of the cost function is reached after calling
        it once. Matters such as objective convergence and early stopping
        should be handled by the user.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Subset of training data

        y : numpy array of shape (n_samples,)
            Subset of target values

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed.

        Returns
        -------
        self : returns an instance of self.
        Rq   RK   g      ?RF   RH   RY   i   R   R   R   N(   R[   R   R   RJ   RF   RH   R/   (   R4   Ra   Rb   R   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR     s    	c
   
      C   s+  |  j    |  j r^ t |  d d   d  k	 r^ | d  k rC |  j } n  | d  k rp |  j } qp n d  |  _ d  |  _ |  j d k r |  j |  _ |  j |  _ d  |  _	 d  |  _
 n  d |  _ |  j | | | | | | |  j |	 | | 
 |  j d  k	 r'|  j t j k r'|  j |  j k r't j d t  n  |  S(   NR8   i    g      ?sh   Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit.(   R[   RV   R   R/   R8   R;   RW   R   R   R   R   R.   R   Rp   Rj   R9   R   R   Rk   Rl   R   (
   R4   Ra   Rb   RJ   RK   RF   RH   R   R   R   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR     s,    
!				"	
c         C   sC   |  j  | | d |  j d d d |  j d |  j d | d | d | S(	   s  Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data

        y : numpy array, shape (n_samples,)
            Target values

        coef_init : array, shape (n_features,)
            The initial coefficients to warm-start the optimization.

        intercept_init : array, shape (1,)
            The initial intercept to warm-start the optimization.

        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : returns an instance of self.
        RJ   RK   g      ?RF   RH   R   R   R   (   R   RJ   RF   RH   (   R4   Ra   Rb   R   R   R   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyRc     s
    c         C   sZ   t  |  d d d g d t t | d d } t | |  j j d t |  j } | j   S(   s  Predict using the linear model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)

        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        R.   R8   R;   t
   all_or_anyt   accept_sparseR   t   dense_output(	   R   t   allR   R   R8   t   TR   R;   R   (   R4   Ra   R   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt   _decision_function  s
    c         C   s   |  j  |  S(   s  Predict using the linear model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)

        Returns
        -------
        array, shape (n_samples,)
           Predicted target values per element in X.
        (   R   (   R4   Ra   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt   predict  s    c	         C   s  t  | | |  \ }	 }
 |  j |  } |  j |  j  } |  j |  } t |  d  sc d |  _ n  |  j |  } |  j | | | |  } t	 |  j
  } | j d t j t j  j  } |  j d  k	 r |  j n t j } |  j d k rCt |  j |  j d |  j |  j d | | | | |  j |	 | |  j | t |  j  | | t |  j  t |  j  t |  j  | d d | |  j  |  j! |  j |
 |  j  \ |  _ |  _ |  _ |  _ |  _" t j# |  j  |  _ t j# |  j  |  _ |  j |  j" | j$ d 7_ |  j |  j d k r(|  j |  _% |  j |  _& q|  j |  _% |  j |  _& n t' |  j% |  j& d | | | | |  j |	 | |  j | t |  j  | | t |  j  t |  j  t |  j  | d d | |  j  |  j! |  j |
  \ |  _% |  _& |  _" |  j |  j" | j$ d 7_ t j# |  j&  |  _& d  S(   NR.   g      ?i    ((   R   Rx   Rg   RG   Rh   R   R.   R   R   R   RO   R   R9   R   R   R   Rj   R/   R   RW   R   R   R   R   R   RL   RS   R   RU   RM   RP   RN   RQ   RR   R   R:   R   R8   R;   R   (   R4   Ra   Rb   RJ   RK   RF   RH   R   RY   R   R   R   R   R   R   R   RO   R   RZ   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR     s~    !"	

	-	
	N(   RA   RB   R   R   R   R   R    Ri   R   R   R/   R\   R6   R   R   R   Rc   R   R   R   (    (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR   X  s.   
						!		t   SGDRegressorc           B   sS   e  Z d  Z d d d d e d d e d e d d d d e d	 d
 e e d d  Z RS(   s   Linear model fitted by minimizing a regularized empirical loss with SGD

    SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated each sample at a time and the model is updated along the way with
    a decreasing strength schedule (aka learning rate).

    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using either the squared euclidean norm
    L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
    parameter update crosses the 0.0 value because of the regularizer, the
    update is truncated to 0.0 to allow for learning sparse models and achieve
    online feature selection.

    This implementation works with data represented as dense numpy arrays of
    floating point values for the features.

    Read more in the :ref:`User Guide <sgd>`.

    Parameters
    ----------
    loss : str, default: 'squared_loss'
        The loss function to be used. The possible values are 'squared_loss',
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'

        The 'squared_loss' refers to the ordinary least squares fit.
        'huber' modifies 'squared_loss' to focus less on getting outliers
        correct by switching from squared to linear loss past a distance of
        epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is
        linear past that; this is the loss function used in SVR.
        'squared_epsilon_insensitive' is the same but becomes squared loss past
        a tolerance of epsilon.

    penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.

    alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001
        Also used to compute learning_rate when set to 'optimal'.

    l1_ratio : float
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Defaults to 0.15.

    fit_intercept : bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.

    max_iter : int, optional
        The maximum number of passes over the training data (aka epochs).
        It only impacts the behavior in the ``fit`` method, and not the
        `partial_fit`.
        Defaults to 5. Defaults to 1000 from 0.21, or if tol is not None.

        .. versionadded:: 0.19

    tol : float or None, optional
        The stopping criterion. If it is not None, the iterations will stop
        when (loss > previous_loss - tol). Defaults to None.
        Defaults to 1e-3 from 0.21.

        .. versionadded:: 0.19

    shuffle : bool, optional
        Whether or not the training data should be shuffled after each epoch.
        Defaults to True.

    verbose : integer, optional
        The verbosity level.

    epsilon : float
        Epsilon in the epsilon-insensitive loss functions; only if `loss` is
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right.
        For epsilon-insensitive, any differences between the current prediction
        and the correct label are ignored if they are less than this threshold.

    random_state : int, RandomState instance or None, optional (default=None)
        The seed of the pseudo random number generator to use when shuffling
        the data.  If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`.

    learning_rate : string, optional
        The learning rate schedule:

        'constant':
            eta = eta0
        'optimal':
            eta = 1.0 / (alpha * (t + t0))
            where t0 is chosen by a heuristic proposed by Leon Bottou.
        'invscaling': [default]
            eta = eta0 / pow(t, power_t)
        'adaptive':
            eta = eta0, as long as the training keeps decreasing.
            Each time n_iter_no_change consecutive epochs fail to decrease the
            training loss by tol or fail to increase validation score by tol if
            early_stopping is True, the current learning rate is divided by 5.

    eta0 : double
        The initial learning rate for the 'constant', 'invscaling' or
        'adaptive' schedules. The default value is 0.0 as eta0 is not used by
        the default schedule 'optimal'.

    power_t : double
        The exponent for inverse scaling learning rate [default 0.5].

    early_stopping : bool, default=False
        Whether to use early stopping to terminate training when validation
        score is not improving. If set to True, it will automatically set aside
        a fraction of training data as validation and terminate training when
        validation score is not improving by at least tol for
        n_iter_no_change consecutive epochs.

        .. versionadded:: 0.20

    validation_fraction : float, default=0.1
        The proportion of training data to set aside as validation set for
        early stopping. Must be between 0 and 1.
        Only used if early_stopping is True.

        .. versionadded:: 0.20

    n_iter_no_change : int, default=5
        Number of iterations with no improvement to wait before early stopping.

        .. versionadded:: 0.20

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        See :term:`the Glossary <warm_start>`.

        Repeatedly calling fit or partial_fit when warm_start is True can
        result in a different solution than when calling fit a single time
        because of the way the data is shuffled.
        If a dynamic learning rate is used, the learning rate is adapted
        depending on the number of samples already seen. Calling ``fit`` resets
        this counter, while ``partial_fit``  will result in increasing the
        existing counter.

    average : bool or int, optional
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10
        samples.

    n_iter : int, optional
        The number of passes over the training data (aka epochs).
        Defaults to None. Deprecated, will be removed in 0.21.

        .. versionchanged:: 0.19
            Deprecated

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        Weights assigned to the features.

    intercept_ : array, shape (1,)
        The intercept term.

    average_coef_ : array, shape (n_features,)
        Averaged weights assigned to the features.

    average_intercept_ : array, shape (1,)
        The averaged intercept term.

    n_iter_ : int
        The actual number of iterations to reach the stopping criterion.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = linear_model.SGDRegressor(max_iter=1000, tol=1e-3)
    >>> clf.fit(X, y)
    ... #doctest: +NORMALIZE_WHITESPACE
    SGDRegressor(alpha=0.0001, average=False, early_stopping=False,
           epsilon=0.1, eta0=0.01, fit_intercept=True, l1_ratio=0.15,
           learning_rate='invscaling', loss='squared_loss', max_iter=1000,
           n_iter=None, n_iter_no_change=5, penalty='l2', power_t=0.25,
           random_state=None, shuffle=True, tol=0.001, validation_fraction=0.1,
           verbose=0, warm_start=False)

    See also
    --------
    Ridge, ElasticNet, Lasso, sklearn.svm.SVR

    R   R)   g-C6?g333333?i    R$   g{Gz?g      ?g?i   c      )   C   s   t  t |   j d | d | d | d | d | d | d | d | d	 |	 d
 |
 d | d | d | d | d | d | d | d | d | d |  d  S(   NRF   RG   RJ   RL   RM   RY   RZ   RN   RP   RI   RO   RH   RQ   RR   RS   RT   RU   RV   RW   RX   (   R]   R   R6   (   R4   RF   RG   RJ   RL   RM   RY   RZ   RN   RP   RI   RO   RH   RQ   RR   RS   RT   RU   RV   RW   RX   (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR6   /  s    N(   RA   RB   RC   R   R/   R   R\   R6   (    (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyR   f  s   					(@   RC   t   numpyR9   Rk   t   abcR    R   t   utils._joblibR   R   t   baseR   R   R   R   R   R	   R
   t   utilsR   R   R   t   utils.extmathR   t   utils.multiclassR   t   utils.validationR   t
   exceptionsR   t	   externalsR   t   model_selectionR   R   t   sgd_fastR   R   R   R   R   R   R   R   R   R   R   R    t   utils.fixesR!   Ry   R|   R   t   objectR,   t   with_metaclassRD   R   R/   R   R   R   R   R   (    (    (    sG   lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.pyt   <module>   sX   "% 	!h  j 