This document contains scikit-learn code fragments:
- that contain a raise statement or a call to a function whose name contains "warn"/"Warn",
- and that also contain some mechanism that triggers a __repr__ call (repr(), %r, {!r}, and variations thereof such as %(name)r or {name!r}); some non-__repr__ mechanisms may also appear. A short illustration of these mechanisms is given below.
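For reference, here is a minimal, self-contained sketch (not taken from scikit-learn; the Probe class is purely illustrative) showing how each of these formatting mechanisms ends up invoking __repr__:

    class Probe:
        """Tiny helper whose __repr__ makes the call visible."""
        def __repr__(self):
            return "<Probe repr called>"

    p = Probe()
    print("explicit repr(): %s" % repr(p))            # repr()
    print("percent-r: %r" % p)                        # %r
    print("named percent-r: %(name)r" % {"name": p})  # %(name)r
    print("format-spec !r: {!r}".format(p))           # {!r}
    print("named !r: {name!r}".format(name=p))        # {name!r}
    # %s and {} go through __str__ instead, falling back to __repr__
    # only when __str__ is not defined.
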
-
Line 55, col. 12 in
clone():raise TypeError( "Cannot clone object '%s' (type %s): it does not seem to be a scikit-learn estimator as it does not implement a 'get_params' methods." % (repr(estimator), type(estimator)))
-
Line 211, col. 4 in
test_perfect_checkerboard():raise SkipTest( 'This test is failing on the buildbot, but cannot reproduce. Temporarily disabling it until it can be reproduced and fixed.' )
-
Line 329, col. 8 in
k_means():raise ValueError( "precompute_distances should be 'auto' or True/False, but a value of %r was passed" % precompute_distances)
-
Line 112, col. 8 in
affinity_propagation():raise ValueError('S must be a square array (shape=%s)' % repr(S.shape))
-
Line 440, col. 12 in
dump_svmlight_file():raise ValueError('expected y of shape (n_samples, 1), got %r' % (yval.shape,)) -
Line 444, col. 12 in
dump_svmlight_file():raise ValueError('expected y of shape (n_samples,), got %r' % (yval.shape,)) -
Line 449, col. 8 in
dump_svmlight_file():raise ValueError( 'X.shape[0] and y.shape[0] should be the same, got %r and %r instead.' % (Xval.shape[0], yval.shape[0])) -
Line 472, col. 12 in
dump_svmlight_file():raise ValueError('expected query_id of shape (n_samples,), got %r' % ( query_id.shape,))
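Note that these dump_svmlight_file entries wrap the shape in a one-element tuple before %-formatting. A minimal sketch of why (the array here is purely illustrative):

    import numpy as np

    y = np.zeros((5, 2))
    # y.shape is itself a tuple, so it must be wrapped in a 1-tuple;
    # otherwise % would try to consume its elements as separate arguments.
    print('got %r' % (y.shape,))   # got (5, 2)
    # print('got %r' % y.shape)    # TypeError: not all arguments converted
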
-
Line 792, col. 12 in
make_blobs():raise ValueError('Parameter `centers` must be array-like. Got {!r} instead'.format(centers))
-
Line 385, col. 12 in
_fetch_lfw_pairs():raise ValueError('invalid line %d: %r' % (i + 1, components)) -
Line 507, col. 8 in
fetch_lfw_pairs():raise ValueError("subset='%s' is invalid: should be one of %r" % (subset, list(sorted(label_filenames.keys()))))
-
Line 381, col. 8 in
fetch_20newsgroups_vectorized():raise ValueError( "%r is not a valid subset: should be one of ['train', 'test', 'all']" % subset)
-
Line 518, col. 8 in
dict_learning():raise ValueError('Coding method %r not supported as a fit algorithm.' % method)
-
Line 301, col. 12 in
LatentDirichletAllocation():raise ValueError("Invalid 'n_components' parameter: %r" % self._n_components) -
Line 305, col. 12 in
LatentDirichletAllocation():raise ValueError("Invalid 'total_samples' parameter: %r" % self.total_samples) -
Line 309, col. 12 in
LatentDirichletAllocation():raise ValueError("Invalid 'learning_offset' parameter: %r" % self. learning_offset) -
Line 313, col. 12 in
LatentDirichletAllocation():raise ValueError("Invalid 'learning_method' parameter: %r" % self. learning_method)
-
Line 291, col. 8 in
fastica():raise exc( "Unknown function %r; should be one of 'logcosh', 'exp', 'cube' or callable" % fun)
-
Line 220, col. 12 in
IncrementalPCA():raise ValueError( 'n_components=%r invalid for n_features=%d, need more rows than columns for IncrementalPCA processing' % (self.n_components, n_features)) -
Line 224, col. 12 in
IncrementalPCA():raise ValueError( 'n_components=%r must be less or equal to the batch number of samples %d.' % (self.n_components, n_samples))
-
Line 179, col. 12 in
TruncatedSVD():raise ValueError('unknown algorithm %r' % self.algorithm)
-
Line 422, col. 12 in
PCA():raise ValueError( "n_components=%r must be between 0 and min(n_samples, n_features)=%r with svd_solver='full'" % (n_components, min(n_samples, n_features))) -
Line 428, col. 16 in
PCA():raise ValueError( 'n_components=%r must be of type int when greater than or equal to 1, was of type=%r' % (n_components, type(n_components))) -
Line 483, col. 12 in
PCA():raise ValueError("n_components=%r cannot be a string with svd_solver='%s'" % (n_components, svd_solver)) -
Line 487, col. 12 in
PCA():raise ValueError( "n_components=%r must be between 1 and min(n_samples, n_features)=%r with svd_solver='%s'" % (n_components, min(n_samples, n_features), svd_solver)) -
Line 493, col. 12 in
PCA():raise ValueError( 'n_components=%r must be of type int when greater than or equal to 1, was of type=%r' % (n_components, type(n_components))) -
Line 498, col. 12 in
PCA():raise ValueError( "n_components=%r must be strictly less than min(n_samples, n_features)=%r with svd_solver='%s'" % (n_components, min(n_samples, n_features), svd_solver))
-
Line 205, col. 8 in
_check_string_param():raise ValueError('Invalid solver parameter: got %r instead of one of %r' % (solver, allowed_solver)) -
Line 211, col. 8 in
_check_string_param():raise ValueError( 'Invalid regularization parameter: got %r instead of one of %r' % ( regularization, allowed_regularization)) -
Line 217, col. 8 in
_check_string_param():raise ValueError( 'Invalid beta_loss parameter: solver %r does not handle beta_loss = %r' % (solver, beta_loss)) -
Line 241, col. 8 in
_beta_loss_to_float():raise ValueError( 'Invalid beta_loss parameter: got %r instead of one of %r, or a float.' % (beta_loss, allowed_beta_loss.keys())) -
Line 378, col. 8 in
_initialize_nmf():raise ValueError('Invalid init parameter: got %r instead of one of %r' % ( init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar'))) -
Line 1003, col. 8 in
non_negative_factorization():raise ValueError( 'Number of components must be a positive integer; got (n_components=%r)' % n_components) -
Line 1006, col. 8 in
non_negative_factorization():raise ValueError( 'Maximum number of iterations must be a positive integer; got (max_iter=%r)' % max_iter) -
Line 1009, col. 8 in
non_negative_factorization():raise ValueError( 'Tolerance for stopping criteria must be positive; got (tol=%r)' % tol)
-
Line 70, col. 12 in
QuantileEstimator():raise ValueError('`alpha` must be in (0, 1.0) but was %r' % alpha) -
Line 267, col. 12 in
RegressionLossFunction():raise ValueError('``n_classes`` must be 1 for regression but was %r' % n_classes) -
Line 813, col. 12 in
BaseGradientBoosting():raise ValueError('n_estimators must be greater than 0 but was %r' % self.n_estimators) -
Line 817, col. 12 in
BaseGradientBoosting():raise ValueError('learning_rate must be greater than 0 but was %r' % self.learning_rate) -
Line 837, col. 12 in
BaseGradientBoosting():raise ValueError('subsample must be in (0,1] but was %r' % self.subsample) -
Line 847, col. 20 in
BaseGradientBoosting():raise ValueError( 'init=%r must be valid BaseEstimator and support both fit and predict' % self.init) -
Line 852, col. 12 in
BaseGradientBoosting():raise ValueError('alpha must be in (0.0, 1.0) but was %r' % self.alpha) -
Line 868, col. 16 in
BaseGradientBoosting():raise ValueError( "Invalid value for max_features: %r. Allowed string values are 'auto', 'sqrt' or 'log2'." % self.max_features) -
Line 886, col. 12 in
BaseGradientBoosting():raise ValueError( 'n_iter_no_change should either be None or an integer. %r was passed' % self.n_iter_no_change) -
Line 892, col. 12 in
BaseGradientBoosting():raise ValueError("'presort' should be in {}. Got {!r} instead.".format( allowed_presort, self.presort)) -
Line 1685, col. 12 in
GradientBoostingClassifier():raise AttributeError('loss=%r does not support predict_proba' % self.loss) -
Line 1736, col. 12 in
GradientBoostingClassifier():raise AttributeError('loss=%r does not support predict_proba' % self.loss)
-
Line 162, col. 12 in
VotingClassifier():raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)" % self. voting) -
Line 250, col. 12 in
VotingClassifier():raise AttributeError('predict_proba is not available when voting=%r' % self.voting)
-
Line 212, col. 16 in
IsolationForest():raise ValueError('max_samples must be in (0, 1], got %r' % self.max_samples)
-
Line 817, col. 16 in
CountVectorizer():raise ValueError(u'max_features=%r, neither a positive integer nor None' % max_features)
-
Line 106, col. 12 in
FeatureHasher():raise TypeError('n_features must be integral, got %r (%s).' % (n_features, type(n_features))) -
Line 112, col. 12 in
FeatureHasher():raise ValueError("input_type must be 'dict', 'pair' or 'string', got %r." % input_type)
-
Line 239, col. 12 in
_compute_n_patches():raise ValueError('Invalid value for max_patches: %r' % max_patches)
-
Line 416, col. 12 in
SelectPercentile():raise ValueError('percentile should be >=0, <=100; got %r' % self.percentile) -
Line 490, col. 12 in
SelectKBest():raise ValueError( "k should be >=0, <= n_features = %d; got %r. Use k='all' to return all features." % (X.shape[1], self.k)) -
Line 740, col. 12 in
GenericUnivariateSelect():raise ValueError( 'The mode passed should be one of %s, %r, (type %s) was passed.' % ( self._selection_modes.keys(), self.mode, type(self.mode)))
-
Line 187, col. 12 in
SimpleImputer():raise ValueError( 'SimpleImputer does not support data with dtype {0}. Please provide either a numeric array (with a floating point or integer dtype) or categorical data represented either as an array with integer dtype or an array of string values with an object dtype.'.format(X.dtype))
-
Line 1210, col. 12 in
LogisticRegression():raise ValueError('Penalty term must be positive; got (C=%r)' % self.C) -
Line 1213, col. 12 in
LogisticRegression():raise ValueError( 'Maximum number of iteration must be positive; got (max_iter=%r)' % self.max_iter) -
Line 1216, col. 12 in
LogisticRegression():raise ValueError( 'Tolerance for stopping criteria must be positive; got (tol=%r)' % self.tol ) -
Line 1254, col. 12 in
LogisticRegression():raise ValueError( 'This solver needs samples of at least 2 classes in the data, but the data contains only one class: %r' % classes_[0]) -
Line 1618, col. 12 in
LogisticRegressionCV():raise ValueError( 'Maximum number of iteration must be positive; got (max_iter=%r)' % self.max_iter) -
Line 1621, col. 12 in
LogisticRegressionCV():raise ValueError( 'Tolerance for stopping criteria must be positive; got (tol=%r)' % self.tol ) -
Line 1655, col. 12 in
LogisticRegressionCV():raise ValueError( 'This solver needs samples of at least 2 classes in the data, but the data contains only one class: %r' % classes[0])
-
Line 479, col. 12 in
enet_path():raise ValueError( "Precompute should be one of True, False, 'auto' or array-like. Got %r" % precompute) -
Line 701, col. 12 in
ElasticNet():raise ValueError( 'precompute should be one of True, False or array-like. Got %r' % self.precompute) -
Line 1091, col. 12 in
LinearModelCV():raise ValueError('y has 0 samples: %r' % y)
-
Line 43, col. 8 in
_resample_model():raise ValueError("'scaling' should be between 0 and 1. Got %r instead." % scaling) -
Line 111, col. 12 in
BaseRandomizedLinearModel():raise ValueError( "'memory' should either be a string or a sklearn.externals.joblib.Memory instance, got 'memory={!r}' instead." .format(type(memory))) -
Line 630, col. 8 in
lasso_stability_path():raise ValueError( "Parameter 'scaling' should be between 0 and 1. Got %r instead." % scaling)
-
Line 282, col. 12 in
BaseSGD():raise ValueError( 'Splitting %d samples into a train set and a validation set with validation_fraction=%r led to an empty set (%d and %d samples). Please either change validation_fraction, increase number of samples, or disable early_stopping.' % (n_samples, self.validation_fraction, X_train.shape[0], X_val.shape[0])) -
Line 962, col. 12 in
SGDClassifier():raise AttributeError('probability estimates are not available for loss=%r' % self.loss) -
Line 1043, col. 12 in
SGDClassifier():raise NotImplementedError( "predict_(log_)proba only supported when loss='log' or loss='modified_huber' (%r given)" % self.loss)
-
Line 50, col. 8 in
check_clusterings():raise ValueError('labels_true must be 1D: shape is %r' % (labels_true.shape,)) -
Line 53, col. 8 in
check_clusterings():raise ValueError('labels_pred must be 1D: shape is %r' % (labels_pred.shape,))
-
Line 94, col. 12 in
_check_reg_targets():raise ValueError( "Allowed 'multioutput' string values are {}. You provided multioutput={!r}" .format(allowed_multioutput_str, multioutput))
-
Line 1041, col. 20 in
precision_recall_fscore_support():raise ValueError('pos_label=%r is not a valid label: %r' % (pos_label, present_labels)) -
Line 1048, col. 8 in
precision_recall_fscore_support():warnings.warn( "Note that pos_label (set to %r) is ignored when average != 'binary' (got %r). You may use labels=[pos_label] to specify a single positive class." % (pos_label, average), UserWarning)
-
Line 158, col. 8 in
check_paired_arrays():raise ValueError( 'X and Y should be of same shape. They were respectively %r and %r long.' % (X.shape, Y.shape)) -
Line 507, col. 12 in
manhattan_distances():raise TypeError('sum_over_features=%r not supported for sparse matrices' % sum_over_features) -
Line 1131, col. 8 in
_check_chunk_size():raise TypeError( 'reduce_func returned %r. Expected sequence(s) of length %d.' % ( reduced if is_tuple else reduced[0], chunk_size)) -
Line 1564, col. 8 in
pairwise_kernels():raise ValueError('Unknown kernel %r' % metric)
-
Line 311, col. 12 in
roc_auc_score():raise ValueError('Expected max_frp in range ]0, 1], got: %r' % max_fpr)
-
Line 218, col. 12 in
get_scorer():raise ValueError('%r is not a valid scoring value. Valid options are %s' % (scoring, sorted(SCORERS.keys()))) -
Line 257, col. 8 in
check_scoring():raise TypeError( "estimator should be an estimator implementing 'fit' method, %r was passed" % estimator) -
Line 268, col. 12 in
check_scoring():raise ValueError( 'scoring value %r looks like it is a metric function rather than a scorer. A scorer should require an estimator as its first parameter. Please use `make_scorer` to convert a metric to a scorer.' % scoring) -
Line 280, col. 12 in
check_scoring():raise TypeError( "If no scoring is specified, the estimator passed should have a 'score' method. The estimator %r does not." % estimator) -
Line 289, col. 8 in
check_scoring():raise ValueError( 'scoring value should either be a callable, string or None. %r was passed' % scoring) -
Line 352, col. 16 in
_check_multimetric_scoring():raise ValueError(err_msg + 'Duplicate elements were found in the given list. %r' % repr(scoring)) -
Line 357, col. 24 in
_check_multimetric_scoring():raise ValueError(err_msg + 'One or more of the elements were callables. Use a dict of score name mapped to the scorer callable. Got %r' % repr(scoring)) -
Line 363, col. 24 in
_check_multimetric_scoring():raise ValueError(err_msg + 'Non-string types were found in the given list. Got %r' % repr(scoring)) -
Line 370, col. 16 in
_check_multimetric_scoring():raise ValueError(err_msg + 'Empty list was given. %r' % repr(scoring)) -
Line 376, col. 16 in
_check_multimetric_scoring():raise ValueError( 'Non-string types were found in the keys of the given dict. scoring=%r' % repr(scoring)) -
Line 379, col. 16 in
_check_multimetric_scoring():raise ValueError('An empty dict was passed. %r' % repr(scoring))
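As an aside, the _check_multimetric_scoring entries above combine %r with an explicit repr() call, so repr is applied twice and the message gains an extra layer of quoting. A small sketch (the scoring value is purely illustrative):

    scoring = ['accuracy', 'accuracy']
    # %r alone already calls repr() on its argument:
    print('Got %r' % scoring)        # Got ['accuracy', 'accuracy']
    # %r combined with an explicit repr() applies repr twice:
    print('Got %r' % repr(scoring))  # Got "['accuracy', 'accuracy']"
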
-
Line 97, col. 12 in
ParameterGrid():raise TypeError('Parameter grid is not a dict or a list ({!r})'.format( param_grid)) -
Line 108, col. 16 in
ParameterGrid():raise TypeError('Parameter grid is not a dict ({!r})'.format(grid)) -
Line 112, col. 20 in
ParameterGrid():raise TypeError('Parameter grid value is not iterable (key={!r}, value={!r})'.format(key, grid[key])) -
Line 626, col. 16 in
BaseSearchCV():raise ValueError( 'For multi-metric scoring, the parameter refit must be set to a scorer key to refit an estimator with the best parameter setting on the whole data and make the best_* attributes available for that metric. If this is not needed, refit should be set to False explicitly. %r was passed.' % self.refit)
-
Line 990, col. 12 in
test_learning_curve():raise RuntimeError('Unexpected warning: %r' % w[0].message) -
Line 1007, col. 12 in
test_learning_curve():raise RuntimeError('Unexpected warning: %r' % w[0].message) -
Line 1198, col. 8 in
test_validation_curve():raise RuntimeError('Unexpected warning: %r' % w[0].message)
-
Line 581, col. 12 in
_score():raise ValueError( 'scoring must return a number, got %s (%s) instead. (scorer=%r)' % (str(score), type(score), scorer))
-
Line 579, col. 12 in
StratifiedKFold():raise ValueError('Supported target types are: {}. Got {!r} instead.'.format(allowed_target_types, type_of_target_y)) -
Line 1649, col. 12 in
_validate_shuffle_split_init():raise ValueError('Invalid value for test_size: %r' % test_size) -
Line 1664, col. 12 in
_validate_shuffle_split_init():raise ValueError('Invalid value for train_size: %r' % train_size)
-
Line 365, col. 12 in
RadiusNeighborsClassifier():raise ValueError( 'No neighbors found for test samples %r, you can try using larger radius, give a label for outliers, or consider removing them from your dataset.' % outliers)
-
Line 425, col. 16 in
OneHotEncoder():raise TypeError( "Wrong type for parameter `n_values`. Expected 'auto', int or array of ints, got %r" % type(X))
-
Line 142, col. 12 in
KBinsDiscretizer():raise ValueError("Valid options for 'encode' are {}. Got encode={!r} instead." .format(valid_encode, self.encode)) -
Line 147, col. 12 in
KBinsDiscretizer():raise ValueError( "Valid options for 'strategy' are {}. Got strategy={!r} instead.". format(valid_strategy, self.strategy)) -
Line 288, col. 12 in
KBinsDiscretizer():raise ValueError( "inverse_transform only supports 'encode = ordinal'. Got encode={!r} instead." .format(self.encode))
-
Line 410, col. 12 in
LabelBinarizer():raise ValueError('y has 0 samples: %r' % y) -
Line 584, col. 12 in
label_binarize():raise ValueError('y has 0 samples: %r' % y)
-
Line 123, col. 8 in
johnson_lindenstrauss_min_dim():raise ValueError('The JL bound is defined for eps in ]0, 1[, got %r' % eps) -
Line 127, col. 8 in
johnson_lindenstrauss_min_dim():raise ValueError( 'The JL bound is defined for n_samples greater than zero, got %r' % n_samples) -
Line 141, col. 8 in
_check_density():raise ValueError('Expected density in range ]0, 1], got: %r' % density)
-
Line 224, col. 12 in
LinearSVC():raise ValueError('Penalty term must be positive; got (C=%r)' % self.C) -
Line 412, col. 12 in
LinearSVR():raise ValueError('Penalty term must be positive; got (C=%r)' % self.C)
-
Line 167, col. 12 in
BaseLibSVM():raise ValueError( """sample_weight and X have incompatible shapes: %r vs %r Note: Sparse matrices cannot be indexed w/boolean masks (use `indices=True` in CV).""" % (sample_weight.shape, X.shape)) -
Line 465, col. 12 in
BaseLibSVM():raise ValueError('cannot use sparse input in %r trained on dense data' % type(self).__name__) -
Line 744, col. 8 in
_get_liblinear_solver_type():raise ValueError( '`multi_class` must be one of `ovr`, `crammer_singer`, got %r' % multi_class) -
Line 764, col. 4 in
_get_liblinear_solver_type():raise ValueError( 'Unsupported set of arguments: %s, Parameters: penalty=%r, loss=%r, dual=%r' % (error_string, penalty, loss, dual)) -
Line 870, col. 12 in
_fit_liblinear():raise ValueError( 'This solver needs samples of at least 2 classes in the data, but the data contains only one class: %r' % classes_[0]) -
Line 887, col. 12 in
_fit_liblinear():raise ValueError( 'Intercept scaling is %r but needs to be greater than 0. To disable fitting an intercept, set fit_intercept=False.' % intercept_scaling)
-
Line 56, col. 16 in
test_metaestimator_delegation():raise AttributeError('%r is hidden' % obj.hidden_method)
-
Line 244, col. 12 in
BaseDecisionTree():raise ValueError('max_leaf_nodes must be integral number but was %r' % max_leaf_nodes) -
Line 297, col. 12 in
BaseDecisionTree():raise ValueError("'presort' should be in {}. Got {!r} instead.".format( allowed_presort, self.presort))
-
Line 96, col. 8 in
unique_labels():raise ValueError('Unknown label type: %s' % repr(ys)) -
Line 171, col. 8 in
check_classification_targets():raise ValueError('Unknown label type: %r' % y_type) -
Line 242, col. 8 in
type_of_target():raise ValueError( 'Expected array-like (array or non-string sequence), got %r' % y) -
Line 262, col. 12 in
type_of_target():raise ValueError( 'You appear to be using a legacy multi-label data representation. Sequence of sequences are no longer supported; use a binary array or sparse matrix instead.' ) -
Line 314, col. 16 in
_check_partial_fit_first_call():raise ValueError( '`classes=%r` is not the same as on last call to partial_fit, was: %r' % (classes, clf.classes_))
-
Line 245, col. 8 in
resample():raise ValueError('Unexpected kw arguments: %r' % options.keys())
-
Line 102, col. 20 in
_AssertRaisesBaseContext():warnings.warn('%r is an invalid keyword argument for this function' % next( iter(kwargs)), DeprecationWarning, 3)
-
Line 61, col. 12 in
compute_class_weight():raise ValueError("class_weight must be dict, 'balanced', or None, got: %r" % class_weight)
-
Line 63, col. 12 in
_BaseComposition():raise ValueError('Names provided are not unique: {0!r}'.format(list(names))) -
Line 67, col. 12 in
_BaseComposition():raise ValueError('Estimator names conflict with constructor arguments: {0!r}'.format(sorted(invalid_names))) -
Line 71, col. 12 in
_BaseComposition():raise ValueError('Estimator names must not contain __: got {0!r}'.format( invalid_names))
-
Line 341, col. 12 in
_IgnoreWarnings():raise RuntimeError('Cannot enter %r twice' % self) -
Line 351, col. 12 in
_IgnoreWarnings():raise RuntimeError('Cannot exit %r without entering first' % self) -
Line 391, col. 12 in
assert_raise_message():raise AssertionError( 'Error message does not include the expected string: %r. Observed error message: %r' % (message, error_message)) -
Line 655, col. 12 in
all_estimators():raise ValueError( "Parameter type_filter must be 'classifier', 'regressor', 'transformer', 'cluster' or None, got %s." % repr(type_filter))
-
Line 141, col. 12 in
_num_samples():raise TypeError( 'Singleton array %r cannot be considered a valid collection.' % x) -
Line 226, col. 8 in
check_consistent_length():raise ValueError( 'Found input variables with inconsistent numbers of samples: %r' % [int(l) for l in lengths]) -
Line 492, col. 8 in
check_array():raise ValueError( 'force_all_finite should be a bool or "allow-nan". Got {!r} instead'.format(force_all_finite)) -
Line 569, col. 12 in
check_array():raise ValueError( 'Found array with %d sample(s) (shape=%s) while a minimum of %d is required%s.' % (n_samples, shape_repr, ensure_min_samples, context)) -
Line 577, col. 12 in
check_array():raise ValueError( 'Found array with %d feature(s) (shape=%s) while a minimum of %d is required%s.' % (n_features, shape_repr, ensure_min_features, context)) -
Line 803, col. 4 in
check_random_state():raise ValueError( '%r cannot be used to seed a numpy.random.RandomState instance' % seed)