 import logging
 import functools
 
+from inspect import getcallargs
+
 import numpy as np
 
 from sklearn.metrics.classification import (_check_targets, _prf_divide,
 from sklearn.utils.fixes import bincount
 from sklearn.utils.multiclass import unique_labels
 
+try:
+    from inspect import signature
+except ImportError:
+    from sklearn.externals.funcsigs import signature
+
+
 LOGGER = logging.getLogger(__name__)
 
 
@@ -563,10 +571,10 @@ def geometric_mean_score(y_true,
 
 
 def make_index_balanced_accuracy(alpha=0.1, squared=True):
-    """Balance any scoring function using the indexed balanced accuracy
+    """Balance any scoring function using the index balanced accuracy
 
     This factory function wraps scoring function to express it as the
-    indexed balanced accuracy (IBA). You need to use this function to
+    index balanced accuracy (IBA). You need to use this function to
     decorate any scoring function.
 
     Parameters
@@ -582,7 +590,7 @@ def make_index_balanced_accuracy(alpha=0.1, squared=True):
     -------
     iba_scoring_func : callable,
         Returns the scoring metric decorated which will automatically compute
-        the indexed balanced accuracy.
+        the index balanced accuracy.
 
     Examples
     --------
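
A minimal usage sketch of the decorator; the import path and toy labels below are illustrative assumptions, not part of the diff. The factory returns a decorator, so wrapping an existing scorer such as geometric_mean_score yields a score weighted by the IBA.

    from imblearn.metrics import geometric_mean_score
    from imblearn.metrics import make_index_balanced_accuracy

    # Wrap the scorer; alpha and squared control the IBA weighting.
    iba_gmean = make_index_balanced_accuracy(alpha=0.1, squared=True)(
        geometric_mean_score)

    # Toy labels, purely illustrative.
    y_true = [0, 0, 1, 1, 1]
    y_pred = [0, 1, 1, 1, 0]
    print(iba_gmean(y_true, y_pred))
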
@@ -603,21 +611,16 @@ def compute_score(*args, **kwargs):
             # Square if desired
             if squared:
                 _score = np.power(_score, 2)
-            # args will contain the y_pred and y_true
-            # kwargs will contain the other parameters
-            labels = kwargs.get('labels', None)
-            pos_label = kwargs.get('pos_label', 1)
-            average = kwargs.get('average', 'binary')
-            sample_weight = kwargs.get('sample_weight', None)
-            # Compute the sensitivity and specificity
-            dict_sen_spe = {
-                'labels': labels,
-                'pos_label': pos_label,
-                'average': average,
-                'sample_weight': sample_weight
-            }
-            sen, spe, _ = sensitivity_specificity_support(*args,
-                                                          **dict_sen_spe)
+            # Collect the arguments that were passed to the scoring function
+            tags_scoring_func = getcallargs(scoring_func, *args, **kwargs)
+            # Get the signature of the sens/spec function
+            sens_spec_sig = signature(sensitivity_specificity_support)
+            # Filter the inputs required by the sens/spec function
+            tags_sens_spec = sens_spec_sig.bind(**tags_scoring_func)
+            # Call the sens/spec function
+            sen, spe, _ = sensitivity_specificity_support(
+                *tags_sens_spec.args,
+                **tags_sens_spec.kwargs)
             # Compute the dominance
             dom = sen - spe
             return (1. + alpha * dom) * _score
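
The added lines lean on two standard-library introspection helpers: getcallargs maps the scorer's positional and keyword arguments onto its parameter names, and signature(...).bind checks that mapping against sensitivity_specificity_support and splits it back into args/kwargs for the call. A small sketch with hypothetical stand-in functions (note that bind raises TypeError if the scorer exposes a parameter the target function does not accept, so the pattern assumes the scorer's parameters are a subset of the target's):

    from inspect import getcallargs, signature

    # Hypothetical stand-ins for the scorer and the helper it forwards to;
    # they share parameter names, which is what the pattern relies on.
    def scorer(y_true, y_pred, average='binary'):
        return 0.0

    def helper(y_true, y_pred, average=None):
        return y_true, y_pred, average

    call_args = getcallargs(scorer, [0, 1], [1, 1], average='macro')
    # -> {'y_true': [0, 1], 'y_pred': [1, 1], 'average': 'macro'}
    bound = signature(helper).bind(**call_args)
    print(helper(*bound.args, **bound.kwargs))
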
@@ -640,7 +643,7 @@ def classification_report_imbalanced(y_true,
     Specific metrics have been proposed to evaluate the classification
     performed on imbalanced dataset. This report compiles the
     state-of-the-art metrics: precision/recall/specificity, geometric
-    mean, and indexed balanced accuracy of the
+    mean, and index balanced accuracy of the
     geometric mean.
 
     Parameters
@@ -674,7 +677,7 @@ def classification_report_imbalanced(y_true,
     -------
     report : string
         Text summary of the precision, recall, specificity, geometric mean,
-        and indexed balanced accuracy.
+        and index balanced accuracy.
 
     Examples
     --------
@@ -746,7 +749,7 @@ class 2 1.00 0.67 1.00 0.80 0.82 0.69\
         labels=labels,
         average=None,
         sample_weight=sample_weight)
-    # Indexed balanced accuracy
+    # Index balanced accuracy
     iba_gmean = make_index_balanced_accuracy(
         alpha=alpha, squared=True)(geometric_mean_score)
     iba = iba_gmean(
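
For context, a sketch of how the report that carries this IBA-weighted geometric mean is typically produced; the toy labels are illustrative and the example assumes the public imblearn.metrics entry point.

    from imblearn.metrics import classification_report_imbalanced

    y_true = [0, 0, 0, 1, 1, 2]
    y_pred = [0, 0, 1, 1, 1, 2]
    # The report appends geometric-mean and IBA columns to the usual
    # per-class precision/recall/specificity figures.
    print(classification_report_imbalanced(y_true, y_pred, alpha=0.1))
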