# Illustrate per-class precision/recall analysis using scikit-learn.
# Original author: sergey v
#
# NOTE(review): this is a flat script — it expects X_train, y_train,
# X_test, y_test to already be in scope (presumably from an earlier
# cell/section; y_test exposes `.values`, so it looks like a pandas
# Series — confirm against the caller). Class labels are assumed to be
# the integers 1..n_classes — TODO confirm against the data source.
import numpy as np

from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import average_precision_score, precision_recall_curve

# Fit a forest and report plain accuracy on the held-out set.
etc = ExtraTreesClassifier()
etc.fit(X_train, y_train)
y_pred = etc.predict(X_test)
print("tree test set score: {:.2f}".format(np.mean(y_pred == y_test)))
print("Actual labels:\n {}".format(y_test.values))
print("tree predictions:\n {}".format(y_pred))

# Number of distinct classes (labels assumed to be 1..n_classes).
n_classes = 6

# One-hot encode y_test and y_pred. Vectorized equivalent of the
# per-row loop: row i gets a 1 in the column of its label; labels are
# 1-based, hence the -1 column shift.
y_test_orig = y_test.values
y_test = np.zeros((len(y_test_orig), n_classes))
y_test[np.arange(len(y_test_orig)), y_test_orig - 1] = 1

y_pred_orig = y_pred
y_pred = np.zeros((len(y_pred_orig), n_classes))
y_pred[np.arange(len(y_pred_orig)), y_pred_orig - 1] = 1

Y_test = y_test
y_score = y_pred

# Per-class precision/recall curves and average precision.
# NOTE(review): y_score holds hard 0/1 predictions, so each PR "curve"
# has only one non-trivial operating point; etc.predict_proba(X_test)
# would give a real score to threshold over — confirm intent before
# changing, since it alters the printed averages.
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
    precision[i], recall[i], _ = precision_recall_curve(Y_test[:, i], y_score[:, i])
    average_precision[i] = average_precision_score(Y_test[:, i], y_score[:, i])

# A "micro-average": quantifying score on all classes jointly by
# flattening the indicator matrices into one binary problem.
precision["micro"], recall["micro"], _ = precision_recall_curve(
    Y_test.ravel(), y_score.ravel()
)
average_precision["micro"] = average_precision_score(
    Y_test, y_score, average="micro"
)

print('Average precision score, micro-averaged over all classes: {0:0.2f}'
      .format(average_precision["micro"]))