Question
Similar SO questions:
- Python Catboost: Multiclass F1 score custom metric
CatBoost tutorials:
- https://catboost.ai/docs/concepts/python-usages-examples.html#user-defined-loss-function
Question
In this question, I have a binary classification problem. After modelling, we get the test set predictions y_pred, and we already have the true test labels y_true.
I would like to get a custom evaluation metric defined by the following equation:
profit = 400*truePositive - 200*falseNegative - 100*falsePositive
Also, since higher profit is better, I would like to maximize the function instead of minimizing it.
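For example, with made-up counts just to illustrate the formula (these numbers are hypothetical, not from my data):

# Hypothetical counts, purely to illustrate the profit formula
tp, fn, fp = 10, 3, 5
profit = 400*tp - 200*fn - 100*fp   # 4000 - 600 - 500 = 2900
print(profit)  # 2900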
How to get this eval_metric in catboost?
Using sklearn
def get_profit(y_true, y_pred):
    tn, fp, fn, tp = sklearn.metrics.confusion_matrix(y_true, y_pred).ravel()
    loss = 400*tp - 200*fn - 100*fp
    return loss

scoring = sklearn.metrics.make_scorer(get_profit, greater_is_better=True)
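Just for reference, this is how I would plug the scorer into cross-validation, assuming the get_profit and scoring definitions above (with sklearn.metrics imported); the estimator and toy data below are placeholders, not my actual pipeline:

# Sketch only: the estimator and the toy data are placeholders
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

rng = np.random.RandomState(0)
X_demo = rng.normal(size=(200, 4))
y_demo = (X_demo[:, 0] + rng.normal(scale=0.5, size=200) > 0).astype(int)

clf = LogisticRegression(max_iter=1000)
profits = cross_val_score(clf, X_demo, y_demo, cv=5, scoring=scoring)
print(profits)  # one profit value per fold; higher is better since greater_is_better=True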
Using catboost
class ProfitMetric(object):
    def get_final_error(self, error, weight):
        return error / (weight + 1e-38)

    def is_max_optimal(self):
        return True

    def evaluate(self, approxes, target, weight):
        assert len(approxes) == 1
        assert len(target) == len(approxes[0])

        approx = approxes[0]

        error_sum = 0.0
        weight_sum = 0.0

        ** I don't know here **

        return error_sum, weight_sum
Question
How to complete the custom eval metric in catboost?
UPDATE
My update so far
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
from catboost import CatBoostClassifier
from sklearn.model_selection import train_test_split

def get_profit(y_true, y_pred):
    tn, fp, fn, tp = sklearn.metrics.confusion_matrix(y_true, y_pred).ravel()
    profit = 400*tp - 200*fn - 100*fp
    return profit

class ProfitMetric:
    def is_max_optimal(self):
        return True  # greater is better

    def evaluate(self, approxes, target, weight):
        assert len(approxes) == 1
        assert len(target) == len(approxes[0])

        approx = approxes[0]

        y_pred = np.rint(approx)
        y_true = np.array(target).astype(int)

        output_weight = 1  # weight is not used

        score = get_profit(y_true, y_pred)

        return score, output_weight

    def get_final_error(self, error, weight):
        return error

df = sns.load_dataset('titanic')
X = df[['survived', 'pclass', 'age', 'sibsp', 'fare']]
y = X.pop('survived')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=100)

model = CatBoostClassifier(metric_period=50,
                           n_estimators=200,
                           eval_metric=ProfitMetric()
                           )

model.fit(X, y, eval_set=(X_test, y_test))  # this fails
Answer 1:
The main difference from yours is:
@staticmethod
def get_profit(y_true, y_pred):
    # the approxes come in as raw log-odds, so convert them to hard class labels first
    y_pred = (expit(y_pred) > 0.5).astype(int)
    y_true = y_true.astype(int)
    # print("ACCURACY:", (y_pred == y_true).mean())
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    loss = 400*tp - 200*fn - 100*fp
    return loss
It's not obvious from the example you linked what the predictions are, but after some inspection it turns out catboost passes the predictions to evaluate() internally as raw log-odds (hat tip @Ben). So, to use confusion_matrix properly, you need to make sure both y_true and y_pred are integer class labels. This is done via:

y_pred = (scipy.special.expit(y_pred) > 0.5).astype(int)
y_true = y_true.astype(int)
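For example, on made-up raw scores the conversion behaves like this (values purely illustrative):

# Illustrative only: raw log-odds -> probabilities -> integer class labels
import numpy as np
from scipy.special import expit

raw = np.array([-2.0, -0.1, 0.3, 1.5])   # raw log-odds, as catboost passes them to evaluate()
probs = expit(raw)                        # sigmoid -> probabilities in (0, 1)
labels = (probs > 0.5).astype(int)        # threshold at 0.5 -> hard class labels
print(probs)   # approx. [0.12 0.48 0.57 0.82]
print(labels)  # [0 0 1 1]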
So the full working code is:
import numpy as np
import seaborn as sns
from catboost import CatBoostClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from scipy.special import expit

df = sns.load_dataset('titanic')
X = df[['survived', 'pclass', 'age', 'sibsp', 'fare']]
y = X.pop('survived')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=100)

class ProfitMetric:

    @staticmethod
    def get_profit(y_true, y_pred):
        # catboost hands evaluate() raw log-odds, so convert them to hard class labels first
        y_pred = (expit(y_pred) > 0.5).astype(int)
        y_true = y_true.astype(int)
        # print("ACCURACY:", (y_pred == y_true).mean())
        tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
        loss = 400*tp - 200*fn - 100*fp
        return loss

    def is_max_optimal(self):
        return True  # greater is better

    def evaluate(self, approxes, target, weight):
        assert len(approxes) == 1
        assert len(target) == len(approxes[0])
        y_true = np.array(target).astype(int)
        approx = approxes[0]
        score = self.get_profit(y_true, approx)
        return score, 1

    def get_final_error(self, error, weight):
        return error

model = CatBoostClassifier(metric_period=50,
                           n_estimators=200,
                           eval_metric=ProfitMetric()
                           )

model.fit(X, y, eval_set=(X_test, y_test))
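If you want to see how the metric evolved during training, you can also inspect the evals result afterwards; I'm not certain under which key catboost stores a user-defined metric, so print the available keys first:

# Inspect per-iteration metric values recorded during training.
# The exact keys (e.g. 'learn', 'validation', 'ProfitMetric') are assumptions; check what's printed.
evals = model.get_evals_result()
print(evals.keys())                     # typically dict_keys(['learn', 'validation'])
for split, metrics in evals.items():
    print(split, list(metrics.keys()))  # metric names recorded for each split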
Answer 2:
By way of example, I implemented a very simple metric.
It counts the number of times y_pred != y_true in a multi-class classifier.
class CountErrors:
    '''Count of wrong predictions'''

    def is_max_optimal(self):
        return False  # lower is better

    def evaluate(self, approxes, target, weight):
        y_pred = np.array(approxes).argmax(0)
        y_true = np.array(target)
        return sum(y_pred != y_true), 1

    def get_final_error(self, error, weight):
        return error
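The argmax(0) works because, in the multi-class case, approxes is a list with one array of raw scores per class, so stacking it and taking the argmax over the class axis gives the predicted label for each sample. A tiny made-up example:

# Illustrative only: approxes has one array of raw scores per class
import numpy as np

approxes = [np.array([ 2.0, -1.0, 0.5]),   # scores for class 0 (one entry per sample)
            np.array([-0.5,  1.5, 0.1]),   # scores for class 1
            np.array([ 0.3,  0.2, 2.2])]   # scores for class 2

y_pred = np.array(approxes).argmax(0)      # argmax over the class axis
print(y_pred)                               # [0 1 2]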
You can see it used if you run this code:
import numpy as np
import pandas as pd
from catboost import CatBoostClassifier
from sklearn.model_selection import train_test_split

class CountErrors:
    '''Count number of wrong predictions'''

    def is_max_optimal(self):
        return False  # lower is better

    def evaluate(self, approxes, target, weight):
        y_pred = np.array(approxes).argmax(0)
        y_true = np.array(target)
        return sum(y_pred != y_true), 1

    def get_final_error(self, error, weight):
        return error

df = pd.read_csv('https://raw.githubusercontent.com/mkleinbort/resource-datasets/master/abalone/abalone.csv')

y = df['sex']
X = df.drop(columns=['sex'])

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=12)

model = CatBoostClassifier(metric_period=50, n_estimators=200, eval_metric=CountErrors())

model.fit(X, y, eval_set=(X_test, y_test))
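As a rough sanity check (not part of the metric itself), you can recompute the error count on the eval_set with an explicit prediction pass; it should be in the same ballpark as the last CountErrors value printed during training:

# Sanity check: recount wrong predictions on the eval_set via model.predict
y_pred_test = model.predict(X_test).ravel()
n_wrong = int(np.sum(y_pred_test != y_test.to_numpy()))
print(n_wrong)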
Hope you can adapt this to your use-case.
Source: https://stackoverflow.com/questions/65462220/how-to-create-custom-eval-metric-for-catboost