from abc import ABC, abstractmethod
import copy
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import argparse
import os
import random
import shutil
import time
import torch.utils.data as data
import sys
import pickle
import logging
from tqdm import tqdm
sys.path.append("..")
from helpers.utils import *
from helpers.metrics import *
class BaseMethod(ABC):
    """Abstract base class for learning-to-defer methods.

    Subclasses implement a classifier-plus-rejector system: ``fit`` trains it
    and ``test`` evaluates it, returning a dict of predictions and scores.
    """

    @abstractmethod
    def __init__(self, *args, **kwargs):
        pass

    @abstractmethod
    def fit(self, *args, **kwargs):
        """Fit the model; after this call the model must be ready to evaluate."""
        pass

    def fit_hyperparam(self, *args, **kwargs):
        """Optionally fit while tuning hyperparameters on a validation set.

        Default implementation simply delegates to :meth:`fit`; subclasses
        may override to add a hyperparameter search.
        """
        return self.fit(*args, **kwargs)

    @abstractmethod
    def test(self, dataloader):
        """Evaluate on ``dataloader`` and return a dict with the keys:

        'defers': deferred binary predictions
        'preds': classifier predictions
        'labels': labels
        'hum_preds': human predictions
        'rej_score': a real score for the rejector, the higher the more likely to be rejected
        'class_probs': probability of the classifier for each class (can be scores as well)
        """
        pass
class BaseSurrogateMethod(BaseMethod):
    """Abstract base for learning-to-defer methods trained with a surrogate loss.

    The wrapped model is expected to output ``n_classes + 1`` logits per
    example, where the extra last logit is the "defer to human" option
    (this is how :meth:`test` interprets the output; see the argmax against
    the last index there).
    """

    def __init__(self, alpha, plotting_interval, model, device):
        """
        alpha: surrogate-loss hyperparameter (consumed by subclasses' loss)
        plotting_interval: log training stats every this many batches
        model: torch module producing n_classes + 1 logits
        device: torch device for model and data
        """
        self.alpha = alpha
        self.plotting_interval = plotting_interval
        self.model = model
        self.device = device

    @abstractmethod
    def surrogate_loss_function(self, outputs, hum_preds, data_y):
        """Surrogate loss from raw model outputs, human predictions and labels."""
        pass

    def fit_epoch(self, dataloader, optimizer, verbose=False, epoch=1):
        """Train ``self.model`` for one epoch.

        dataloader: yields (data_x, data_y, hum_preds) batches
        optimizer: torch optimizer instance
        verbose: if True, log loss/accuracy every ``plotting_interval`` batches
        epoch: epoch number (for logging only)
        """
        batch_time = AverageMeter()
        losses = AverageMeter()
        top1 = AverageMeter()
        end = time.time()
        self.model.train()
        for batch, (data_x, data_y, hum_preds) in enumerate(dataloader):
            data_x = data_x.to(self.device)
            data_y = data_y.to(self.device)
            hum_preds = hum_preds.to(self.device)
            outputs = self.model(data_x)
            loss = self.surrogate_loss_function(outputs, hum_preds, data_y)
            # Abort on NaN *before* backward/step so NaN gradients never
            # corrupt the model weights (previously the check ran after the
            # optimizer step, leaving the model unusable).
            if torch.isnan(loss):
                print("Nan loss")
                logging.warning("NAN LOSS")
                break
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            prec1 = accuracy(outputs.data, data_y, topk=(1,))[0]
            losses.update(loss.data.item(), data_x.size(0))
            top1.update(prec1.item(), data_x.size(0))
            batch_time.update(time.time() - end)
            end = time.time()
            if verbose and batch % self.plotting_interval == 0:
                logging.info(
                    "Epoch: [{0}][{1}/{2}]\t"
                    "Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
                    "Loss {loss.val:.4f} ({loss.avg:.4f})\t"
                    "Prec@1 {top1.val:.3f} ({top1.avg:.3f})".format(
                        epoch,
                        batch,
                        len(dataloader),
                        batch_time=batch_time,
                        loss=losses,
                        top1=top1,
                    )
                )

    def fit(
        self,
        dataloader_train,
        dataloader_val,
        dataloader_test,
        epochs,
        optimizer,
        lr,
        scheduler=None,
        verbose=True,
        test_interval=5,
    ):
        """Train for ``epochs`` epochs and return final test metrics.

        optimizer: optimizer *class* (e.g. torch.optim.Adam), instantiated here
        scheduler: optional scheduler *factory* taking (optimizer, total_steps)
        test_interval: evaluate on the validation set every this many epochs
        Returns: deferral metrics dict computed on ``dataloader_test``.
        """
        optimizer = optimizer(self.model.parameters(), lr=lr)
        if scheduler is not None:
            # NOTE(review): the scheduler is sized for per-batch stepping
            # (len(dataloader_train) * epochs steps) but is stepped once per
            # epoch below — confirm which cadence is intended.
            scheduler = scheduler(optimizer, len(dataloader_train) * epochs)
        for epoch in tqdm(range(epochs)):
            self.fit_epoch(dataloader_train, optimizer, verbose, epoch)
            if verbose and epoch % test_interval == 0:
                data_test = self.test(dataloader_val)
                logging.info(compute_deferral_metrics(data_test))
            if scheduler is not None:
                scheduler.step()
        final_test = self.test(dataloader_test)
        return compute_deferral_metrics(final_test)

    def test(self, dataloader):
        """Evaluate the model on ``dataloader``.

        Returns a dict of numpy arrays with keys 'defers', 'labels',
        'hum_preds', 'preds', 'rej_score', 'class_probs' (see BaseMethod.test).
        The last output column is treated as the defer option: an example is
        deferred when the overall argmax lands on that column, and the
        rejection score is softmax(defer) - max softmax over the real classes.
        """
        defers_all = []
        truths_all = []
        hum_preds_all = []
        predictions_all = []  # classifier-only predictions
        rej_score_all = []  # rejector score (higher = more likely deferred)
        class_probs_all = []  # classifier class probabilities
        self.model.eval()
        with torch.no_grad():
            for batch, (data_x, data_y, hum_preds) in enumerate(dataloader):
                data_x = data_x.to(self.device)
                data_y = data_y.to(self.device)
                hum_preds = hum_preds.to(self.device)
                outputs = self.model(data_x)
                # Probabilities over real classes only (defer column dropped).
                outputs_class = F.softmax(outputs[:, :-1], dim=1)
                outputs = F.softmax(outputs, dim=1)
                _, predicted = torch.max(outputs.data, 1)
                max_probs, predicted_class = torch.max(outputs.data[:, :-1], 1)
                predictions_all.extend(predicted_class.cpu().numpy())
                n_defer_idx = outputs.shape[1] - 1
                defers_all.extend(
                    (predicted.cpu().numpy() == n_defer_idx).astype(int)
                )
                truths_all.extend(data_y.cpu().numpy())
                hum_preds_all.extend(hum_preds.cpu().numpy())
                # Vectorized: defer probability minus best class probability
                # (equivalent to the former per-example Python loop).
                rej_score_all.extend(
                    (outputs.data[:, -1] - max_probs).cpu().numpy()
                )
                class_probs_all.extend(outputs_class.cpu().numpy())
        # convert to numpy
        defers_all = np.array(defers_all)
        truths_all = np.array(truths_all)
        hum_preds_all = np.array(hum_preds_all)
        predictions_all = np.array(predictions_all)
        rej_score_all = np.array(rej_score_all)
        class_probs_all = np.array(class_probs_all)
        data = {
            "defers": defers_all,
            "labels": truths_all,
            "hum_preds": hum_preds_all,
            "preds": predictions_all,
            "rej_score": rej_score_all,
            "class_probs": class_probs_all,
        }
        return data