from pathlib import Path

import cv2
import pandas as pd
import torch
from PIL import Image

from architectures.model_mapping import get_model
from configs.dataset_params import dataset_constants, normalize_params
from evaluation.diversity import MultiKCrossChannelMaxPooledSum
from get_data import get_augmentation
from load_model import extract_sel_mean_std_bias_assignemnt

def select_with_diversity(dataset="CUB2011", arch="resnet50", seed=123456, model_type="qsenn",
                          n_features=50, n_per_class=5, img_size=448, reduced_strides=False,
                          folder=None):
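    """Select, for every CUB-200-2011 class, the training images on which the
    finetuned model is most diverse, and write them to a new
    image_class_labels.txt in the original CUB annotation format."""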
    n_classes = dataset_constants[dataset]["num_classes"]
    model = get_model(arch, n_classes, reduced_strides)
    if folder is None:
        folder = Path.home() / f"tmp/{arch}/{dataset}/{seed}/"
    print(folder)
    state_dict = torch.load(folder / f"{model_type}_{n_features}_{n_per_class}_FinetunedModel.pth")
    selection = torch.load(folder / f"SlDD_Selection_{n_features}.pt")
    state_dict['linear.selection'] = selection
    feature_sel, sparse_layer, current_mean, current_std, bias_sparse = extract_sel_mean_std_bias_assignemnt(state_dict)
    model.set_model_sldd(feature_sel, sparse_layer, current_mean, current_std, bias_sparse)
    model.load_state_dict(state_dict)
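    # The finetuned model ends in a sparse linear layer; its weight matrix W
    # (one row per class, one column per selected feature) drives the
    # per-class diversity computation below.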
    W = model.linear.layer.weight
    TR = get_augmentation(0.1, img_size, False, False, True, True, normalize_params[dataset])
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    model.eval()
    # Load the CUB-200-2011 annotation files: image ids, class labels, and file names.
    data_dir = Path.home() / "tmp/Datasets/CUB200/CUB_200_2011/"
    labels = pd.read_csv(data_dir / "image_class_labels.txt", sep=' ', names=['img_id', 'target'])
    namelist = pd.read_csv(data_dir / "images.txt", sep=' ', names=['img_id', 'file_name'])
    Label_txt = pd.DataFrame({'img_id': pd.Series(dtype='int'), 'target': pd.Series(dtype='int')})
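    # Score every image of every class and keep only the most diverse ones.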
    with torch.no_grad():
        for t in range(1, n_classes + 1):  # CUB class labels are 1-based
            print("in class:", t)
            img_list = labels[labels['target'] == t]
            l = t - 1  # corresponding 0-based row in the weight matrix
            weights = W[l, :]
            k = (weights > 0).sum().item()  # number of positively weighted features for this class
            imgid_diver = []  # (img_id, diversity) pairs for this class
            for i in img_list['img_id']:
                filename = namelist.loc[namelist['img_id'] == i, 'file_name'].values[0]
                img = cv2.imread(str(data_dir / "images" / filename))  # cv2.imread expects a str path
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                img = Image.fromarray(img)
                img = TR(img)
                img = img.unsqueeze(0).to(device)
                output, featuremaps = model(img, with_feature_maps=True, with_final_features=False)
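                # Diversity: MultiKCrossChannelMaxPooledSum scores, for 1..k
                # features, how the class's positively weighted feature maps
                # spread over the image; locality[k-1] (all k features) serves
                # as the per-image diversity score.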
                localizer = MultiKCrossChannelMaxPooledSum(range(1, k + 1), W, None)
                localizer(output.to(device), featuremaps.to(device))
                locality, exclusive_locality = localizer.get_result()
                diversity = locality[k - 1].item()
                imgid_diver.append((i, diversity))
            # Keep the four most diverse images of this class.
            top_k = sorted(imgid_diver, key=lambda x: x[1], reverse=True)[:4]
            top_k_imgids = [imgid for imgid, div in top_k]
            new_data = pd.DataFrame({'img_id': top_k_imgids, 'target': [t] * len(top_k_imgids)})
            Label_txt = pd.concat([Label_txt, new_data], ignore_index=True)
    Label_txt.to_csv('image_class_labels.txt', sep=' ', index=False, header=False)
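
# The resulting image_class_labels.txt (written to the working directory) uses
# the same "<img_id> <target>" layout as the original CUB annotation file, so
# it can serve as a filtered drop-in replacement.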

if __name__ == "__main__":
    select_with_diversity(dataset="CUB2011", arch="resnet50", seed=123456, model_type="qsenn",
                          n_features=50, n_per_class=5, img_size=448, reduced_strides=False,
                          folder=None)