import time
start = time.time()

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from PIL import Image

models_num = -9              # 9 target columns are appended to `data` below; a negative index counts from the right
bin_rate = 256
pixels = 65536 // bin_rate   # number of images per spectrum (256 for bin_rate=256)
dpi = 100
height = 5
width = 5
element = dpi * height * dpi * width   # pixels per image (500 x 500)
epochs = 20
batch_size = 32
device = torch.device("cpu")
# device = torch.device("cuda:0")
lr = 0.001
weight_decay = 0.005
p_path = "../5_5_pictures_test/"
component = 1                # for model n, set component = n
model_path = "../model4_train/model{}".format(component)

data = []
file_list = np.array([10, 11])   # unknown spectra
N_size = ["a", "b", "c"]         # N = 3
detail = "Neg_sorted_bin{}_scaling_by_total".format(bin_rate)
log_file = "{}_pred.log".format(component)
with open(log_file, "w") as f:
    f.write("initial OK \n")

# read the unknown spectra
for i in file_list:
    for j in N_size:
        name = "{}{}_{}".format(i, j, detail)
        for k in np.arange(pixels):
            with Image.open(p_path + "{}_{}.jpg".format(name, k + 1)) as f:
                im = np.array(f, dtype=int)
            # binarize on the blue channel: dark pixels (< 220) become 1
            im_bin = np.array(im[:, :, 2] < 220, dtype=int)
            data.append(im_bin.flatten())
    with open(log_file, "a") as f:
        f.write(str(i) + " is OK \n")
data = np.array(data, dtype=int)

# append the 9 target columns (one per component model)
for i in np.arange(1, 10):
    target = pd.read_csv("../target_file/target_for_{}.csv".format(i), header=0).values
    target_list = []
    for j in range(target.shape[0]):
        if target[j, 0] in file_list:
            target_list.append(target[j, 1])
    # one label per image: repeat each spectrum's label pixels*len(N_size) times
    target_list = np.repeat(target_list, pixels * len(N_size))
    data = np.hstack((data, target_list.reshape(-1, 1)))
data = np.array(data, dtype=int)
with open(log_file, "a") as f:
    f.write("data shape: " + str(data.shape[0]) + "x" + str(data.shape[1]) + "\n")
    f.write("Read Over \n")

def to_categorical(y, num_classes):
    """One-hot encode an integer label array."""
    return np.eye(num_classes, dtype='uint8')[y]

# select this component's target column, then drop all 9 target columns
y_target = data[:, models_num + component - 1]
y_target = to_categorical(y_target, 2)
data = data[:, :models_num]
data = np.array(data.reshape(-1, 1, dpi * height, dpi * width), dtype=int)  # (N, 1, 500, 500)

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(dim=1)
        self.pool = nn.MaxPool2d(2,        # kernel size
                                 stride=2, # default is kernel_size
                                 )
        self.conv1 = nn.Conv2d(1,   # input channels
                               20,  # output channels
                               5,   # kernel size
                               padding='same',
                               )
        self.conv2 = nn.Conv2d(20, 50, 5, padding='same')
        self.fc1 = nn.Linear(781250,  # input dimension: 125 * 125 * 50 after two 2x2 pools
                             500,     # output dimension
                             )
        self.fc2 = nn.Linear(500, 2)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        x = self.pool(x)    # 500 -> 250
        x = self.conv2(x)
        x = self.relu(x)
        x = self.pool(x)    # 250 -> 125
        x = x.view(x.size()[0], -1)  # flatten
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.softmax(x)
        return x

# load the trained model
net = Net()
net = net.to(device)
criterion = nn.BCELoss()  # binary cross entropy on the softmax outputs
optimizer = optim.Adam(net.parameters(), lr=lr, weight_decay=weight_decay)
net.load_state_dict(torch.load(model_path, map_location=device))
net.eval()  # inference mode
print(net)
print(criterion)
print(optimizer)

class Pred_dataset(Dataset):
    def __init__(self, X, Y):
        self.data = X
        self.labels = Y

    def __getitem__(self, index):
        return self.data[index], self.labels[index]

    def __len__(self):
        return len(self.data)

# predict the unknown spectra
sample_list = []
loss_list = []
acc_list = []
plt.figure(figsize=(6 * len(N_size), 5 * len(file_list)))
for i in range(len(file_list)):
    for j in range(len(N_size)):
        sample_list.append("{}{}".format(file_list[i], N_size[j]))
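        # Each (file, N_size) pair contributes `pixels` consecutive rows to
        # `data` and `y_target`, so block number len(N_size)*i + j slices
        # out exactly one sample's images below.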
        X_data_i = np.array(data[pixels * (len(N_size) * i + j):pixels * (len(N_size) * i + j + 1)])
        y_target_i = y_target[pixels * (len(N_size) * i + j):pixels * (len(N_size) * i + j + 1)]
        pred_dataset = Pred_dataset(X=X_data_i, Y=y_target_i)
        pred_loader = DataLoader(pred_dataset, batch_size=batch_size, shuffle=False)
        sum_loss = 0.0
        sum_correct = 0
        sum_total = 0
        with torch.no_grad():  # no gradients needed for prediction
            for (inputs, labels) in pred_loader:
                inputs, labels = inputs.to(device), labels.to(device)
                inputs = inputs.float()
                labels = labels.float()
                outputs = net(inputs)
                loss = criterion(outputs, labels)
                sum_loss += loss.item()
                _, predicted = outputs.max(1)
                sum_total += labels.size(0)
                new_labels = labels.argmax(dim=1)  # one-hot -> class index
                sum_correct += (predicted == new_labels).sum().item()
        # sum_loss accumulates per-batch means, so rescale by batch_size / dataset size
        mean_loss = sum_loss * batch_size / len(pred_loader.dataset)
        accuracy = float(sum_correct / sum_total)
        print("{}{} pred mean loss={}, accuracy={}".format(file_list[i], N_size[j], mean_loss, accuracy))
        loss_list.append(mean_loss)
        acc_list.append(accuracy)
        with open(log_file, "a") as f:
            f.write("pred accuracy={}{} {}\n".format(file_list[i], N_size[j], accuracy))

# output
output = pd.DataFrame()
output["sample"] = sample_list
output["loss"] = loss_list
output["accuracy"] = acc_list
output.to_csv("./{}_predict_result.csv".format(component), index=False)

elapsed_time = time.time() - start
print("elapsed_time: {} [sec]".format(elapsed_time))
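# A minimal sketch, not executed above: instead of editing `component` by hand
# for each of the nine models, the index could be read from the command line.
# This assumes the script is saved as e.g. `predict.py` (hypothetical name):
#
#     import sys
#     component = int(sys.argv[1]) if len(sys.argv) > 1 else 1  # hypothetical CLI argument
#
# which would allow `python predict.py 3` to run the model-3 prediction.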