Building the network:
mynn.py:
import torch
from torch import nn


class mynn(nn.Module):
    def __init__(self):
        super(mynn, self).__init__()
        # Each hidden block: fully connected layer -> batch norm -> ReLU
        self.layer1 = nn.Sequential(
            nn.Linear(3520, 4096),   # input: 3520 flux values per spectrum
            nn.BatchNorm1d(4096),
            nn.ReLU(True)
        )
        self.layer2 = nn.Sequential(
            nn.Linear(4096, 4096),
            nn.BatchNorm1d(4096),
            nn.ReLU(True)
        )
        self.layer3 = nn.Sequential(
            nn.Linear(4096, 4096),
            nn.BatchNorm1d(4096),
            nn.ReLU(True)
        )
        self.layer4 = nn.Sequential(
            nn.Linear(4096, 4096),
            nn.BatchNorm1d(4096),
            nn.ReLU(True)
        )
        self.layer5 = nn.Sequential(
            nn.Linear(4096, 3072),
            nn.BatchNorm1d(3072),
            nn.ReLU(True)
        )
        self.layer6 = nn.Sequential(
            nn.Linear(3072, 2048),
            nn.BatchNorm1d(2048),
            nn.ReLU(True)
        )
        self.layer7 = nn.Sequential(
            nn.Linear(2048, 1024),
            nn.BatchNorm1d(1024),
            nn.ReLU(True)
        )
        self.layer8 = nn.Sequential(
            nn.Linear(1024, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(True)
        )
        self.layer9 = nn.Sequential(
            nn.Linear(256, 64),
            nn.BatchNorm1d(64),
            nn.ReLU(True)
        )
        self.layer10 = nn.Sequential(
            nn.Linear(64, 32),
            nn.BatchNorm1d(32),
            nn.ReLU(True)
        )
        self.layer11 = nn.Sequential(
            nn.Linear(32, 3)   # 3 output classes: star / quasar / galaxy
        )

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        x = self.layer6(x)
        x = self.layer7(x)
        x = self.layer8(x)
        x = self.layer9(x)
        x = self.layer10(x)
        x = self.layer11(x)
        return x
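A quick shape check confirms the wiring before training. This is a minimal sketch, not part of the original post; the batch size of 4 is arbitrary (note that BatchNorm1d rejects a batch of one sample in training mode, so eval mode is used here):

import torch
from mynn import mynn

net = mynn()
net.eval()                     # BatchNorm uses running stats; any batch size works
dummy = torch.randn(4, 3520)   # batch of 4 fake spectra, 3520 flux values each
out = net(dummy)
print(out.shape)               # expected: torch.Size([4, 3])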
Redefining the Dataset: mydataset.py

import numpy as np
from torch.utils import data


class mydataset(data.Dataset):
    def __init__(self, csv_file, root_dir=None, transform=None):
        # landmarks_frame is a NumPy matrix: one spectrum per row,
        # with the class label stored in the last column
        self.landmarks_frame = np.loadtxt(open(csv_file, "rb"), delimiter=",")
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.landmarks_frame)

    def __getitem__(self, idx):
        row = self.landmarks_frame[idx, :]
        label = row[-1]        # last column: the class label
        datafit = row[:-1]     # remaining 3520 columns: the spectrum
        return label, datafit
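A short sanity check of the Dataset (a sketch; the CSV path is the one assumed by the training script below):

from mydataset import mydataset

ds = mydataset(csv_file="G:\\DATA\\train.csv")
print(len(ds))               # number of spectra in the training set
label, spec = ds[0]
print(label, spec.shape)     # class label and the (3520,) feature vector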
Main program: main.py

import time

import torch
from torch import nn, optim

import mydataset
import mynn

if __name__ == '__main__':
    # With num_workers > 0, the DataLoader must be created under '__main__'
    # (on Windows) to avoid a multiprocessing spawn error
    data_train = mydataset.mydataset(csv_file="G:\\DATA\\train.csv",
                                     root_dir=None, transform=None)
    data_test = mydataset.mydataset(csv_file="G:\\DATA\\test.csv",
                                    root_dir=None, transform=None)
    data_loader_train = torch.utils.data.DataLoader(dataset=data_train,
                                                    batch_size=256,
                                                    shuffle=True,
                                                    num_workers=0,
                                                    pin_memory=True)
    data_loader_test = torch.utils.data.DataLoader(dataset=data_test,
                                                   batch_size=256,
                                                   shuffle=True,
                                                   num_workers=0,
                                                   pin_memory=True)
    print("**dataloader done**")

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = mynn.mynn().to(device)

    # Loss function
    criterion = nn.CrossEntropyLoss()
    # Optimizer
    # optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999),
                           eps=1e-08, weight_decay=1e-4)

    n_epochs = 1000
    s_time = time.time()
    for epoch in range(n_epochs):
        running_loss = 0.0
        running_correct = 0.0
        print('Epoch {}/{}'.format(epoch, n_epochs))

        model.train()
        for label, datafit in data_loader_train:
            x_train = datafit.float().to(device)
            y_train = label.long().to(device)

            outputs = model(x_train)
            _, pred = torch.max(outputs, 1)
            optimizer.zero_grad()
            loss = criterion(outputs, y_train)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            running_correct += torch.sum(pred == y_train).item()

        # Evaluation: switch BatchNorm to its running statistics and
        # skip gradient tracking, otherwise test accuracy is distorted
        model.eval()
        testing_correct = 0.0
        with torch.no_grad():
            for label, datafit in data_loader_test:
                x_test = datafit.float().to(device)
                y_test = label.long().to(device)
                outputs = model(x_test)
                _, pred = torch.max(outputs, 1)
                testing_correct += torch.sum(pred == y_test).item()

        print('Loss is:{:.4f}, Train Accuracy is:{:.4f}%, Test Accuracy '
              'is:{:.4f}%'.format(running_loss / len(data_train),
                                  100 * running_correct / len(data_train),
                                  100 * testing_correct / len(data_test)))

    e_time = time.time()
    print('time_run is :', e_time - s_time)
    print('*******done******')
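The training script never persists the learned weights. A minimal sketch of saving and reloading them after the training loop finishes (the file name net_params.pth is an assumption, not from the original post):

import torch
import mynn

# after training:
torch.save(model.state_dict(), "net_params.pth")   # hypothetical file name

# later, for inference:
model = mynn.mynn()
model.load_state_dict(torch.load("net_params.pth"))
model.eval()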
Writing the astronomical data to CSV: main.py

# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""
import os

import numpy as np
from astropy.io import fits
from sklearn.model_selection import train_test_split


def getData(fitPath, cla):
    # Collect all .fit files in the directory
    fileList = []
    files = os.listdir(fitPath)   # names of the entries in the directory
    for f in files:
        if os.path.isfile(fitPath + '/' + f) and f.endswith(".fit"):
            fileList.append(fitPath + '/' + f)

    rows = []
    for path in fileList:
        with fits.open(path) as f:
            header = f[0].header           # header keywords of the FITS file
            SPEC_CLN = header['SPEC_CLN']  # spectral class (read for reference, unused below)
            SN_G = header['SN_G']          # g-band signal-to-noise (unused below)
            NAXIS1 = header['NAXIS1']      # length of the spectrum
            COEFF0 = header['COEFF0']      # wavelength solution:
            COEFF1 = header['COEFF1']      # log10(wave) = COEFF0 + pixel * COEFF1
            # Wavelength (in Angstroms) of each pixel
            logwavelength = COEFF0 + np.arange(NAXIS1) * COEFF1
            wave = 10 ** logwavelength
            # Find the pixel where the wavelength crosses 4000 Angstroms
            start = 0
            for i in range(NAXIS1 - 1):
                if wave[i] <= 4000 and wave[i + 1] >= 4000:
                    start = i
            spec = f[0].data[0, :]          # first row of the FITS data: the flux
            spec = np.array(spec[start:start + 3521])
            spec[3520] = cla                # overwrite the last slot with the class label
            rows.append(spec)
    return np.row_stack(rows)


if __name__ == '__main__':
    x = getData(r"G:\DATA\STAR", 0)
    x_train, x_test = train_test_split(x, test_size=0.1, random_state=0)
    y = getData(r"G:\DATA\QSO", 1)
    y_train, y_test = train_test_split(y, test_size=0.1, random_state=0)
    x_train = np.row_stack((x_train, y_train))
    x_test = np.row_stack((x_test, y_test))
    z = getData(r"G:\DATA\GALAXY", 2)
    z_train, z_test = train_test_split(z, test_size=0.1, random_state=0)
    x_train = np.row_stack((x_train, z_train))
    x_test = np.row_stack((x_test, z_test))
    np.savetxt(r"G:\DATA\train.csv", x_train, delimiter=',')
    np.savetxt(r"G:\DATA\test.csv", x_test, delimiter=',')
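A quick check that the generated CSV has the expected layout (a sketch; 3521 columns = 3520 flux values starting near 4000 Angstroms, plus 1 label):

import numpy as np

train = np.loadtxt(open(r"G:\DATA\train.csv", "rb"), delimiter=",")
print(train.shape)                             # expected: (n_samples, 3521)
labels = train[:, -1]
print(np.unique(labels, return_counts=True))   # class counts: 0=STAR, 1=QSO, 2=GALAXY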
Source: cnblogs (博客园)
Author: invisible_man
Link: https://www.cnblogs.com/invisible2/p/11523330.html