[UFLDL] Python Source Code for a Multilayer Neural Network

I finished this code last week, but the results were wrong because I had overlooked a detail in the softmax-related implementation. After the fix it produces correct results: with 200 images and 200 iterations the training accuracy exceeds 90%. The parameter settings still need tuning, and multithreading could in principle speed up training, but that part is still buggy (to be fixed; use with caution).

For the derivation, please refer to the earlier posts.
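
As a quick recap of the part that went wrong (the full derivation is in the earlier posts): the last layer is left linear, a softmax turns it into class probabilities, and with the cross-entropy objective the output-layer residual reduces to a simple difference, which is exactly what calThetaNl in the code below computes:

\hat{y} = \mathrm{softmax}\big(z^{(n_l)}\big), \qquad J = -\log \hat{y}_{c}, \qquad \delta^{(n_l)} = \hat{y} - y

where c is the true class and y its one-hot encoding.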

MSE also works as the objective function, it just needs many more iterations: with 400 images and 2000 iterations the training accuracy reaches 96.75%.
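
For comparison, switching to MSE changes only the output-layer residual; in UFLDL notation

J = \tfrac{1}{2}\,\big\lVert y - a^{(n_l)} \big\rVert^{2}, \qquad \delta^{(n_l)} = -\big(y - a^{(n_l)}\big) \circ f'\big(z^{(n_l)}\big),

and the extra sigmoid-derivative factor f'(z) shrinks the gradient near saturated units, which is presumably why MSE needs so many more iterations than softmax with cross-entropy.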

#coding=utf-8
'''
Created on 20141115

@author: wangshuai13
'''
import numpy
import struct
import math
import random
import time
import threading


class MyThread(threading.Thread):
    """Worker that accumulates gradients over traindata[idx_start:idx_end].

    NOTE: trainANNwithMultiThread() calls run() directly instead of start(),
    so the work executes synchronously in the calling thread and nothing
    actually runs in parallel. Kept per the caveat above; use with caution.
    """
    def __init__(self, threadname, tANN, idx_start, idx_end):
        threading.Thread.__init__(self, name=threadname)
        self.ANN = tANN
        self.idx_start = idx_start
        self.idx_end = idx_end

    def run(self):
        # bug fix: the label index must match the sample index (was hard-coded 0)
        cDetaW, cDetaB, cError = self.ANN.backwardPropogation(
            self.ANN.traindata[self.idx_start], self.idx_start)
        for idx in range(self.idx_start + 1, self.idx_end):
            DetaWtemp, DetaBtemp, Errortemp = self.ANN.backwardPropogation(
                self.ANN.traindata[idx], idx)
            cError += Errortemp
            for idx_W in range(0, len(cDetaW)):
                cDetaW[idx_W] += DetaWtemp[idx_W]
            for idx_B in range(0, len(cDetaB)):
                cDetaB[idx_B] += DetaBtemp[idx_B]
        return cDetaW, cDetaB, cError


def sigmoid(inX):
    return 1.0 / (1.0 + math.exp(-inX))


def softmax(inMatrix):
    # softmax over a 1 x n row vector
    m, n = numpy.shape(inMatrix)
    outMatrix = numpy.mat(numpy.zeros((m, n)))
    soft_sum = 0
    for idx in range(0, n):
        outMatrix[0, idx] = math.exp(inMatrix[0, idx])
        soft_sum += outMatrix[0, idx]
    for idx in range(0, n):
        outMatrix[0, idx] /= soft_sum
    return outMatrix


def tangenth(inX):
    # tanh activation (currently unused; sigmoid is used throughout)
    return (1.0 * math.exp(inX) - 1.0 * math.exp(-inX)) / (1.0 * math.exp(inX) + 1.0 * math.exp(-inX))


def difsigmoid(inX):
    # derivative of the sigmoid: f'(x) = f(x) * (1 - f(x))
    return sigmoid(inX) * (1.0 - sigmoid(inX))


def sigmoidMatrix(inputMatrix):
    # element-wise sigmoid of a matrix
    m, n = numpy.shape(inputMatrix)
    outMatrix = numpy.mat(numpy.zeros((m, n)))
    for idx_m in range(0, m):
        for idx_n in range(0, n):
            outMatrix[idx_m, idx_n] = sigmoid(inputMatrix[idx_m, idx_n])
    return outMatrix


def loadMNISTimage(absFilePathandName, datanum=60000):
    # read the first datanum images from an MNIST idx3-ubyte file
    images = open(absFilePathandName, 'rb')
    buf = images.read()
    images.close()
    index = 0
    magic, numImages, numRows, numColumns = struct.unpack_from('>IIII', buf, index)
    print magic, numImages, numRows, numColumns
    index += struct.calcsize('>IIII')
    if magic != 2051:
        raise Exception("wrong magic number, not an MNIST image file")
    datasize = int(784 * datanum)
    datablock = ">" + str(datasize) + "B"
    nextmatrix = struct.unpack_from(datablock, buf, index)
    # scale pixel values to [0, 1]
    nextmatrix = numpy.array(nextmatrix) / 255.0
    # one 784-pixel row vector per image
    nextmatrix = nextmatrix.reshape(datanum, 1, numRows * numColumns)
    return nextmatrix, numImages


def loadMNISTlabels(absFilePathandName, datanum=60000):
    # read the first datanum labels from an MNIST idx1-ubyte file
    labels = open(absFilePathandName, 'rb')
    buf = labels.read()
    labels.close()
    index = 0
    magic, numLabels = struct.unpack_from('>II', buf, index)
    print magic, numLabels
    index += struct.calcsize('>II')
    if magic != 2049:
        raise Exception("wrong magic number, not an MNIST label file")
    datablock = ">" + str(datanum) + "B"
    nextmatrix = struct.unpack_from(datablock, buf, index)
    nextmatrix = numpy.array(nextmatrix)
    return nextmatrix, numLabels


class MuiltilayerANN(object):
    # NumofNodesinHiddenlayers should be a list of ints
    def __init__(self, NumofHiddenLayers, NumofNodesinHiddenlayers,
                 inputDimension, outputDimension=1, maxIter=50):
        self.trainDataNum = 200        # number of training samples used
        self.decayRate = 0.2           # learning rate alpha
        self.punishFactor = 0.05       # weight-decay factor lambda
        self.eps = 0.00001             # stop when the error improves by less than this
        self.numofhl = NumofHiddenLayers
        self.Nl = int(NumofHiddenLayers + 2)   # total layers incl. input and output
        self.NodesinHidden = []
        for element in NumofNodesinHiddenlayers:
            self.NodesinHidden.append(int(element))
        self.inputDi = int(inputDimension)
        self.outputDi = int(outputDimension)
        self.maxIteration = int(maxIter)

    def setTrainDataNum(self, datanum):
        self.trainDataNum = datanum
        return

    def loadtraindata(self, absFilePathandName):
        self.traindata, self.TotalnumoftrainData = loadMNISTimage(absFilePathandName, self.trainDataNum)
        return

    def loadtrainlabel(self, absFilePathandName):
        self.trainlabel, self.TotalnumofTrainLabels = loadMNISTlabels(absFilePathandName, self.trainDataNum)
        if self.TotalnumofTrainLabels != self.TotalnumoftrainData:
            raise Exception("image count and label count differ")
        return

    def initialweights(self):
        # nodesinLayers lists the width of every layer: input, hidden..., output
        self.nodesinLayers = []
        self.nodesinLayers.append(int(self.inputDi))
        self.nodesinLayers += self.NodesinHidden
        self.nodesinLayers.append(int(self.outputDi))
        # weightMatrix is a list; element idx is the numpy matrix W between
        # layer idx and layer idx+1 (stored as Wij; BP transposes it as needed)
        self.weightMatrix = []
        self.B = []
        for idx in range(0, self.Nl - 1):
            # Xavier's scaling factor:
            # X. Glorot, Y. Bengio. Understanding the difficulty of training
            # deep feedforward neural networks. AISTATS 2010.
            s = math.sqrt(6) / math.sqrt(self.nodesinLayers[idx] + self.nodesinLayers[idx + 1])
            tempMatrix = numpy.zeros((self.nodesinLayers[idx], self.nodesinLayers[idx + 1]))
            for row_m in range(0, self.nodesinLayers[idx]):
                for col_m in range(0, self.nodesinLayers[idx + 1]):
                    tempMatrix[row_m, col_m] = random.random() * 2.0 * s - s   # uniform in [-s, s]
            self.weightMatrix.append(numpy.mat(tempMatrix))
            self.B.append(numpy.mat(numpy.zeros((1, self.nodesinLayers[idx + 1]))))
        return 0

    def printWeightMatrix(self):
        for idx in range(0, int(self.Nl) - 1):
            print self.weightMatrix[idx]
            print self.B[idx]
        return 0

    def forwardPropogation(self, singleDataInput, currentDataIdx):
        Ztemp = []
        Ztemp.append(numpy.mat(singleDataInput) * self.weightMatrix[0] + self.B[0])
        Atemp = []
        for idx in range(1, self.Nl - 1):
            Atemp.append(sigmoidMatrix(Ztemp[idx - 1]))
            Ztemp.append(Atemp[idx - 1] * self.weightMatrix[idx] + self.B[idx])
        # the output layer is linear; softmax is applied in the loss below
        Atemp.append(Ztemp[self.Nl - 2])
        # cross-entropy error of the softmax output for the true class
        errorMat = softmax(Atemp[self.Nl - 2])
        errorsum = -1.0 * math.log(errorMat[0, int(self.trainlabel[currentDataIdx])])
        return Atemp, Ztemp, errorsum

    def calThetaNl(self, Anl, Y, Znl):
        # output-layer residual of softmax + cross-entropy: softmax(z) - y
        thetaNl = softmax(Anl) - Y
        return thetaNl

    def backwardPropogation(self, singleDataInput, currentDataIdx):
        Atemp, Ztemp, temperror = self.forwardPropogation(numpy.mat(singleDataInput), currentDataIdx)
        # Theta holds the residuals, stored in reverse (output layer first)
        Theta = []
        outlabels = numpy.mat(numpy.zeros((1, self.outputDi)))
        outlabels[0, int(self.trainlabel[currentDataIdx])] = 1.0   # one-hot label
        thetaNl = self.calThetaNl(Atemp[self.Nl - 2], outlabels, Ztemp[self.Nl - 2])
        Theta.append(thetaNl)
        for idx in range(1, self.Nl - 1):
            inverseidx = self.Nl - 1 - idx
            thetaLPlus1 = Theta[idx - 1]
            WeightL = self.weightMatrix[inverseidx]
            Zl = Ztemp[inverseidx - 1]
            thetal = thetaLPlus1 * WeightL.transpose()
            row_theta, col_theta = numpy.shape(thetal)
            if row_theta != 1:
                raise Exception("residual must be a row vector")
            for idx_col in range(0, col_theta):
                thetal[0, idx_col] = thetal[0, idx_col] * difsigmoid(Zl[0, idx_col])
            Theta.append(thetal)
        # DetaW, DetaB are also stored in reverse
        DetaW = []
        DetaB = []
        for idx in range(0, self.Nl - 2):
            inverse_idx = self.Nl - 2 - 1 - idx
            # pay great attention to the matrix dimensions here: dW must have
            # the same shape as the weight matrix, i.e. (fan_in x fan_out)
            # dW = Theta[idx] * Atemp[inverse_idx].transpose()   # wrong shape
            dW = Atemp[inverse_idx].transpose() * Theta[idx]
            dB = Theta[idx]
            DetaW.append(dW)
            DetaB.append(dB)
        DetaW.append(singleDataInput.transpose() * Theta[self.Nl - 2])
        DetaB.append(Theta[self.Nl - 2])
        return DetaW, DetaB, temperror

    def updatePara(self, DetaW, DetaB):
        # gradient step with weight decay:
        # W -= alpha * ((1/m) * DeltaW + lambda * W), b -= alpha * (1/m) * DeltaB
        for idx in range(0, self.Nl - 1):
            inverse_idx = self.Nl - 1 - 1 - idx
            self.weightMatrix[inverse_idx] -= self.decayRate * (
                (1.0 / self.trainDataNum) * DetaW[idx]
                + self.punishFactor * self.weightMatrix[inverse_idx])
            self.B[inverse_idx] -= self.decayRate * (1.0 / self.trainDataNum) * DetaB[idx]

    def calpunish(self):
        # weight-decay penalty: 0.5 * lambda * sum of squared weights
        punishment = 0.0
        for idx in range(0, self.Nl - 1):
            temp = self.weightMatrix[idx]
            idx_m, idx_n = numpy.shape(temp)
            for i_m in range(0, idx_m):
                for i_n in range(0, idx_n):
                    punishment += temp[i_m, i_n] * temp[i_m, i_n]
        return 0.5 * self.punishFactor * punishment

    def trainANN(self):
        # full-batch gradient descent
        Error_old = 10000000000.0
        iter_idx = 0
        while iter_idx < self.maxIteration:
            print "iter num: ", iter_idx, "==============================="
            iter_idx += 1
            cDetaW, cDetaB, cError = self.backwardPropogation(self.traindata[0], 0)
            for idx in range(1, self.trainDataNum):
                DetaWtemp, DetaBtemp, Errortemp = self.backwardPropogation(self.traindata[idx], idx)
                cError += Errortemp
                for idx_W in range(0, len(cDetaW)):
                    cDetaW[idx_W] += DetaWtemp[idx_W]
                for idx_B in range(0, len(cDetaB)):
                    cDetaB[idx_B] += DetaBtemp[idx_B]
            cError /= self.trainDataNum
            cError += self.calpunish()
            print "old error", Error_old
            print "new error", cError
            Error_new = cError
            if Error_old - Error_new < self.eps:
                break
            Error_old = Error_new
            self.updatePara(cDetaW, cDetaB)
        return

    def trainANNwithMultiThread(self):
        # WARNING: still problematic -- run() is called directly, so the three
        # "workers" execute one after another in this thread; use with caution
        Error_old = 10000000000.0
        iter_idx = 0
        while iter_idx < self.maxIteration:
            print "iter num: ", iter_idx, "==============================="
            iter_idx += 1
            cDetaW, cDetaB, cError = self.backwardPropogation(self.traindata[0], 0)
            segNum = int(self.trainDataNum / 3)
            work1 = MyThread('work1', self, 1, segNum)
            cDetaW1, cDetaB1, cError1 = work1.run()
            work2 = MyThread('work2', self, segNum, int(2 * segNum))
            cDetaW2, cDetaB2, cError2 = work2.run()
            work3 = MyThread('work3', self, int(2 * segNum), self.trainDataNum)
            cDetaW3, cDetaB3, cError3 = work3.run()
            # the threads were never start()ed, so isAlive() is always False here
            while work1.isAlive() or work2.isAlive() or work3.isAlive():
                time.sleep(0.005)
                continue
            # bug fix: '+' on Python lists concatenates them; accumulate the
            # gradient matrices element-wise instead
            for idx_W in range(0, len(cDetaW)):
                cDetaW[idx_W] = cDetaW[idx_W] + cDetaW1[idx_W] + cDetaW2[idx_W] + cDetaW3[idx_W]
            for idx_B in range(0, len(cDetaB)):
                cDetaB[idx_B] = cDetaB[idx_B] + cDetaB1[idx_B] + cDetaB2[idx_B] + cDetaB3[idx_B]
            cError = cError + cError1 + cError2 + cError3
            cError /= self.trainDataNum
            cError += self.calpunish()
            print "old error", Error_old
            print "new error", cError
            Error_new = cError
            if Error_old - Error_new < self.eps:
                break
            Error_old = Error_new
            self.updatePara(cDetaW, cDetaB)
        return

    def getTrainAccuracy(self):
        accuracycount = 0
        for idx in range(0, self.trainDataNum):
            Atemp, Ztemp, errorsum = self.forwardPropogation(self.traindata[idx], idx)
            TrainPredict = softmax(Atemp[self.Nl - 2])
            # predicted class = index of the largest softmax output
            Plist = TrainPredict.tolist()
            LabelPredict = Plist[0].index(max(Plist[0]))
            print "LabelPredict", LabelPredict
            print "trainLabel", self.trainlabel[idx]
            if int(LabelPredict) == int(self.trainlabel[idx]):
                accuracycount += 1
        print "accuracy:", float(accuracycount) / float(self.trainDataNum)
        return
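
A minimal driver, as a sketch: the MNIST file names and the 784-64-10 architecture below are my own choices for illustration, not fixed by the code, so adjust the paths to wherever your MNIST files live.

if __name__ == '__main__':
    # 1 hidden layer of 64 units; 28*28 = 784 inputs, 10 digit classes
    ann = MuiltilayerANN(1, [64], 784, 10, maxIter=200)
    ann.setTrainDataNum(200)                        # 200 images, as in the result above
    ann.loadtraindata('train-images.idx3-ubyte')    # hypothetical path to MNIST images
    ann.loadtrainlabel('train-labels.idx1-ubyte')   # hypothetical path to MNIST labels
    ann.initialweights()
    ann.trainANN()           # single-threaded training; the threaded variant is still buggy
    ann.getTrainAccuracy()

Note that trainANNwithMultiThread does not actually run in parallel: the workers' run() methods are invoked directly rather than via start(), so they execute sequentially, and CPython's GIL would prevent a speedup for this CPU-bound loop even with real threads. The multiprocessing module would be a more promising route.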
