
Deep Learning Assignment Help | Regularization, Dropout, Stochastic and Minibatch Gradient Descent



In this project, we create a proper object-oriented implementation of a deep network of fully connected (linear) layers, from scratch, with support for different activation functions, regularization, dropout, the stochastic, batch, and minibatch gradient descent algorithms, and visualization of the decision boundaries.


Create a Python application called NNPY2. Add a file called Utils.py with the following code in it.

#Import Libraries
import numpy as np 
from sklearn import datasets, linear_model 
import matplotlib.pyplot as plt 
class Utils(object):
	def initData(self):
		# generate a random dataset and plot it
		np.random.seed(0)
		X, y = datasets.make_moons(200, noise=0.20)
		plt.scatter(X[:,0], X[:,1], s=40, c=y, cmap=plt.cm.Spectral)
		plt.show()
		return X, y
	def normalizeData(self, X):
		# min-max normalization: 1 - (max - X)/(max - min) is equivalent to (X - min)/(max - min)
		minX = np.min(X, axis=0)
		maxX = np.max(X, axis=0)
		normX = 1 - ((maxX - X)/(maxX - minX))
		return normX
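As a quick sanity check (a minimal sketch, not part of the assignment files), you can verify that normalizeData performs ordinary min-max scaling of each feature column to the [0, 1] range:

import numpy as np
from Utils import Utils

utils = Utils()
X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
normX = utils.normalizeData(X)
print(normX)  # each column scaled to [0, 1]; identical to (X - min)/(max - min)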

Add a file called ActivationType.py with the following code in it.

class ActivationType(object):  
	NONE = 0  
	SIGMOID = 1  
	TANH = 2  
	RELU = 3  
	SOFTMAX = 4

Add a file called GradDescType.py with the following code in it.

class GradDescType(object):  
	STOCHASTIC = 1  
	BATCH = 2  
	MINIBATCH = 3

Add a file called Layer.py with the following code in it. This class contains the neurons, their activation function, biases, and weights for a single layer in the neural network. Examine the code carefully to see how a single layer in a neural network is put together; a short usage sketch follows the code.


import numpy as np
from ActivationType import ActivationType

class Layer(object):
	def __init__(self,numNeurons,numNeuronsPrevLayer, lastLayer= False,dropOut = 0.2,activationType=ActivationType.SIGMOID):
		# initialize the weights and biases
		self.numNeurons = numNeurons
		self.lastLayer = lastLayer
		self.numNeuronsPrevLayer = numNeuronsPrevLayer
		self.activationFunction = activationType
		self.dropOut = dropOut
		#self.b = np.zeros((numNeurons,1))
		self.delta = np.zeros((numNeurons,1))
		self.a = np.zeros((numNeurons,1))
		self.derivAF = np.zeros((numNeurons,1)) # deriv of Activation function
		#self.W = 0.01 * np.random.randn(numNeurons,numNeuronsPrevLayer) 
		#self.W = np.random.uniform(low=-0.1,high=0.1,size=(numNeurons,numNeuronsPrevLayer)) 
		#self.b = np.random.uniform(low=-1,high=1,size=(numNeurons,1)) 
		self.W = np.random.randn(numNeurons, numNeuronsPrevLayer)/ np.sqrt(numNeuronsPrevLayer)
		self.b = np.zeros((numNeurons,1))
		self.WGrad = np.zeros((numNeurons,numNeuronsPrevLayer))
		self.bGrad = np.zeros((numNeurons,1)) # gradient for the biases
		self.zeroout = None # for dropout
	
	def Evaluate(self,indata):
		sum = np.dot(self.W,indata) + self.b
		if (self.activationFunction == ActivationType.NONE):
			self.a = sum
			self.derivAF = 1
		if (self.activationFunction == ActivationType.SIGMOID):
			self.a = self.sigmoid(sum)
			self.derivAF = self.a * (1 - self.a)
		if (self.activationFunction == ActivationType.TANH):
			self.a = self.TanH(sum)
			self.derivAF = (1 - self.a*self.a)
		if (self.activationFunction == ActivationType.RELU):
			self.a = self.Relu(sum)
			self.derivAF = 1.0 * (self.a > 0)
		if (self.activationFunction == ActivationType.SOFTMAX):
			self.a = self.Softmax(sum)
			self.derivAF = None # we do delta computation in Softmax layer
		if (self.lastLayer == False):
			# inverted dropout: dropOut is the keep probability; dividing by it keeps the expected activation unchanged
			self.zeroout = np.random.binomial(1,self.dropOut,(self.numNeurons,1))/self.dropOut
			self.a = self.a * self.zeroout
			self.derivAF = self.derivAF * self.zeroout
	
	def linear(self,x):
		return x # output same as input
 
	def sigmoid(self,x):
		return 1 / (1 + np.exp(-x)) # np.exp makes it operate on entire array
	
	def TanH(self, x):
		return np.tanh(x)
	
	def Relu(self, x):
		return np.maximum(0,x)
	
	def Softmax(self, x):
		ex = np.exp(x - np.max(x)) # subtract the max for numerical stability
		return ex/ex.sum()
 
	def ClearWBGrads(self): # zero out the accumulated weight and bias gradients
		self.WGrad = np.zeros((self.numNeurons, self.numNeuronsPrevLayer))
		self.bGrad = np.zeros((self.numNeurons,1))
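To see the forward pass of a single layer in isolation, here is a minimal sketch (not part of the assignment files). It assumes a 3-neuron layer fed by 2 inputs, with dropOut set to 1.0 so that no activations are dropped:

import numpy as np
from ActivationType import ActivationType
from Layer import Layer

np.random.seed(0)
layer = Layer(3, 2, lastLayer=False, dropOut=1.0, activationType=ActivationType.SIGMOID)
indata = np.array([[0.5], [-1.0]])  # one input sample as a column vector, shape (2, 1)
layer.Evaluate(indata)
print(layer.a)        # activations, shape (3, 1)
print(layer.derivAF)  # derivative of the activation, used later in backpropagation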

Add a file called Network.py to the project with the following code in it. The Network class contains a list of layers, and provides the train and evaluate functions; a short example of the input and label shapes it expects follows the code.

import math
import numpy as np
from Layer import *
from GradDescType import *
from sklearn.utils import shuffle

class Network(object):
	def __init__(self,X,Y,numLayers,dropOut = 1.0,activationF=ActivationType.SIGMOID,lastLayerAF= ActivationType.SIGMOID):
		self.X = X
		self.Y = Y
		self.numLayers = numLayers
		self.Layers = [] # network contains list of layers
		self.lastLayerAF = lastLayerAF
		
		for i in range(len(numLayers)):
			if (i == 0): # first layer
				layer = Layer(numLayers[i],X.shape[1],False,dropOut, activationF)
			elif (i == len(numLayers)-1): # last layer
				layer = Layer(Y.shape[1],numLayers[i-1],True,dropOut, lastLayerAF)
			else: # intermediate layers
				layer = Layer(numLayers[i],numLayers[i-1],False,dropOut, activationF)
			self.Layers.append(layer) # append every layer, not just the intermediate ones
 
	def Evaluate(self,indata, decision_plotting=0): # evaluates all layers
		self.Layers[0].Evaluate(indata)
		
		for i in range(1,len(self.numLayers)):
			self.Layers[i].Evaluate(self.Layers[i-1].a)
		# the raw output of the last layer is returned; the caller applies thresholding or argmax as needed
		return self.Layers[len(self.numLayers)-1].a
		
	def Train(self, epochs, learningRate, lambda1, gradDescType, batchSize=1):
		for j in range(epochs):
			error = 0
			self.X, self.Y = shuffle(self.X, self.Y, random_state=0)
			for i in range(self.X.shape[0]):
				self.Evaluate(self.X[i])
				if (self.lastLayerAF == ActivationType.SOFTMAX):
					error += -(self.Y[i] * np.log(self.Layers[len(self.numLayers)-1].a+0.001)).sum()
				else:
					error += ((self.Layers[len(self.numLayers)-1].a - self.Y[i]) * (self.Layers[len(self.numLayers)-1].a - self.Y[i])).sum()
				lnum = len(self.numLayers)-1 # last layer number
				# compute deltas and gradients on all layers, from the last layer backwards
				while(lnum >= 0):
					if (lnum == len(self.numLayers)-1): # last layer
						if (self.lastLayerAF == ActivationType.SOFTMAX):
							self.Layers[lnum].delta = -self.Y[i] + self.Layers[lnum].a
						else:
							self.Layers[lnum].delta = -(self.Y[i]-self.Layers[lnum].a) * self.Layers[lnum].derivAF
					else: # intermediate layer
						self.Layers[lnum].delta = np.dot(self.Layers[lnum+1].W.T,self.Layers[lnum+1].delta) * self.Layers[lnum].derivAF
					if (lnum > 0): # previous layer's output
						prevOut = self.Layers[lnum-1].a
					else:
						prevOut = self.X[i]

					self.Layers[lnum].WGrad += np.dot(self.Layers[lnum].delta,prevOut.T)
					self.Layers[lnum].bGrad += self.Layers[lnum].delta
					lnum = lnum - 1

				if (gradDescType == GradDescType.MINIBATCH):
					if (i % batchSize == 0):
						self.UpdateGradsBiases(learningRate,lambda1, batchSize)
				if (gradDescType == GradDescType.STOCHASTIC):
					self.UpdateGradsBiases(learningRate,lambda1, 1)
			if (gradDescType == GradDescType.BATCH):
				self.UpdateGradsBiases(learningRate,lambda1, self.X.shape[0])
			print("Iter = " + str(j) + " Error = " + str(error))
 
	def UpdateGradsBiases(self, learningRate, lambda1, batchSize):
		# update weights and biases for all layers; the lambda1 term applies L2 (weight decay) regularization
		for ln in range(len(self.numLayers)):
			self.Layers[ln].W = self.Layers[ln].W - learningRate * (1/batchSize) * self.Layers[ln].WGrad - learningRate * lambda1 * self.Layers[ln].W
			self.Layers[ln].b = self.Layers[ln].b - learningRate * (1/batchSize) * self.Layers[ln].bGrad
			self.Layers[ln].ClearWBGrads()
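The Network expects each sample as a column vector, so the inputs are shaped (numSamples, numFeatures, 1) and the one-hot labels are shaped (numSamples, numClasses, 1). Here is a minimal sketch on random synthetic data (not part of the assignment files; the shapes are assumed from MoonDataSetTest.py below):

import numpy as np
from Network import Network
from ActivationType import ActivationType
from GradDescType import GradDescType

np.random.seed(0)
X = np.random.randn(50, 2, 1)             # 50 samples, 2 features, each sample a column vector
labels = (X[:, 0, 0] > 0).astype(int)     # a simple synthetic two-class problem
Y = np.zeros((50, 2, 1))
Y[np.arange(50), labels, 0] = 1           # one-hot encode the labels

NN = Network(X, Y, [4, 2], 1.0, ActivationType.TANH, ActivationType.SOFTMAX)
NN.Train(50, 0.1, 0.01, GradDescType.STOCHASTIC, 1)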

Add a file called MoonDataSetTest.py with the following code in it.

import sys
from Network import Network
from GradDescType import *
from ActivationType import *
from Utils import Utils
import numpy as np
import matplotlib.pyplot as plt

def plot_decision_boundary(pred_func, X, y): 
	# Set min and max values and give it some padding 
	x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5 
	y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5 
	h = 0.01 
	# Generate a grid of points with distance h between them 
	xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) 
	# Predict the function value for the whole grid
	xdata = np.c_[xx.ravel(), yy.ravel()]
	xdatanp = xdata.reshape(xdata.shape[0],xdata.shape[1],1)
	#print(xdatanp.shape)
	Z = [pred_func(xdatanp[x]) for x in range(0,len(xdatanp))] 
	Z = np.array(Z)
	exp_scores = np.exp(Z)
	probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
	Z = np.argmax(probs, axis=1)
	Z = Z.reshape(xx.shape) 
	# Plot the contour and training examples 
	plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral) 
	plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral) 
	plt.show()

def main():
	utils = Utils()
	X, Y = utils.initData() # initialize data
	#X = utils.normalizeData(X)
	trainX = X.reshape((X.shape[0],X.shape[1],1)) 
	trainY = np.zeros((len(Y),2))
	
	for i in range(0,len(Y)):
		if Y[i] == 1:
			trainY[i,0] = 1
			trainY[i,1] = 0
		else:
			trainY[i,0] = 0
			trainY[i,1]= 1
	trainY = trainY.reshape((trainY.shape[0],trainY.shape[1],1)) # one-hot labels as column vectors
	numLayers = [5,2]
	NN = Network(trainX,trainY,numLayers,1.0,ActivationType.RELU,
		ActivationType.SOFTMAX) # hidden-layer and last-layer activations; dropOut = 1.0 means no dropout
	NN.Train(400,0.1,0.01, GradDescType.STOCHASTIC,1)
 
	#------------ compute accuracy----------
	accuracy = 0
	for i in range(len(trainX)):
		pred = NN.Evaluate(trainX[i])
		if (pred.argmax() == 0 and trainY[i,0,0] == 1) or \
			(pred.argmax() == 1 and trainY[i,0,0] == 0):
			accuracy = accuracy + 1
	accuracy_percent = accuracy/len(trainX)
	print('accuracy =', accuracy_percent)
	plot_decision_boundary(lambda x: NN.Evaluate(x,decision_plotting=1), X, Y)
 
if __name__ == "__main__":
	sys.exit(int(main() or 0))

As you can see from the above code, the main function creates a network and specifies the neurons per layer via a list called numLayers. For example, the above code sets the number of neurons to 5 in the hidden layer and 2 in the output layer. For the moon dataset, the input and output are two-dimensional so that we can visualize the decision boundaries. Deeper networks are specified simply by adding entries to the list, as in the sketch below.
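For instance, a deeper network with two hidden layers of 10 and 5 neurons (a hypothetical configuration, not part of the assignment code) could be set up by replacing the corresponding lines in main(); the last entry still matches the two output classes:

numLayers = [10, 5, 2]
NN = Network(trainX, trainY, numLayers, 1.0, ActivationType.RELU, ActivationType.SOFTMAX)
NN.Train(400, 0.1, 0.01, GradDescType.STOCHASTIC, 1)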


For this assignment, your goal is to understand the effect of the number of layers, the number of neurons in the hidden layers, different activation types, stochastic versus minibatch gradient descent, and overfitting versus underfitting of the model. A possible starting point for the stochastic-versus-minibatch comparison is sketched below.
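To compare stochastic and minibatch training, only the Train call needs to change (a hedged example; the learning rate, regularization strength, and batch size shown here are illustrative values, not prescribed by the assignment):

# stochastic gradient descent: weights are updated after every sample
NN.Train(400, 0.1, 0.01, GradDescType.STOCHASTIC, 1)

# minibatch gradient descent: gradients are accumulated over 20 samples before each update
NN.Train(400, 0.1, 0.01, GradDescType.MINIBATCH, 20)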



We also provide other Machine Learning related help. If you need help with any Machine Learning assignment, R project, or R homework, or need a solution to the above problem, we are ready to help you.

Send your request to realcode4you@gmail.com and get instant help at an affordable price.

We always focus on delivering unique, plagiarism-free, well-structured code, written by our highly educated professionals within your given time frame.


If you are looking for help with other programming languages such as C, C++, Java, Python, PHP, Asp.Net, NodeJs, ReactJs, etc., or with different types of databases such as MySQL, MongoDB, SQL Server, Oracle, etc., then also contact us.

