Posts

Showing posts from 2022

LONG SHORT-TERM MEMORY EX-15

import tensorflow as tf
import tensorflow.keras.layers as KL
import matplotlib.pyplot as plt

# Dataset
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# Model: each 28x28 image is fed as a sequence of 28 rows
inputs = KL.Input(shape=(28, 28))  # For RNN
x = KL.LSTM(64, activation='relu')(inputs)
outputs = KL.Dense(10, activation="softmax")(x)

model = tf.keras.models.Model(inputs, outputs)
model.summary()
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["acc"])
history = model.fit(x_train, y_train, epochs=5)

# Plot the training curves
plt.plot(history.history['loss'])
plt.plot(history.history['acc'])
plt.title("accuracy vs loss")
plt.xlabel('Epochs')
plt.legend(['Loss', 'Accuracy'])

test_loss, test_acc = model.evaluate(x_test, y_test)
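Once trained, the model above can be used for inference; a quick sketch (assuming the excerpt above has run to completion, so model, x_test, and test_acc are in scope):

import numpy as np

probs = model.predict(x_test[:1])            # class probabilities for one image
print("Predicted digit:", np.argmax(probs))  # index of the most likely class
print("Test accuracy:", test_acc)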

RECURRENT NEURAL NETWORK EX-14

import tensorflow as tf
import tensorflow.keras.layers as KL
import matplotlib.pyplot as plt

# Dataset
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# Model: each 28x28 image is fed as a sequence of 28 rows
inputs = KL.Input(shape=(28, 28))  # For RNN
x = KL.SimpleRNN(64, activation='relu')(inputs)
outputs = KL.Dense(10, activation="softmax")(x)

model = tf.keras.models.Model(inputs, outputs)
model.summary()
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["acc"])
history = model.fit(x_train, y_train, epochs=5)

# Plot the training curves
plt.plot(history.history['loss'])
plt.plot(history.history['acc'])
plt.title("accuracy vs loss")
plt.xlabel('Epochs')
plt.legend(['Loss', 'Accuracy'])

test_loss, test_acc = model.evaluate(x_test, y_test)
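SimpleRNN reads each image as 28 timesteps of 28 features. Deeper variants stack recurrent layers by keeping the per-timestep outputs of intermediate layers; a minimal sketch using the imports from the excerpt above (an extension, not part of the original post):

inputs = KL.Input(shape=(28, 28))
x = KL.SimpleRNN(64, activation='relu', return_sequences=True)(inputs)  # emit all 28 timesteps
x = KL.SimpleRNN(64, activation='relu')(x)                              # keep only the final state
outputs = KL.Dense(10, activation="softmax")(x)
stacked_model = tf.keras.models.Model(inputs, outputs)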

SEQUENTIAL MODEL EX-13

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# Define a Sequential model with 3 layers
model = keras.Sequential([
    layers.Dense(2, activation="relu", name="layer1"),
    layers.Dense(3, activation="relu", name="layer2"),
    layers.Dense(4, name="layer3"),
])
# Call model on a test input
x = tf.ones((3, 3))
y = model(x)

# Equivalent: create the 3 layers individually
layer1 = layers.Dense(2, activation="relu", name="layer1")
layer2 = layers.Dense(3, activation="relu", name="layer2")
layer3 = layers.Dense(4, name="layer3")

# Call the layers on a test input
x = tf.ones((3, 3))
y = layer3(layer2(layer1(x)))

# The same model without layer names
model = keras.Sequential([
    layers.Dense(2, activation="relu"),
    layers.Dense(3, activation="relu"),
    layers.Dense(4),
])
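A Sequential model can also be grown one layer at a time with add(); a short sketch (not shown in the excerpt above):

model = keras.Sequential(name="incremental")
model.add(layers.Dense(2, activation="relu"))
model.add(layers.Dense(3, activation="relu"))
model.add(layers.Dense(4))
print(len(model.layers))  # 3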

ResNet ARCHITECTURE EX-12

import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import datasets, layers, models, losses

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Pad 28x28 digits to 32x32 and scale to [0, 1]
x_train = tf.pad(x_train, [[0, 0], [2, 2], [2, 2]]) / 255
x_test = tf.pad(x_test, [[0, 0], [2, 2], [2, 2]]) / 255
# Add a channel axis, then repeat it so grayscale becomes 3-channel
x_train = tf.expand_dims(x_train, axis=3, name=None)
x_test = tf.expand_dims(x_test, axis=3, name=None)
x_train = tf.repeat(x_train, 3, axis=3)
x_test = tf.repeat(x_test, 3, axis=3)
# Hold out the last 2000 training samples for validation
x_val = x_train[-2000:, :, :, :]
y_val = y_train[-2000:]
x_train = x_train[:-2000, :, :, :]
y_train = y_train[:-2000]

# Pretrained ResNet152 backbone, frozen for transfer learning
base_model = tf.keras.applications.ResNet152(weights='imagenet',
                                             include_top=False,
                                             input_shape=(32, 32, 3))
for layer in base_model.layers:
    layer.trainable = False

x = layers.Flatten()(base_model.output)
x = layers.Dense(1000, activation='relu')(x)
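The excerpt stops at the first dense layer; a plausible completion of the classification head and training call, following the usual transfer-learning recipe (a sketch, not the post's verbatim code):

outputs = layers.Dense(10, activation='softmax')(x)
model = tf.keras.Model(inputs=base_model.input, outputs=outputs)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5, validation_data=(x_val, y_val))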

GoogLeNet ARCHITECTURE EX-11

import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import datasets, layers, models, losses

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Pad 28x28 digits to 32x32, scale to [0, 1], and make them 3-channel
x_train = tf.pad(x_train, [[0, 0], [2, 2], [2, 2]]) / 255
x_test = tf.pad(x_test, [[0, 0], [2, 2], [2, 2]]) / 255
x_train = tf.expand_dims(x_train, axis=3, name=None)
x_test = tf.expand_dims(x_test, axis=3, name=None)
x_train = tf.repeat(x_train, 3, axis=3)
x_test = tf.repeat(x_test, 3, axis=3)
# Hold out the last 2000 training samples for validation
x_val = x_train[-2000:, :, :, :]
y_val = y_train[-2000:]
x_train = x_train[:-2000, :, :, :]
y_train = y_train[:-2000]

# Inception block: parallel 1x1, 3x3, 5x5, and pooled paths
def inception(x, filters_1x1, filters_3x3_reduce, filters_3x3,
              filters_5x5_reduce, filters_5x5, filters_pool):
    path1 = layers.Conv2D(filters_1x1, (1, 1), padding='same', activation='relu')(x)
    path2 = layers.Conv2D(filters_3x3_reduce, (1, 1), padding='same', activation='relu')(x)
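The helper is cut off mid-definition; in the standard GoogLeNet design it continues with the 3x3 and 5x5 paths plus a pooled path, all concatenated along the channel axis (a sketch of the usual completion, not the post's verbatim code):

    path2 = layers.Conv2D(filters_3x3, (3, 3), padding='same', activation='relu')(path2)
    path3 = layers.Conv2D(filters_5x5_reduce, (1, 1), padding='same', activation='relu')(x)
    path3 = layers.Conv2D(filters_5x5, (5, 5), padding='same', activation='relu')(path3)
    path4 = layers.MaxPooling2D((3, 3), strides=(1, 1), padding='same')(x)
    path4 = layers.Conv2D(filters_pool, (1, 1), padding='same', activation='relu')(path4)
    # Merge the four parallel paths on the channel axis
    return tf.concat([path1, path2, path3, path4], axis=3)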

VGG ARCHITECTURE EX-10

import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import datasets, layers, models, losses

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Pad 28x28 digits to 32x32 and scale to [0, 1]
x_train = tf.pad(x_train, [[0, 0], [2, 2], [2, 2]]) / 255
x_test = tf.pad(x_test, [[0, 0], [2, 2], [2, 2]]) / 255
# Add a single channel axis
x_train = tf.expand_dims(x_train, axis=3, name=None)
x_test = tf.expand_dims(x_test, axis=3, name=None)
# Hold out the last 2000 training samples for validation
x_val = x_train[-2000:, :, :, :]
y_val = y_train[-2000:]
x_train = x_train[:-2000, :, :, :]
y_train = y_train[:-2000]

model = models.Sequential([
    # Upsample to the 224x224 input size VGG expects
    layers.experimental.preprocessing.Resizing(224, 224,
                                               interpolation="bilinear",
                                               input_shape=x_train.shape[1:]),
    layers.Conv2D(64, 3, strides=1, padding='same'),
    layers.Activation('relu'),
    layers.Conv2D(64, 3, strides=1, padding='same'),
    layers.Activation('relu'),
    layers.MaxPooling2D(2, strides=2),
])
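The archive excerpt ends inside the first VGG block (closed above so the snippet parses); the remaining VGG-16 stages and classifier head presumably repeat the same conv/pool pattern. A condensed sketch with the standard VGG-16 filter counts (assumed, not the post's verbatim code):

# Stages 2-5: two or three 3x3 convolutions per stage, then 2x2 max pooling
for filters, convs in [(128, 2), (256, 3), (512, 3), (512, 3)]:
    for _ in range(convs):
        model.add(layers.Conv2D(filters, 3, strides=1, padding='same', activation='relu'))
    model.add(layers.MaxPooling2D(2, strides=2))
# Classifier head
model.add(layers.Flatten())
model.add(layers.Dense(4096, activation='relu'))
model.add(layers.Dense(4096, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))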

AlexNet ARCHITECTURE EX-9

import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import datasets, layers, models, losses

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Pad 28x28 digits to 32x32 and scale to [0, 1]
x_train = tf.pad(x_train, [[0, 0], [2, 2], [2, 2]]) / 255
x_test = tf.pad(x_test, [[0, 0], [2, 2], [2, 2]]) / 255
# Add a single channel axis
x_train = tf.expand_dims(x_train, axis=3, name=None)
x_test = tf.expand_dims(x_test, axis=3, name=None)
# Hold out the last 2000 training samples for validation
x_val = x_train[-2000:, :, :, :]
y_val = y_train[-2000:]
x_train = x_train[:-2000, :, :, :]
y_train = y_train[:-2000]

model = models.Sequential([
    # Upsample to the 224x224 input size AlexNet expects
    layers.experimental.preprocessing.Resizing(224, 224,
                                               interpolation="bilinear",
                                               input_shape=x_train.shape[1:]),
    layers.Conv2D(96, 11, strides=4, padding='same'),
    layers.Lambda(tf.nn.local_response_normalization),
    layers.Activation('relu'),
    layers.MaxPooling2D(3, strides=2),
    layers.Conv2D(256, 5, strides=1, padding='same'),
])
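The excerpt cuts off at the second convolution (closed above so the snippet parses); the rest of AlexNet presumably follows the familiar layout. A sketch with the standard filter sizes (assumed, not the post's verbatim code):

model.add(layers.Lambda(tf.nn.local_response_normalization))
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling2D(3, strides=2))
# Three 3x3 convolutions, then the dense head
model.add(layers.Conv2D(384, 3, strides=1, padding='same', activation='relu'))
model.add(layers.Conv2D(384, 3, strides=1, padding='same', activation='relu'))
model.add(layers.Conv2D(256, 3, strides=1, padding='same', activation='relu'))
model.add(layers.MaxPooling2D(3, strides=2))
model.add(layers.Flatten())
model.add(layers.Dense(4096, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(4096, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(10, activation='softmax'))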

LeNet ARCHITECTURE EX-8

import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import datasets, layers, models, losses

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Pad 28x28 digits to the 32x32 input LeNet-5 expects and scale to [0, 1]
x_train = tf.pad(x_train, [[0, 0], [2, 2], [2, 2]]) / 255
x_test = tf.pad(x_test, [[0, 0], [2, 2], [2, 2]]) / 255
# Add a single channel axis
x_train = tf.expand_dims(x_train, axis=3, name=None)
x_test = tf.expand_dims(x_test, axis=3, name=None)
# Hold out the last 2000 training samples for validation
x_val = x_train[-2000:, :, :, :]
y_val = y_train[-2000:]
x_train = x_train[:-2000, :, :, :]
y_train = y_train[:-2000]

model = models.Sequential([
    layers.Conv2D(6, 5, activation='tanh', input_shape=x_train.shape[1:]),
    layers.AveragePooling2D(2),
    layers.Activation('sigmoid'),
    layers.Conv2D(16, 5, activation='tanh'),
    layers.AveragePooling2D(2),
    layers.Activation('sigmoid'),
    layers.Conv2D(120, 5, activation='tanh'),
    layers.Flatten(),
])
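The excerpt ends at the flatten layer; the classic LeNet-5 head adds an 84-unit dense layer and a 10-way softmax. A sketch of the likely continuation (assumed, not the post's verbatim code):

model.add(layers.Dense(84, activation='tanh'))
model.add(layers.Dense(10, activation='softmax'))
model.compile(optimizer='adam',
              loss=losses.sparse_categorical_crossentropy,
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5, validation_data=(x_val, y_val))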

Softmax Regression EX-3

import tensorflow as tf
from tensorflow import keras
import numpy as np

((train_data, train_labels), (mnist_data, mnist_labels)) = tf.keras.datasets.mnist.load_data()
# Scale pixels to [0, 1] and cast labels to int32 for the estimator
train_data = train_data / np.float32(255)
train_labels = train_labels.astype(np.int32)
mnist_data = mnist_data / np.float32(255)
mnist_labels = mnist_labels.astype(np.int32)

# A linear classifier over raw pixels is softmax regression
feature_columns = [tf.feature_column.numeric_column("x", shape=[28, 28])]
classifier = tf.estimator.LinearClassifier(
    feature_columns=feature_columns,
    n_classes=10,
    model_dir="mnist_model/")

train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
    x={"x": train_data},
    y=train_labels,
    batch_size=100,
    num_epochs=None,
    shuffle=True)
classifier.train(input_fn=train_input_fn, steps=5)

val_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
    x={"x": mnist_data},
    y=mnist_labels,
    num_epochs=1,
    shuffle=False)
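With the validation input function defined (its arguments past x={"x": mnist_data} are completed above by analogy with the training function), evaluation is a single call; a minimal sketch of the likely next step (assumed, not the post's verbatim code):

# Report softmax-regression accuracy on the held-out MNIST split
eval_results = classifier.evaluate(input_fn=val_input_fn)
print(eval_results)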

Single and Multi-Layer Perceptron

# -*- coding: utf-8 -*-
"""Single and Multi Layer Perceptron.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1sUwF-aPoUh-5eiqZRHDTrFQqz01k8xZs
"""

# importing modules
import tensorflow as tf
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Activation
import matplotlib.pyplot as plt
import plotly.express as px

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

# Cast the records into float values
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')

# normalize image pixel values by dividing by 255
gray_scale = 255
x_train /= gray_scale
x_test /= gray_scale

# Inspect the train and test splits
print("Feature matrix:", x_train.shape)  # (60000, 28, 28)
print("Target matrix:", y_train.shape)   # (60000,)
print("Feature matrix:", x_test.shape)   # (10000, 28, 28)
print("Target matrix:", y_test.shape)    # (10000,)
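The excerpt ends at the data summary; the notebook presumably continues by flattening each image and stacking dense layers into a multi-layer perceptron. A minimal sketch (assumed layer sizes and training settings, not the notebook's verbatim code):

model = Sequential([
    Flatten(input_shape=(28, 28)),     # unroll each 28x28 image into 784 inputs
    Dense(256, activation='sigmoid'),  # hidden layer 1
    Dense(128, activation='sigmoid'),  # hidden layer 2
    Dense(10, activation='softmax'),   # one output per digit class
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, batch_size=2000, validation_split=0.2)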