Posts

Showing posts from December, 2022

LONG SHORT-TERM MEMORY EX-15

import tensorflow as tf
import tensorflow.keras.layers as KL
import matplotlib.pyplot as plt

# Dataset: load MNIST and scale pixel values to [0, 1]
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# Model: each 28x28 image is fed to the LSTM as a sequence of 28 rows
inputs = KL.Input(shape=(28, 28))
x = KL.LSTM(64, activation='relu')(inputs)
outputs = KL.Dense(10, activation="softmax")(x)
model = tf.keras.models.Model(inputs, outputs)
model.summary()

model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["acc"])
history = model.fit(x_train, y_train, epochs=5)

# Plot the training loss and accuracy curves
plt.plot(history.history['loss'])
plt.plot(history.history['acc'])
plt.title("accuracy vs loss")
plt.xlabel('Epochs')
plt.legend(['Loss', 'Accuracy'])

test_loss, test_acc = model.evaluate(x_test, y_test)
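# The post preview ends at the evaluate call; a minimal sketch of a likely
# ending (assumed, not part of the truncated preview):
print("Test accuracy:", test_acc)
plt.show()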

RECURRENT NEURAL NETWORK EX-14

import tensorflow as tf
import tensorflow.keras.layers as KL
import matplotlib.pyplot as plt

# Dataset: load MNIST and scale pixel values to [0, 1]
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# Model: a SimpleRNN reads each 28x28 image as a sequence of 28 rows
inputs = KL.Input(shape=(28, 28))
x = KL.SimpleRNN(64, activation='relu')(inputs)
outputs = KL.Dense(10, activation="softmax")(x)
model = tf.keras.models.Model(inputs, outputs)
model.summary()

model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["acc"])
history = model.fit(x_train, y_train, epochs=5)

# Plot the training loss and accuracy curves
plt.plot(history.history['loss'])
plt.plot(history.history['acc'])
plt.title("accuracy vs loss")
plt.xlabel('Epochs')
plt.legend(['Loss', 'Accuracy'])

test_loss, test_acc = model.evaluate(x_test, y_test)
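# As in the LSTM post above, the preview ends at the evaluate call;
# an assumed ending, not part of the truncated preview:
print("Test accuracy:", test_acc)
plt.show()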

SEQUENCE MODEL EX-13

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# Define a Sequential model with 3 layers
model = keras.Sequential([
    layers.Dense(2, activation="relu", name="layer1"),
    layers.Dense(3, activation="relu", name="layer2"),
    layers.Dense(4, name="layer3"),
])

# Call the model on a test input
x = tf.ones((3, 3))
y = model(x)

# The same network built layer by layer
layer1 = layers.Dense(2, activation="relu", name="layer1")
layer2 = layers.Dense(3, activation="relu", name="layer2")
layer3 = layers.Dense(4, name="layer3")

# Call the layers in sequence on a test input
x = tf.ones((3, 3))
y = layer3(layer2(layer1(x)))
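# The preview cuts off as the post starts to rebuild the model; a hedged
# reconstruction, assuming it follows the Keras Sequential guide where the
# same stack is rebuilt without layer names:
model = keras.Sequential([
    layers.Dense(2, activation="relu"),
    layers.Dense(3, activation="relu"),
    layers.Dense(4),
])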

ResNet ARCHITECTURE EX-12

import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import datasets, layers, models, losses

# Load MNIST, pad the 28x28 images to 32x32, and scale to [0, 1]
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = tf.pad(x_train, [[0, 0], [2, 2], [2, 2]]) / 255
x_test = tf.pad(x_test, [[0, 0], [2, 2], [2, 2]]) / 255

# Add a channel axis and repeat it 3 times so the grayscale images
# match ResNet's 3-channel input
x_train = tf.expand_dims(x_train, axis=3, name=None)
x_test = tf.expand_dims(x_test, axis=3, name=None)
x_train = tf.repeat(x_train, 3, axis=3)
x_test = tf.repeat(x_test, 3, axis=3)

# Hold out the last 2000 training samples for validation
x_val = x_train[-2000:, :, :, :]
y_val = y_train[-2000:]
x_train = x_train[:-2000, :, :, :]
y_train = y_train[:-2000]

# ImageNet-pretrained ResNet152, frozen and used as a feature extractor
base_model = tf.keras.applications.ResNet152(weights='imagenet',
                                             include_top=False,
                                             input_shape=(32, 32, 3))
for layer in base_model.layers:
    layer.trainable = False

# New classification head on top of the frozen base
x = layers.Flatten()(base_model.output)
x = layers.Dense(1000, activation='relu')(x)
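# The preview ends here; a hedged sketch of the usual transfer-learning head
# and training call (the variable names and training settings below are
# assumptions, not from the post):
outputs = layers.Dense(10, activation='softmax')(x)
model = models.Model(inputs=base_model.input, outputs=outputs)
model.compile(optimizer='adam',
              loss=losses.sparse_categorical_crossentropy,
              metrics=['accuracy'])
history = model.fit(x_train, y_train, batch_size=64, epochs=5,
                    validation_data=(x_val, y_val))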

GoogLeNet ARCHITECTURE EX-11

import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import datasets, layers, models, losses

# Load MNIST, pad the 28x28 images to 32x32, and scale to [0, 1]
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = tf.pad(x_train, [[0, 0], [2, 2], [2, 2]]) / 255
x_test = tf.pad(x_test, [[0, 0], [2, 2], [2, 2]]) / 255

# Add a channel axis and repeat it 3 times for a 3-channel input
x_train = tf.expand_dims(x_train, axis=3, name=None)
x_test = tf.expand_dims(x_test, axis=3, name=None)
x_train = tf.repeat(x_train, 3, axis=3)
x_test = tf.repeat(x_test, 3, axis=3)

# Hold out the last 2000 training samples for validation
x_val = x_train[-2000:, :, :, :]
y_val = y_train[-2000:]
x_train = x_train[:-2000, :, :, :]
y_train = y_train[:-2000]

# Inception module: parallel 1x1, 3x3, 5x5, and pooling paths whose
# outputs are concatenated along the channel axis
def inception(x, filters_1x1, filters_3x3_reduce, filters_3x3,
              filters_5x5_reduce, filters_5x5, filters_pool):
    path1 = layers.Conv2D(filters_1x1, (1, 1),
                          padding='same', activation='relu')(x)
    path2 = layers.Conv2D(filters_3x3_reduce, (1, 1),
                          padding='same', activation='relu')(x)
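    # The preview cuts off inside the inception module; a hedged sketch of
    # the standard GoogLeNet completion of this block, reusing the parameter
    # names from the signature above:
    path2 = layers.Conv2D(filters_3x3, (3, 3),
                          padding='same', activation='relu')(path2)
    path3 = layers.Conv2D(filters_5x5_reduce, (1, 1),
                          padding='same', activation='relu')(x)
    path3 = layers.Conv2D(filters_5x5, (5, 5),
                          padding='same', activation='relu')(path3)
    path4 = layers.MaxPooling2D((3, 3), strides=(1, 1), padding='same')(x)
    path4 = layers.Conv2D(filters_pool, (1, 1),
                          padding='same', activation='relu')(path4)
    return layers.concatenate([path1, path2, path3, path4], axis=-1)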

VGG ARCHITECTURE EX-10

import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import datasets, layers, models, losses

# Load MNIST, pad the 28x28 images to 32x32, scale to [0, 1],
# and add a channel axis
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = tf.pad(x_train, [[0, 0], [2, 2], [2, 2]]) / 255
x_test = tf.pad(x_test, [[0, 0], [2, 2], [2, 2]]) / 255
x_train = tf.expand_dims(x_train, axis=3, name=None)
x_test = tf.expand_dims(x_test, axis=3, name=None)

# Hold out the last 2000 training samples for validation
x_val = x_train[-2000:, :, :, :]
y_val = y_train[-2000:]
x_train = x_train[:-2000, :, :, :]
y_train = y_train[:-2000]

# VGG-style stack: resize to 224x224, then blocks of 3x3 convolutions
# followed by max pooling
model = models.Sequential([
    layers.experimental.preprocessing.Resizing(224, 224,
                                               interpolation="bilinear",
                                               input_shape=x_train.shape[1:]),
    layers.Conv2D(64, 3, strides=1, padding='same'),
    layers.Activation('relu'),
    layers.Conv2D(64, 3, strides=1, padding='same'),
    layers.Activation('relu'),
    layers.MaxPooling2D(2),
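    # The preview cuts off inside the Sequential stack; a hedged sketch of the
    # remaining VGG-16-style blocks and head (layer sizes follow VGG-16; the
    # 10-way softmax and training settings are assumptions, not from the post):
    layers.Conv2D(128, 3, strides=1, padding='same'),
    layers.Activation('relu'),
    layers.Conv2D(128, 3, strides=1, padding='same'),
    layers.Activation('relu'),
    layers.MaxPooling2D(2),
    layers.Conv2D(256, 3, strides=1, padding='same'),
    layers.Activation('relu'),
    layers.Conv2D(256, 3, strides=1, padding='same'),
    layers.Activation('relu'),
    layers.Conv2D(256, 3, strides=1, padding='same'),
    layers.Activation('relu'),
    layers.MaxPooling2D(2),
    layers.Conv2D(512, 3, strides=1, padding='same'),
    layers.Activation('relu'),
    layers.Conv2D(512, 3, strides=1, padding='same'),
    layers.Activation('relu'),
    layers.Conv2D(512, 3, strides=1, padding='same'),
    layers.Activation('relu'),
    layers.MaxPooling2D(2),
    layers.Conv2D(512, 3, strides=1, padding='same'),
    layers.Activation('relu'),
    layers.Conv2D(512, 3, strides=1, padding='same'),
    layers.Activation('relu'),
    layers.Conv2D(512, 3, strides=1, padding='same'),
    layers.Activation('relu'),
    layers.MaxPooling2D(2),
    layers.Flatten(),
    layers.Dense(4096, activation='relu'),
    layers.Dense(4096, activation='relu'),
    layers.Dense(10, activation='softmax'),
])
model.compile(optimizer='adam',
              loss=losses.sparse_categorical_crossentropy,
              metrics=['accuracy'])
history = model.fit(x_train, y_train, batch_size=64, epochs=5,
                    validation_data=(x_val, y_val))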

AlexNet ARCHITECTURE EX-9

import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import datasets, layers, models, losses

# Load MNIST, pad the 28x28 images to 32x32, scale to [0, 1],
# and add a channel axis
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = tf.pad(x_train, [[0, 0], [2, 2], [2, 2]]) / 255
x_test = tf.pad(x_test, [[0, 0], [2, 2], [2, 2]]) / 255
x_train = tf.expand_dims(x_train, axis=3, name=None)
x_test = tf.expand_dims(x_test, axis=3, name=None)

# Hold out the last 2000 training samples for validation
x_val = x_train[-2000:, :, :, :]
y_val = y_train[-2000:]
x_train = x_train[:-2000, :, :, :]
y_train = y_train[:-2000]

# AlexNet-style stack: resize to 224x224, then large-kernel convolutions
# with local response normalization and overlapping max pooling
model = models.Sequential([
    layers.experimental.preprocessing.Resizing(224, 224,
                                               interpolation="bilinear",
                                               input_shape=x_train.shape[1:]),
    layers.Conv2D(96, 11, strides=4, padding='same'),
    layers.Lambda(tf.nn.local_response_normalization),
    layers.Activation('relu'),
    layers.MaxPooling2D(3, strides=2),
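    # The preview cuts off at the second convolution ("layers.Conv2D(256, 5,
    # strid..."); a hedged sketch of the remaining AlexNet-style layers and
    # head (sizes follow the classic AlexNet; the 10-way softmax and training
    # settings are assumptions, not from the post):
    layers.Conv2D(256, 5, strides=1, padding='same'),
    layers.Lambda(tf.nn.local_response_normalization),
    layers.Activation('relu'),
    layers.MaxPooling2D(3, strides=2),
    layers.Conv2D(384, 3, strides=1, padding='same'),
    layers.Activation('relu'),
    layers.Conv2D(384, 3, strides=1, padding='same'),
    layers.Activation('relu'),
    layers.Conv2D(256, 3, strides=1, padding='same'),
    layers.Activation('relu'),
    layers.MaxPooling2D(3, strides=2),
    layers.Flatten(),
    layers.Dense(4096, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(4096, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(10, activation='softmax'),
])
model.compile(optimizer='adam',
              loss=losses.sparse_categorical_crossentropy,
              metrics=['accuracy'])
history = model.fit(x_train, y_train, batch_size=64, epochs=5,
                    validation_data=(x_val, y_val))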

LeNet ARCHITECTURE EX-8

import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import datasets, layers, models, losses

# Load MNIST, pad the 28x28 images to LeNet-5's 32x32 input size,
# scale to [0, 1], and add a channel axis
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = tf.pad(x_train, [[0, 0], [2, 2], [2, 2]]) / 255
x_test = tf.pad(x_test, [[0, 0], [2, 2], [2, 2]]) / 255
x_train = tf.expand_dims(x_train, axis=3, name=None)
x_test = tf.expand_dims(x_test, axis=3, name=None)

# Hold out the last 2000 training samples for validation
x_val = x_train[-2000:, :, :, :]
y_val = y_train[-2000:]
x_train = x_train[:-2000, :, :, :]
y_train = y_train[:-2000]

# LeNet-5: alternating tanh convolutions and average pooling
model = models.Sequential([
    layers.Conv2D(6, 5, activation='tanh', input_shape=x_train.shape[1:]),
    layers.AveragePooling2D(2),
    layers.Activation('sigmoid'),
    layers.Conv2D(16, 5, activation='tanh'),
    layers.AveragePooling2D(2),
    layers.Activation('sigmoid'),
    layers.Conv2D(120, 5, activation='tanh'),
    layers.Flatten(),
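    # The preview cuts off after Flatten; a hedged sketch of the standard
    # LeNet-5 head and a typical training call (the training settings are
    # assumptions, not from the post):
    layers.Dense(84, activation='tanh'),
    layers.Dense(10, activation='softmax'),
])
model.compile(optimizer='adam',
              loss=losses.sparse_categorical_crossentropy,
              metrics=['accuracy'])
history = model.fit(x_train, y_train, batch_size=64, epochs=5,
                    validation_data=(x_val, y_val))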