Thursday 17 January 2019

Deep Learning Hello World Program


###############################################################################
## The DL (Deep Learning) Hello World Program
## References:
##   https://www.tensorflow.org/tutorials/
##   https://medium.com/the-andela-way/deep-learning-hello-world-e1fc53ea888
###############################################################################

import tensorflow as tf
from keras.datasets import mnist   # standalone keras supplies the dataset; tf.keras builds the model

# MNIST: 60,000 training and 10,000 test images of 28x28 handwritten digits
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# scale pixel values from the 0..255 range down to 0.0..1.0
x_train, x_test = x_train / 255.0, x_test / 255.0

# a simple feed-forward network: flatten the 28x28 images, one hidden layer, softmax output
model = tf.keras.models.Sequential([
  tf.keras.layers.Flatten(                                ),
  tf.keras.layers.Dense  (512 , activation = tf.nn.relu   ),
  tf.keras.layers.Dropout(0.2                             ),
  tf.keras.layers.Dense  (10  , activation = tf.nn.softmax)
])

model.compile(optimizer = 'adam'                           ,
              loss      = 'sparse_categorical_crossentropy',
              metrics   = ['accuracy']                     )

model.fit(x_train, y_train, epochs=5)

model.evaluate(x_test, y_test)   # returns [test loss, test accuracy]

###############################################################################
# Output
###############################################################################
# Using TensorFlow backend.
# Downloading data from https://s3.amazonaws.com/img-datasets/mnist.npz
# 11493376/11490434 [==============================] - 1s 0us/step
# Epoch 1/5
# 60000/60000 [==============================] - 14s 236us/step - loss: 0.2027 - acc: 0.9406
# Epoch 2/5
# 60000/60000 [==============================] - 14s 225us/step - loss: 0.0805 - acc: 0.9756
# Epoch 3/5
# 60000/60000 [==============================] - 13s 222us/step - loss: 0.0517 - acc: 0.9839
# Epoch 4/5
# 60000/60000 [==============================] - 14s 227us/step - loss: 0.0370 - acc: 0.9883
# Epoch 5/5
# 60000/60000 [==============================] - 13s 224us/step - loss: 0.0262 - acc: 0.9917
# 10000/10000 [==============================] - 1s 51us/step
# [0.07304962697861483, 0.9789]

Wednesday 16 January 2019

TensorFlow Keras Models


compile(optimizer                ,
        loss               = None,
        metrics            = None,
        loss_weights       = None,
        sample_weight_mode = None,
        weighted_metrics   = None,
        target_tensors     = None)

fit(x                = None,
    y                = None,
    batch_size       = None,
    epochs           = 1   ,
    verbose          = 1   ,
    callbacks        = None,
    validation_split = 0.0 ,
    validation_data  = None,
    shuffle          = True,
    class_weight     = None,
    sample_weight    = None,
    initial_epoch    = 0   ,
    steps_per_epoch  = None,
    validation_steps = None)

evaluate(x             = None,
         y             = None,
         batch_size    = None,
         verbose       = 1   ,
         sample_weight = None,
         steps         = None)

predict(x,
        batch_size = None,
        verbose    = 0   ,
        steps      = None)

train_on_batch(x, y, sample_weight=None, class_weight=None)
test_on_batch (x, y, sample_weight=None)
predict_on_batch(x)

fit_generator(generator                  ,
              steps_per_epoch     = None ,
              epochs              = 1    ,
              verbose             = 1    ,
              callbacks           = None ,
              validation_data     = None ,
              validation_steps    = None ,
              class_weight        = None ,
              max_queue_size      = 10   ,
              workers             = 1    ,
              use_multiprocessing = False,
              shuffle             = True ,
              initial_epoch       = 0    )

evaluate_generator(generator                  ,
                   steps               = None ,
                   max_queue_size      = 10   ,
                   workers             = 1    ,
                   use_multiprocessing = False,
                   verbose             = 0    )

predict_generator(generator                  ,
                  steps               = None ,
                  max_queue_size      = 10   ,
                  workers             = 1    ,
                  use_multiprocessing = False,
                  verbose             = 0    )

get_layer(name=None, index=None)
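
A minimal sketch tying these methods together, using randomly generated toy
data (shapes and sizes chosen only for illustration):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

x = np.random.random((100, 8))           # 100 samples, 8 features
y = np.random.randint(2, size=(100, 1))  # binary labels

model = Sequential([Dense(16, activation='relu', input_shape=(8,)),
                    Dense(1 , activation='sigmoid')])

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x, y, batch_size=32, epochs=3, validation_split=0.1)
loss, acc   = model.evaluate(x, y)
predictions = model.predict(x[:5])
first_layer = model.get_layer(index=0)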

Reference: keras.io

TensorFlow Keras Constraints


keras.constraints.MaxNorm(max_value=2, axis=0)
keras.constraints.NonNeg()
keras.constraints.UnitNorm(axis=0)
keras.constraints.MinMaxNorm(min_value=0.0, max_value=1.0, rate=1.0, axis=0)
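
A short sketch of attaching a constraint to a layer (layer sizes are arbitrary):

from keras.layers import Dense
from keras.constraints import MaxNorm

# cap the norm of each incoming-weight vector at 3.0
layer = Dense(64, kernel_constraint=MaxNorm(max_value=3.0), input_shape=(20,))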

Reference: keras.io

TensorFlow Keras Regularizers



keras.regularizers.l1   (0.)
keras.regularizers.l2   (0.)
keras.regularizers.l1_l2(l1 = 0.01, l2 = 0.01)
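
A short sketch of adding weight and activity penalties to a layer (penalty
values and layer size are arbitrary):

from keras.layers import Dense
from keras import regularizers

layer = Dense(64,
              kernel_regularizer   = regularizers.l2(0.01) ,
              activity_regularizer = regularizers.l1(0.001),
              input_shape          = (20,)                 )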

Reference: keras.io

TensorFlow Keras Initializers


keras.initializers.Initializer()
keras.initializers.Zeros()
keras.initializers.Ones()
keras.initializers.Constant(value=0)
keras.initializers.RandomNormal   (mean   =  0.0 , stddev = 0.05, seed = None)
keras.initializers.RandomUniform  (minval = -0.05, maxval = 0.05, seed = None)
keras.initializers.TruncatedNormal(mean   =  0.0 , stddev = 0.05, seed = None)

keras.initializers.VarianceScaling(scale        = 1.0     ,
                                   mode         = 'fan_in',
                                   distribution = 'normal',
                                   seed         = None    )

keras.initializers.Orthogonal(gain=1.0, seed=None)
keras.initializers.Identity  (gain=1.0)

keras.initializers.lecun_uniform (seed=None)
keras.initializers.glorot_normal (seed=None)
keras.initializers.glorot_uniform(seed=None)
keras.initializers.he_normal     (seed=None)
keras.initializers.lecun_normal  (seed=None)
keras.initializers.he_uniform    (seed=None)
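
A short sketch of passing an initializer to a layer (values are arbitrary):

from keras.layers import Dense
from keras.initializers import RandomNormal

layer = Dense(64,
              kernel_initializer = RandomNormal(mean=0.0, stddev=0.05, seed=42),
              bias_initializer   = 'zeros',
              input_shape        = (20,))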

Reference: keras.io

TensorFlow Keras Applications



keras.applications.xception.Xception(include_top  = True      ,
                                     weights      = 'imagenet',
                                     input_tensor = None      ,
                                     input_shape  = None      ,
                                     pooling      = None      ,
                                     classes      = 1000      )

keras.applications.vgg16.VGG16(include_top  = True      ,
                               weights      = 'imagenet',
                               input_tensor = None      ,
                               input_shape  = None      ,
                               pooling      = None      ,
                               classes      = 1000      )

keras.applications.vgg19.VGG19(include_top  = True      ,
                               weights      = 'imagenet',
                               input_tensor = None      ,
                               input_shape  = None      ,
                               pooling      = None      ,
                               classes      = 1000      )

keras.applications.resnet50.ResNet50(include_top  = True      ,
                                     weights      = 'imagenet',
                                     input_tensor = None      ,
                                     input_shape  = None      ,
                                     pooling      = None      ,
                                     classes      = 1000      )

keras.applications.inception_v3.InceptionV3(include_top  = True      ,
                                            weights      = 'imagenet',
                                            input_tensor = None      ,
                                            input_shape  = None      ,
                                            pooling      = None      ,
                                            classes      = 1000      )

keras.applications.inception_resnet_v2.InceptionResNetV2(include_top  = True      ,
                                                         weights      = 'imagenet',
                                                         input_tensor = None      ,
                                                         input_shape  = None      ,
                                                         pooling      = None      ,
                                                         classes      = 1000      )

keras.applications.mobilenet.MobileNet(input_shape      = None,
                                       alpha            = 1.0,
                                       depth_multiplier = 1,
                                       dropout          = 1e-3,
                                       include_top      = True,
                                       weights          = 'imagenet',
                                       input_tensor     = None,
                                       pooling          = None,
                                       classes          = 1000)

keras.applications.densenet.DenseNet121(include_top  = True      ,
                                        weights      = 'imagenet',
                                        input_tensor = None      ,
                                        input_shape  = None      ,
                                        pooling      = None      ,
                                        classes      = 1000      )

keras.applications.densenet.DenseNet169(include_top  = True      ,
                                        weights      = 'imagenet',
                                        input_tensor = None      ,
                                        input_shape  = None      ,
                                        pooling      = None      ,
                                        classes      = 1000      )

keras.applications.densenet.DenseNet201(include_top  = True      ,
                                        weights      = 'imagenet',
                                        input_tensor = None      ,
                                        input_shape  = None      ,
                                        pooling      = None      ,
                                        classes      = 1000      )

keras.applications.nasnet.NASNetLarge(input_shape  = None      ,
                                      include_top  = True      ,
                                      weights      = 'imagenet',
                                      input_tensor = None      ,
                                      pooling      = None      ,
                                      classes      = 1000      )

keras.applications.nasnet.NASNetMobile(input_shape  = None      ,
                                       include_top  = True      ,
                                       weights      = 'imagenet',
                                       input_tensor = None      ,
                                       pooling      = None      ,
                                       classes      = 1000      )

keras.applications.mobilenet_v2.MobileNetV2(input_shape      = None      ,
                                            alpha            = 1.0       ,
                                            depth_multiplier = 1         ,
                                            include_top      = True      ,
                                            weights          = 'imagenet',
                                            input_tensor     = None      ,
                                            pooling          = None      ,
                                            classes          = 1000      )
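
A sketch of classifying a single image with one of these pretrained models;
'elephant.jpg' is a placeholder path for any local RGB image:

import numpy as np
from keras.preprocessing import image
from keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions

model = ResNet50(weights='imagenet')

img = image.load_img('elephant.jpg', target_size=(224, 224))
x   = image.img_to_array(img)
x   = preprocess_input(np.expand_dims(x, axis=0))

preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])   # [(class_id, class_name, probability), ...]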

Reference: keras.io

TensorFlow Keras Callback Functions


keras.callbacks.BaseLogger(stateful_metrics=None)
keras.callbacks.TerminateOnNaN()
keras.callbacks.ProgbarLogger(count_mode='samples', stateful_metrics=None)
keras.callbacks.History()

keras.callbacks.ModelCheckpoint(filepath                      ,
                                monitor           = 'val_loss',
                                verbose           = 0         ,
                                save_best_only    = False     ,
                                save_weights_only = False     ,
                                mode              = 'auto'    ,
                                period            = 1         )

keras.callbacks.EarlyStopping(monitor              = 'val_loss',
                              min_delta            = 0         ,
                              patience             = 0         ,
                              verbose              = 0         ,
                              mode                 = 'auto'    ,
                              baseline             = None      ,
                              restore_best_weights = False     )

keras.callbacks.RemoteMonitor(root         = 'http://localhost:9000',
                              path         = '/publish/epoch/end/'  ,
                              field        = 'data'                 ,
                              headers      = None                   ,
                              send_as_json = False                  )

keras.callbacks.LearningRateScheduler(schedule, verbose=0)

keras.callbacks.TensorBoard(log_dir                = './logs',
                            histogram_freq         = 0       ,
                            batch_size             = 32      ,
                            write_graph            = True    ,
                            write_grads            = False   ,
                            write_images           = False   ,
                            embeddings_freq        = 0       ,
                            embeddings_layer_names = None    ,
                            embeddings_metadata    = None    ,
                            embeddings_data        = None    ,
                            update_freq            = 'epoch' )

keras.callbacks.ReduceLROnPlateau(monitor   = 'val_loss',
                                  factor    = 0.1       ,
                                  patience  = 10        ,
                                  verbose   = 0         ,
                                  mode      = 'auto'    ,
                                  min_delta = 0.0001    ,
                                  cooldown  = 0         ,
                                  min_lr    = 0         )

keras.callbacks.CSVLogger(filename, separator=',', append=False)

keras.callbacks.LambdaCallback(on_epoch_begin = None, on_epoch_end = None,
                               on_batch_begin = None, on_batch_end = None,
                               on_train_begin = None, on_train_end = None)
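
A sketch of passing callbacks to fit(), assuming a compiled model and training
arrays like those in the Hello World program above:

from keras.callbacks import EarlyStopping, ModelCheckpoint

callbacks = [
    EarlyStopping  (monitor='val_loss', patience=3, restore_best_weights=True),
    ModelCheckpoint(filepath='best_model.h5', save_best_only=True)
]

model.fit(x_train, y_train, epochs=50, validation_split=0.1, callbacks=callbacks)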

Reference: keras.io

TensorFlow Keras Activation Functions


keras.activations.softmax     (x, axis  = -1 )
keras.activations.elu         (x, alpha = 1.0)
keras.activations.selu        (x)
keras.activations.softplus    (x)
keras.activations.softsign    (x)
keras.activations.relu        (x, alpha = 0.0, max_value=None, threshold=0.0)
keras.activations.tanh        (x)
keras.activations.sigmoid     (x)
keras.activations.hard_sigmoid(x)
keras.activations.exponential (x)
keras.activations.linear      (x)
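
A short sketch: an activation may be given by name or as a function object:

from keras.layers import Dense
from keras import activations

hidden = Dense(64, activation='relu')
output = Dense(10, activation=activations.softmax)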

Reference: keras.io

TensorFlow Keras Optimizers



keras.optimizers.SGD     (lr = 0.01 , momentum=0.0, decay=0.0, nesterov=False)
keras.optimizers.RMSprop (lr = 0.001, rho = 0.9   , epsilon=None, decay=0.0)
keras.optimizers.Adagrad (lr = 0.01 ,               epsilon=None, decay=0.0)
keras.optimizers.Adadelta(lr = 1.0  , rho = 0.95  , epsilon=None, decay=0.0)

keras.optimizers.Adam(lr      = 0.001,
                      beta_1  = 0.9  ,
                      beta_2  = 0.999,
                      epsilon = None ,
                      decay   = 0.0  ,
                      amsgrad = False)

keras.optimizers.Adamax(lr      = 0.002,
                        beta_1  = 0.9  ,
                        beta_2  = 0.999,
                        epsilon = None ,
                        decay   = 0.0  )

keras.optimizers.Nadam(lr             = 0.002,
                       beta_1         = 0.9  ,
                       beta_2         = 0.999,
                       epsilon        = None ,
                       schedule_decay = 0.004)
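
A sketch of compiling with an optimizer object instead of a name string, which
allows non-default hyperparameters (assumes a model built as above):

from keras.optimizers import SGD

model.compile(optimizer = SGD(lr=0.01, momentum=0.9, nesterov=True),
              loss      = 'categorical_crossentropy'               ,
              metrics   = ['accuracy']                             )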

Reference: keras.io

TensorFlow Keras Metrics



keras.metrics.binary_accuracy                  (y_true, y_pred       )
keras.metrics.categorical_accuracy             (y_true, y_pred       )
keras.metrics.sparse_categorical_accuracy      (y_true, y_pred       )
keras.metrics.top_k_categorical_accuracy       (y_true, y_pred, k = 5)
keras.metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k = 5)
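
A sketch of requesting extra metrics at compile time (assumes an existing model
trained on one-hot labels):

from keras import metrics

model.compile(optimizer = 'adam'                                          ,
              loss      = 'categorical_crossentropy'                      ,
              metrics   = ['accuracy', metrics.top_k_categorical_accuracy])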

Reference: keras.io

TensorFlow Keras Utilities



keras.utils.to_categorical(y, num_classes=None, dtype='float32')
keras.utils.normalize(x, axis=-1, order=2)

keras.utils.get_file(fname                      ,
                     origin                     ,
                     untar          = False     ,
                     md5_hash       = None      ,
                     file_hash      = None      ,
                     cache_subdir   = 'datasets',
                     hash_algorithm = 'auto'    ,
                     extract        = False     ,
                     archive_format = 'auto'    ,
                     cache_dir      = None      )

keras.utils.print_summary(model             ,
                          line_length = None,
                          positions   = None,
                          print_fn    = None)

keras.utils.plot_model(model                         ,
                       to_file          = 'model.png',
                       show_shapes      = False      ,
                       show_layer_names = True       ,
                       rankdir          = 'TB'       ,
                       expand_nested    = False      ,
                       dpi              = 96         )
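
A sketch of two common utilities, assuming integer labels y_train and a built
model as in the earlier examples (plot_model also needs pydot and graphviz):

from keras.utils import to_categorical, plot_model

y_one_hot = to_categorical(y_train, num_classes=10)   # one-hot encode labels 0..9
plot_model(model, to_file='model.png', show_shapes=True)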

Reference: keras.io

TensorFlow Keras Loss Functions


keras.losses.mean_squared_error             (y_true, y_pred)
keras.losses.mean_absolute_error            (y_true, y_pred)
keras.losses.mean_absolute_percentage_error (y_true, y_pred)
keras.losses.mean_squared_logarithmic_error (y_true, y_pred)
keras.losses.squared_hinge                  (y_true, y_pred)
keras.losses.hinge                          (y_true, y_pred)
keras.losses.categorical_hinge              (y_true, y_pred)
keras.losses.logcosh                        (y_true, y_pred)
keras.losses.categorical_crossentropy       (y_true, y_pred)
keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
keras.losses.binary_crossentropy            (y_true, y_pred)
keras.losses.kullback_leibler_divergence    (y_true, y_pred)
keras.losses.poisson                        (y_true, y_pred)
keras.losses.cosine_proximity               (y_true, y_pred)
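
A sketch showing that a loss may be given as a function reference or by its
string shorthand (assumes an existing model):

from keras import losses

model.compile(optimizer='adam', loss=losses.mean_squared_error)
model.compile(optimizer='adam', loss='mse')   # equivalent string form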

Reference: keras.io

TensorFlow Keras Layers


####
model = Sequential()
model.add(Dense(32, input_shape=(16,)))
model.add(Conv2D(64                       ,
                 (3, 3)                   ,
                 input_shape = (3, 32, 32),
                 padding     = 'same'     ))

model.add(LSTM(32))

####
keras.engine.input_layer.Input()

keras.layers.Dense(units                                  ,
                   activation           = None            ,
                   use_bias             = True            ,
                   kernel_initializer   = 'glorot_uniform',
                   bias_initializer     = 'zeros'         ,
                   kernel_regularizer   = None            ,
                   bias_regularizer     = None            ,
                   activity_regularizer = None            ,
                   kernel_constraint    = None            ,
                   bias_constraint      = None            )

keras.layers.Activation(activation)
keras.layers.Dropout(rate, noise_shape=None, seed=None)
keras.layers.Flatten(data_format=None)
keras.layers.Reshape(target_shape)
keras.layers.Permute(dims)
keras.layers.RepeatVector(n)
keras.layers.Lambda(function, output_shape=None, mask=None, arguments=None)
keras.layers.ActivityRegularization(l1=0.0, l2=0.0)
keras.layers.Masking(mask_value=0.0)

keras.layers.SpatialDropout1D(rate)
keras.layers.SpatialDropout2D(rate, data_format=None)
keras.layers.SpatialDropout3D(rate, data_format=None)

keras.layers.Conv1D(filters                                ,
                    kernel_size                            ,
                    strides              = 1               ,
                    padding              = 'valid'         ,
                    data_format          = 'channels_last' ,
                    dilation_rate        = 1               ,
                    activation           = None            ,
                    use_bias             = True            ,
                    kernel_initializer   = 'glorot_uniform',
                    bias_initializer     = 'zeros'         ,
                    kernel_regularizer   = None            ,
                    bias_regularizer     = None            ,
                    activity_regularizer = None            ,
                    kernel_constraint    = None            ,
                    bias_constraint      = None            )

keras.layers.Conv2D(filters                                ,
                    kernel_size                            ,
                    strides              = (1, 1)          ,
                    padding              = 'valid'         ,
                    data_format          = None            ,
                    dilation_rate        = (1, 1)          ,
                    activation           = None            ,
                    use_bias             = True            ,
                    kernel_initializer   = 'glorot_uniform',
                    bias_initializer     = 'zeros'         ,
                    kernel_regularizer   = None            ,
                    bias_regularizer     = None            ,
                    activity_regularizer = None            ,
                    kernel_constraint    = None            ,
                    bias_constraint      = None            )

keras.layers.SeparableConv1D(filters,
                             kernel_size                             ,
                             strides               = 1               ,
                             padding               = 'valid'         ,
                             data_format           = 'channels_last' ,
                             dilation_rate         = 1               ,
                             depth_multiplier      = 1               ,
                             activation            = None            ,
                             use_bias              = True            ,
                             depthwise_initializer = 'glorot_uniform',
                             pointwise_initializer = 'glorot_uniform',
                             bias_initializer      = 'zeros'         ,
                             depthwise_regularizer = None            ,
                             pointwise_regularizer = None            ,
                             bias_regularizer      = None            ,
                             activity_regularizer  = None            ,
                             depthwise_constraint  = None            ,
                             pointwise_constraint  = None            ,
                             bias_constraint       = None            )

keras.layers.SeparableConv2D(filters,
                             kernel_size                             ,
                             strides               = (1, 1)          ,
                             padding               = 'valid'         ,
                             data_format           = None            ,
                             dilation_rate         = (1, 1)          ,
                             depth_multiplier      = 1               ,
                             activation            = None            ,
                             use_bias              = True            ,
                             depthwise_initializer = 'glorot_uniform',
                             pointwise_initializer = 'glorot_uniform',
                             bias_initializer      = 'zeros'         ,
                             depthwise_regularizer = None            ,
                             pointwise_regularizer = None            ,
                             bias_regularizer      = None            ,
                             activity_regularizer  = None            ,
                             depthwise_constraint  = None            ,
                             pointwise_constraint  = None            ,
                             bias_constraint       = None            )

keras.layers.DepthwiseConv2D(kernel_size                             ,
                             strides               = (1, 1)          ,
                             padding               = 'valid'         ,
                             depth_multiplier      = 1               ,
                             data_format           = None            ,
                             activation            = None            ,
                             use_bias              = True            ,
                             depthwise_initializer = 'glorot_uniform',
                             bias_initializer      = 'zeros'         ,
                             depthwise_regularizer = None            ,
                             bias_regularizer      = None            ,
                             activity_regularizer  = None            ,
                             depthwise_constraint  = None            ,
                             bias_constraint       = None            )

keras.layers.Conv2DTranspose(filters                                ,
                             kernel_size                            ,
                             strides              = (1, 1)          ,
                             padding              = 'valid'         ,
                             output_padding       = None            ,
                             data_format          = None            ,
                             dilation_rate        = (1, 1)          ,
                             activation           = None            ,
                             use_bias             = True            ,
                             kernel_initializer   = 'glorot_uniform',
                             bias_initializer     = 'zeros'         ,
                             kernel_regularizer   = None            ,
                             bias_regularizer     = None            ,
                             activity_regularizer = None            ,
                             kernel_constraint    = None            ,
                             bias_constraint      = None            )

keras.layers.Conv3D(filters,
                    kernel_size                            ,
                    strides              = (1, 1, 1)       ,
                    padding              = 'valid'         ,
                    data_format          = None            ,
                    dilation_rate        = (1, 1, 1)       ,
                    activation           = None            ,
                    use_bias             = True            ,
                    kernel_initializer   = 'glorot_uniform',
                    bias_initializer     = 'zeros'         ,
                    kernel_regularizer   = None            ,
                    bias_regularizer     = None            ,
                    activity_regularizer = None            ,
                    kernel_constraint    = None            ,
                    bias_constraint      = None            )

keras.layers.Conv3DTranspose(filters,
                             kernel_size                            ,
                             strides              = (1, 1, 1)       ,
                             padding              = 'valid'         ,
                             output_padding       = None            ,
                             data_format          = None            ,
                             activation           = None            ,
                             use_bias             = True            ,
                             kernel_initializer   = 'glorot_uniform',
                             bias_initializer     = 'zeros'         ,
                             kernel_regularizer   = None            ,
                             bias_regularizer     = None            ,
                             activity_regularizer = None            ,
                             kernel_constraint    = None            ,
                             bias_constraint      = None            )

keras.layers.Cropping1D(cropping=(1, 1)                                    )
keras.layers.Cropping2D(cropping=((0, 0), (0, 0)        ), data_format=None)
keras.layers.Cropping3D(cropping=((1, 1), (1, 1), (1, 1)), data_format=None)

keras.layers.UpSampling1D(size=2)

keras.layers.UpSampling2D(size          = (2, 2)   ,
                          data_format   = None     ,
                          interpolation = 'nearest')

keras.layers.UpSampling3D(size=(2, 2, 2), data_format=None)
keras.layers.ZeroPadding1D(padding=1)
keras.layers.ZeroPadding2D(padding=(1, 1), data_format=None)
keras.layers.ZeroPadding3D(padding=(1, 1, 1), data_format=None)

keras.layers.MaxPooling1D(pool_size   = 2              ,
                          strides     = None           ,
                          padding     = 'valid'        ,
                          data_format = 'channels_last')

keras.layers.MaxPooling2D(pool_size   = (2, 2) ,
                          strides     = None   ,
                          padding     = 'valid',
                          data_format = None   )

keras.layers.MaxPooling3D(pool_size   = (2, 2, 2),
                          strides     = None     ,
                          padding     = 'valid'  ,
                          data_format = None     )

keras.layers.AveragePooling1D(pool_size   = 2              ,
                              strides     = None           ,
                              padding     = 'valid'        ,
                              data_format = 'channels_last')

keras.layers.AveragePooling2D(pool_size   = (2, 2) ,
                              strides     = None   ,
                              padding     = 'valid',
                              data_format = None   )

keras.layers.AveragePooling3D(pool_size   = (2, 2, 2),
                              strides     = None     ,
                              padding     = 'valid'  ,
                              data_format = None     )

keras.layers.GlobalMaxPooling1D    (data_format = 'channels_last')
keras.layers.GlobalAveragePooling1D(data_format = 'channels_last')
keras.layers.GlobalMaxPooling2D    (data_format = None           )
keras.layers.GlobalAveragePooling2D(data_format = None           )
keras.layers.GlobalMaxPooling3D    (data_format = None           )
keras.layers.GlobalAveragePooling3D(data_format = None           )

keras.layers.LocallyConnected1D(filters                                ,
                                kernel_size                            ,
                                strides              = 1               ,
                                padding              = 'valid'         ,
                                data_format          = None            ,
                                activation           = None            ,
                                use_bias             = True            ,
                                kernel_initializer   = 'glorot_uniform',
                                bias_initializer     = 'zeros'         ,
                                kernel_regularizer   = None            ,
                                bias_regularizer     = None            ,
                                activity_regularizer = None            ,
                                kernel_constraint    = None            ,
                                bias_constraint      = None            )

keras.layers.LocallyConnected2D(filters                                ,
                                kernel_size                            ,
                                strides              = (1, 1)          ,
                                padding              = 'valid'         ,
                                data_format          = None            ,
                                activation           = None            ,
                                use_bias             = True            ,
                                kernel_initializer   = 'glorot_uniform',
                                bias_initializer     = 'zeros'         ,
                                kernel_regularizer   = None            ,
                                bias_regularizer     = None            ,
                                activity_regularizer = None            ,
                                kernel_constraint    = None            ,
                                bias_constraint      = None            )

keras.layers.RNN(cell                    ,
                 return_sequences = False,
                 return_state     = False,
                 go_backwards     = False,
                 stateful         = False,
                 unroll           = False)

keras.layers.SimpleRNN(units                                   ,
                       activation            = 'tanh'          ,
                       use_bias              = True            ,
                       kernel_initializer    = 'glorot_uniform',
                       recurrent_initializer = 'orthogonal'    ,
                       bias_initializer      = 'zeros'         ,
                       kernel_regularizer    = None            ,
                       recurrent_regularizer = None            ,
                       bias_regularizer      = None            ,
                       activity_regularizer  = None            ,
                       kernel_constraint     = None            ,
                       recurrent_constraint  = None            ,
                       bias_constraint       = None            ,
                       dropout               = 0.0             ,
                       recurrent_dropout     = 0.0             ,
                       return_sequences      = False           ,
                       return_state          = False           ,
                       go_backwards          = False           ,
                       stateful              = False           ,
                       unroll                = False           )

keras.layers.GRU(units                                   ,
                 activation            = 'tanh'          ,
                 recurrent_activation  = 'hard_sigmoid'  ,
                 use_bias              = True            ,
                 kernel_initializer    = 'glorot_uniform',
                 recurrent_initializer = 'orthogonal'    ,
                 bias_initializer      = 'zeros'         ,
                 kernel_regularizer    = None            ,
                 recurrent_regularizer = None            ,
                 bias_regularizer      = None            ,
                 activity_regularizer  = None            ,
                 kernel_constraint     = None            ,
                 recurrent_constraint  = None            ,
                 bias_constraint       = None            ,
                 dropout               = 0.0             ,            
                 recurrent_dropout     = 0.0             ,
                 implementation        = 1               ,
                 return_sequences      = False           ,
                 return_state          = False           ,
                 go_backwards          = False           ,
                 stateful              = False           ,
                 unroll                = False           ,
                 reset_after           = False           )

keras.layers.LSTM(units                                   ,
                  activation            = 'tanh'          ,
                  recurrent_activation  = 'hard_sigmoid'  ,
                  use_bias              = True            ,
                  kernel_initializer    = 'glorot_uniform',
                  recurrent_initializer = 'orthogonal'    ,
                  bias_initializer      = 'zeros'         ,
                  unit_forget_bias      = True            ,
                  kernel_regularizer    = None            ,
                  recurrent_regularizer = None            ,
                  bias_regularizer      = None            ,
                  activity_regularizer  = None            ,
                  kernel_constraint     = None            ,
                  recurrent_constraint  = None            ,
                  bias_constraint       = None            ,
                  dropout               = 0.0             ,
                  recurrent_dropout     = 0.0             ,
                  implementation        = 1               ,
                  return_sequences      = False           ,
                  return_state          = False           ,
                  go_backwards          = False           ,
                  stateful              = False           ,
                  unroll                = False           )

keras.layers.ConvLSTM2D(filters                                 ,
                        kernel_size                             ,
                        strides               = (1, 1)          ,
                        padding               = 'valid'         ,
                        data_format           = None            ,
                        dilation_rate         = (1, 1)          ,
                        activation            = 'tanh'          ,
                        recurrent_activation  = 'hard_sigmoid'  ,
                        use_bias              = True            ,
                        kernel_initializer    = 'glorot_uniform',
                        recurrent_initializer = 'orthogonal'    ,
                        bias_initializer      = 'zeros'         ,
                        unit_forget_bias      = True            ,
                        kernel_regularizer    = None            ,
                        recurrent_regularizer = None            ,
                        bias_regularizer      = None            ,
                        activity_regularizer  = None            ,
                        kernel_constraint     = None            ,
                        recurrent_constraint  = None            ,
                        bias_constraint       = None            ,
                        return_sequences      = False           ,
                        go_backwards          = False           ,
                        stateful              = False           ,
                        dropout               = 0.0             ,
                        recurrent_dropout     = 0.0             )

keras.layers.SimpleRNNCell(units                                   ,
                           activation            = 'tanh'          ,
                           use_bias              = True            ,
                           kernel_initializer    = 'glorot_uniform',
                           recurrent_initializer = 'orthogonal'    ,
                           bias_initializer      = 'zeros'         ,
                           kernel_regularizer    = None            ,
                           recurrent_regularizer = None            ,
                           bias_regularizer      = None            ,
                           kernel_constraint     = None            ,
                           recurrent_constraint  = None            ,
                           bias_constraint       = None            ,
                           dropout               = 0.0             ,
                           recurrent_dropout     = 0.0             )

keras.layers.GRUCell(units                                   ,
                     activation            = 'tanh'          ,
                     recurrent_activation  = 'hard_sigmoid'  ,
                     use_bias              = True            ,
                     kernel_initializer    = 'glorot_uniform',
                     recurrent_initializer = 'orthogonal'    ,
                     bias_initializer      = 'zeros'         ,
                     kernel_regularizer    = None            ,
                     recurrent_regularizer = None            ,
                     bias_regularizer      = None            ,
                     kernel_constraint     = None            ,
                     recurrent_constraint  = None            ,
                     bias_constraint       = None            ,
                     dropout               = 0.0             ,
                     recurrent_dropout     = 0.0             ,
                     implementation        = 1               ,
                     reset_after           = False           )

keras.layers.LSTMCell(units                                   ,
                      activation            = 'tanh'          ,
                      recurrent_activation  = 'hard_sigmoid'  ,
                      use_bias              = True            ,
                      kernel_initializer    = 'glorot_uniform',
                      recurrent_initializer = 'orthogonal'    ,
                      bias_initializer      = 'zeros'         ,
                      unit_forget_bias      = True            ,
                      kernel_regularizer    = None            ,
                      recurrent_regularizer = None            ,
                      bias_regularizer      = None            ,
                      kernel_constraint     = None            ,
                      recurrent_constraint  = None            ,
                      bias_constraint       = None            ,
                      dropout               = 0.0             ,
                      recurrent_dropout     = 0.0             ,
                      implementation        = 1               )

keras.layers.CuDNNGRU(units                                   ,
                      kernel_initializer    = 'glorot_uniform',
                      recurrent_initializer = 'orthogonal'    ,
                      bias_initializer      = 'zeros'         ,
                      kernel_regularizer    = None            ,
                      recurrent_regularizer = None            ,
                      bias_regularizer      = None            ,
                      activity_regularizer  = None            ,
                      kernel_constraint     = None            ,
                      recurrent_constraint  = None            ,
                      bias_constraint       = None            ,
                      return_sequences      = False           ,
                      return_state          = False           ,
                      stateful              = False           )

keras.layers.CuDNNLSTM(units                                   ,
                       kernel_initializer    = 'glorot_uniform',
                       recurrent_initializer = 'orthogonal'    ,
                       bias_initializer      = 'zeros'         ,
                       unit_forget_bias      = True            ,
                       kernel_regularizer    = None            ,
                       recurrent_regularizer = None            ,
                       bias_regularizer      = None            ,
                       activity_regularizer  = None            ,
                       kernel_constraint     = None            ,
                       recurrent_constraint  = None            ,
                       bias_constraint       = None            ,
                       return_sequences      = False           ,
                       return_state          = False           ,
                       stateful              = False           )

keras.layers.Embedding(input_dim                         ,
                       output_dim                        ,
                       embeddings_initializer = 'uniform',
                       embeddings_regularizer = None     ,
                       activity_regularizer   = None     ,
                       embeddings_constraint  = None     ,
                       mask_zero              = False    ,
                       input_length           = None     )

keras.layers.LeakyReLU(alpha=0.3)

keras.layers.PReLU(alpha_initializer = 'zeros',
                   alpha_regularizer = None   ,
                   alpha_constraint  = None   ,
                   shared_axes       = None   )

keras.layers.ELU            (alpha     = 1.0 )
keras.layers.ThresholdedReLU(theta     = 1.0 )
keras.layers.Softmax        (axis      = -1  )
keras.layers.ReLU           (max_value=None, negative_slope=0.0, threshold=0.0)

keras.layers.BatchNormalization(axis                        = -1     ,
                                momentum                    = 0.99   ,
                                epsilon                     = 0.001  ,
                                center                      = True   ,
                                scale                       = True   ,
                                beta_initializer            = 'zeros',
                                gamma_initializer           = 'ones' ,
                                moving_mean_initializer     = 'zeros',
                                moving_variance_initializer = 'ones' ,
                                beta_regularizer            = None   ,
                                gamma_regularizer           = None   ,
                                beta_constraint             = None   ,
                                gamma_constraint            = None   )

keras.layers.GaussianNoise  (stddev)
keras.layers.GaussianDropout(rate  )
keras.layers.AlphaDropout   (rate  , noise_shape=None, seed=None)
keras.layers.TimeDistributed(layer )

####
keras.layers.Add     ()
keras.layers.Subtract()
keras.layers.Multiply()
keras.layers.Average ()
keras.layers.Maximum ()

keras.layers.Concatenate(axis = -1)
keras.layers.Dot        (axes     , normalize=False)

####
keras.layers.add        (inputs)
keras.layers.subtract   (inputs)
keras.layers.multiply   (inputs)
keras.layers.average    (inputs)
keras.layers.maximum    (inputs)
keras.layers.concatenate(inputs, axis = -1)
keras.layers.dot        (inputs, axes     , normalize = False)
####
layer.get_weights()
layer.set_weights(weights)
layer.get_config()
layer.input
layer.output
layer.input_shape
layer.output_shape
layer.get_input_at       (node_index)
layer.get_output_at      (node_index)
layer.get_input_shape_at (node_index)
layer.get_output_shape_at(node_index)
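
A small sketch of building a model from these layers and then inspecting one
of them with the attributes listed above (shapes are arbitrary):

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    MaxPooling2D(pool_size=(2, 2)),
    Flatten(),
    Dense(10, activation='softmax')
])

conv = model.get_layer(index=0)
print(conv.output_shape)               # (None, 26, 26, 32)
weights, biases = conv.get_weights()
print(weights.shape, biases.shape)     # (3, 3, 1, 32) (32,)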

Reference: keras.io

TensorFlow Keras Input


####
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
####
fashion_mnist = keras.datasets.fashion_mnist
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
####
imdb = keras.datasets.imdb
(train_data, train_labels), \
(test_data , test_labels ) = imdb.load_data(num_words=10000)
####
import pandas as pd

dataset_path = keras.utils.get_file(
                 "auto-mpg.data",
                 "https://archive.ics.uci.edu/ml/" +
                   "machine-learning-databases/auto-mpg/auto-mpg.data")
column_names = ['MPG'   , 'Cylinders'   , 'Displacement', 'Horsepower',
                'Weight', 'Acceleration', 'Model Year'  , 'Origin'    ]

raw_dataset = pd.read_csv(dataset_path      , names            = column_names,
                          na_values    = "?", comment          = '\t'        ,
                          sep          = " ", skipinitialspace = True        )
####
from keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
####
from keras.datasets import cifar100
(x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode='fine')
####
from keras.datasets import reuters
(x_train, y_train), (x_test, y_test) = reuters.load_data(
                                                 path       = "reuters.npz",
                                                 num_words  = None         ,
                                                 skip_top   = 0            ,
                                                 maxlen     = None         ,
                                                 test_split = 0.2          ,
                                                 seed       = 113          ,
                                                 start_char = 1            ,
                                                 oov_char   = 2            ,
                                                 index_from = 3            )
####
from keras.datasets import boston_housing
(x_train, y_train), (x_test, y_test) = boston_housing.load_data()
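####
# A typical next step after loading (a sketch using the MNIST arrays loaded in
# the first snippet above): scale pixels to [0, 1] and one-hot encode labels.
from keras.utils import to_categorical

x_train = x_train.astype('float32') / 255.0
x_test  = x_test.astype('float32')  / 255.0
y_train = to_categorical(y_train, num_classes=10)
y_test  = to_categorical(y_test , num_classes=10)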

Reference: keras.io

Sunday 13 January 2019

Tensor Broadcasting, Adding, Multiplying, Reshaping

Paste the code below (but not the output section) into a Colab (https://colab.research.google.com/) Jupyter notebook if you would like to execute it.
-------------------------------------------------------------------------------------------------


# Broadcasting - enlarging a tensor to match another one by repeating its data.
###############################################################################
import numpy as np

x1 = np.array([1,2,3])

print('x1: '); print (x1)
print('x1.ndim:  ', x1.ndim)
print('x1.shape: ', x1.shape)

x2 = np.array([[1,2,3],[4,5,6]])
print('x2: ') ; print (x2)
print('x2.ndim:  ', x2.ndim)
print('x2.shape: ', x2.shape)

x3 = x1 + x2

print('x3: '); print(x3)
print('x3.ndim:  ', x3.ndim)
print('x3.shape: ', x3.shape)

# Vector Dot Product
###############################################################################
v1 = np.array([1,2,3])
v2 = np.array([1,2,3])
v3 = v1.dot(v2)
print('v3: ', v3)

# Matrix - Vector Multiplication
###############################################################################
m1 = np.array([[1,2,3],[4,5,6],[7,8,9]])
v1 = np.array([10,11,12])
v2 = m1.dot(v1)
print('v2: ', v2)

# Tensor Multiplication
###############################################################################
t1 = np.array([[1,2,3],[4,5,6],[7,8,9]])
t2 = np.array([[10,11,12],[13,14,15],[16,17,18]])
t3 = t1.dot(t2)
print('t3: '); print(t3)

# Reshaping Tensors
###############################################################################
print("t3.shape: ", t3.shape)
t4 = t3.reshape(1,9)
print("t4"); print(t4)
print("t4.shape: ", t4.shape)
t5 = t4.reshape(9,)
print("t5"); print(t5)
print("t5.shape: ", t5.shape)
t6 = t4.reshape(9,1)
print("t6"); print(t6)
print("t6.shape: ", t6.shape)

-------------------------------------------------------------------------------
-- Output
-------------------------------------------------------------------------------
x1: 
[1 2 3]
x1.ndim:   1
x1.shape:  (3,)
x2: 
[[1 2 3]
 [4 5 6]]
x2.ndim:   2
x2.shape:  (2, 3)
x3: 
[[2 4 6]
 [5 7 9]]
x3.ndim:   2
x3.shape:  (2, 3)
v3:  14
v2:  [ 68 167 266]
t3: 
[[ 84  90  96]
 [201 216 231]
 [318 342 366]]
t3.shape:  (3, 3)
t4
[[ 84  90  96 201 216 231 318 342 366]]
t4.shape:  (1, 9)
t5
[ 84  90  96 201 216 231 318 342 366]
t5.shape:  (9,)
t6
[[ 84]
 [ 90]
 [ 96]
 [201]
 [216]
 [231]
 [318]
 [342]
 [366]]
t6.shape:  (9, 1)

Wednesday 9 January 2019

NumPy Scalars, Vectors, Matrices, Tensors Examples

Paste the code below (but not the output section) into a Colab (https://colab.research.google.com/) Jupyter notebook if you would like to execute it.
-------------------------------------------------------------------------------------------------

import numpy as np

# NumPy uses array() to create objects of ndarray type.

###########################################
# Vectors
#   0-Dimensions (aka the origin of a Vector Space)
v0 = np.array(0) # also [0.], [0., 0.], [0., 0., 0.], ...
print('v0      : ', v0      )
print('v0.shape: ', v0.shape)
print('v0.ndim : ', v0.ndim )
#   1-Dimension  (aka Scalar)
v1 = np.array([1.])
print('v1      : ', v1      )
print('v1.shape: ', v1.shape)
print('v1.ndim : ', v1.ndim )
#   2-Dimensions
v2 = np.array([1., 2.])
print('v2      : ', v2      )
print('v2.shape: ', v2.shape)
print('v2.ndim : ', v2.ndim )
#   3-Dimensions
v3 = np.array([1., 2., 3.])
print('v3.shape: ', v3.shape)
print('v3.ndim : ', v3.ndim )
print('v3      : ', v3      )
#   4-Dimensions
v4 = np.array([1., 2., 3., 4.])
print('v4      : ', v4      )
print('v4.shape: ', v4.shape)
print('v4.ndim : ', v4.ndim )

###########################################
# Matrices
#   0-Dimensions (aka Scalar)
m0 = np.array(4.)
print('m0      : ', m0      )
print('m0.shape: ', m0.shape)
print('m0.ndim : ', m0.ndim )
#   1-Dimension (aka Scalar (if one coordinate),
#                aka Vector (if more than one coordinate))
m1 = np.array([1.])
print('m1      : ', m1      )
print('m1.shape: ', m1.shape)
print('m1.ndim : ', m1.ndim )
#   2-Dimensions
m2 = np.array([[1., 2.],
               [3., 4.]])
print('m2      : ', m2      )
print('m2.shape: ', m2.shape)
print('m2.ndim : ', m2.ndim )
#   3-Dimensions
m3 = np.array([[1., 2., 3.],
               [4., 5., 6.],
               [7., 8., 9.]])
print('m3      : ', m3      )
print('m3.shape: ', m3.shape)
print('m3.ndim : ', m3.ndim )
#   4-Dimensions
m4 = np.array([[ 1.,  2.,  3.,  4.],
               [ 5.,  6.,  7.,  8.],
               [ 9., 10., 11., 12.],
               [13., 14., 15., 16.]])
print('m4      : ', m4      )
print('m4.shape: ', m4.shape)
print('m4.ndim : ', m4.ndim )

# Tensors
###########################################
#   0-Dimensions (aka Scalar)
t0 = np.array(4.)
print('t0      : ', t0      )
print('t0.shape: ', t0.shape)
print('t0.ndim : ', t0.ndim )
#   1-Dimension
t1 = np.array([1., 2., 3.])
print('t1      : ', t1      )
print('t1.shape: ', t1.shape)
print('t1.ndim : ', t1.ndim )
#   2-Dimensions
t2 = np.array([[1., 2.],
               [3., 4.]])
print('t2      : ', t2      )
print('t2.shape: ', t2.shape)
print('t2.ndim : ', t2.ndim )
#   3-Dimensions
t3 = np.array([[[ 1.,  2.,  3.],
                [ 4.,  5.,  6.],
                [ 7.,  8.,  9.]],
               [[10., 11., 12.],
                [13., 14., 15.],
                [16., 17., 18.]],
               [[19., 20., 21.],
                [22., 23., 24.],
                [25., 26., 27.]]])
print('t3      : ', t3      )
print('t3.shape: ', t3.shape)
print('t3.ndim : ', t3.ndim )
#   4-Dimensions
t4 = np.array([[[[  1.,   2.,   3.,   4.],
                 [  5.,   6.,   7.,   8.],
                 [  9.,  10.,  11.,  12.],
                 [ 13.,  14.,  15.,  16.]],
                [[ 17.,  18.,  19.,  20.],
                 [ 21.,  22.,  23.,  24.],
                 [ 25.,  26.,  27.,  28.],
                 [ 29.,  30.,  31.,  32.]],
                [[ 33.,  34.,  35.,  36.],
                 [ 37.,  38.,  39.,  40.],
                 [ 41.,  42.,  43.,  44.],
                 [ 45.,  46.,  47.,  48.]],
                [[ 49.,  50.,  51.,  52.],
                 [ 53.,  54.,  55.,  56.],
                 [ 57.,  58.,  59.,  60.],
                 [ 61.,  62.,  63.,  64.]]],
               [[[ 65.,  66.,  67.,  68.],
                 [ 69.,  70.,  71.,  72.],
                 [ 73.,  74.,  75.,  76.],
                 [ 77.,  78.,  79.,  80.]],
                [[ 81.,  82.,  83.,  84.],
                 [ 85.,  86.,  87.,  88.],
                 [ 89.,  90.,  91.,  92.],
                 [ 93.,  94.,  95.,  96.]],
                [[ 97.,  98.,  99., 100.],
                 [101., 102., 103., 104.],
                 [105., 106., 107., 108.],
                 [109., 110., 111., 112.]],
                [[113., 114., 115., 116.],
                 [117., 118., 119., 120.],
                 [121., 122., 123., 124.],
                 [125., 126., 127., 128.]]],
               [[[129., 130., 131., 132.],
                 [133., 134., 135., 136.],
                 [137., 138., 139., 140.],
                 [141., 142., 143., 144.]],
                [[145., 146., 147., 148.],
                 [149., 150., 151., 152.],
                 [153., 154., 155., 156.],
                 [157., 158., 159., 160.]],
                [[161., 162., 163., 164.],
                 [165., 166., 167., 168.],
                 [169., 170., 171., 172.],
                 [173., 174., 175., 176.]],
                [[177., 178., 179., 180.],
                 [181., 182., 183., 184.],
                 [185., 186., 187., 188.],
                 [189., 190., 191., 192.]]],
               [[[193., 194., 195., 196.],
                 [197., 198., 199., 200.],
                 [201., 202., 203., 204.],
                 [205., 206., 207., 208.]],
                [[209., 210., 211., 212.],
                 [213., 214., 215., 216.],
                 [217., 218., 219., 220.],
                 [221., 222., 223., 224.]],
                [[225., 226., 227., 228.],
                 [229., 230., 231., 232.],
                 [233., 234., 235., 236.],
                 [237., 238., 239., 240.]],
                [[241., 242., 243., 244.],
                 [245., 246., 247., 248.],
                 [249., 250., 251., 252.],
                 [253., 254., 255., 256.]]]])
print('t4      : ', t4      )
print('t4.shape: ', t4.shape)
print('t4.ndim : ', t4.ndim )

# Note how these nested-array literals resemble data structures in JavaScript, Perl, and JSON.
------------------------------------------------------------------------------------------------------
Output
------------------------------------------------------------------------------------------------------

v0      :  0
v0.shape:  ()
v0.ndim :  0
v1      :  [1.]
v1.shape:  (1,)
v1.ndim :  1
v2      :  [1. 2.]
v2.shape:  (2,)
v2.ndim :  1
v3.shape:  (3,)
v3.ndim :  1
v3      :  [1. 2. 3.]
v4      :  [1. 2. 3. 4.]
v4.shape:  (4,)
v4.ndim :  1
m0      :  4.0
m0.shape:  ()
m0.ndim :  0
m1      :  [1.]
m1.shape:  (1,)
m1.ndim :  1
m2      :  [[1. 2.]
 [3. 4.]]
m2.shape:  (2, 2)
m2.ndim :  2
m3      :  [[1. 2. 3.]
 [4. 5. 6.]
 [7. 8. 9.]]
m3.shape:  (3, 3)
m3.ndim :  2
m4      :  [[ 1.  2.  3.  4.]
 [ 5.  6.  7.  8.]
 [ 9. 10. 11. 12.]
 [13. 14. 15. 16.]]
m4.shape:  (4, 4)
m4.ndim :  2
t0      :  4.0
t0.shape:  ()
t0.ndim :  0
t1      :  [1. 2. 3.]
t1.shape:  (3,)
t1.ndim :  1
t2      :  [[1. 2.]
 [3. 4.]]
t2.shape:  (2, 2)
t2.ndim :  2
t3      :  [[[ 1.  2.  3.]
  [ 4.  5.  6.]
  [ 7.  8.  9.]]

 [[10. 11. 12.]
  [13. 14. 15.]
  [16. 17. 18.]]

 [[19. 20. 21.]
  [22. 23. 24.]
  [25. 26. 27.]]]
t3.shape:  (3, 3, 3)
t3.ndim :  3
t4      :  [[[[  1.   2.   3.   4.]
   [  5.   6.   7.   8.]
   [  9.  10.  11.  12.]
   [ 13.  14.  15.  16.]]

  [[ 17.  18.  19.  20.]
   [ 21.  22.  23.  24.]
   [ 25.  26.  27.  28.]
   [ 29.  30.  31.  32.]]

  [[ 33.  34.  35.  36.]
   [ 37.  38.  39.  40.]
   [ 41.  42.  43.  44.]
   [ 45.  46.  47.  48.]]

  [[ 49.  50.  51.  52.]
   [ 53.  54.  55.  56.]
   [ 57.  58.  59.  60.]
   [ 61.  62.  63.  64.]]]


 [[[ 65.  66.  67.  68.]
   [ 69.  70.  71.  72.]
   [ 73.  74.  75.  76.]
   [ 77.  78.  79.  80.]]

  [[ 81.  82.  83.  84.]
   [ 85.  86.  87.  88.]
   [ 89.  90.  91.  92.]
   [ 93.  94.  95.  96.]]

  [[ 97.  98.  99. 100.]
   [101. 102. 103. 104.]
   [105. 106. 107. 108.]
   [109. 110. 111. 112.]]

  [[113. 114. 115. 116.]
   [117. 118. 119. 120.]
   [121. 122. 123. 124.]
   [125. 126. 127. 128.]]]


 [[[129. 130. 131. 132.]
   [133. 134. 135. 136.]
   [137. 138. 139. 140.]
   [141. 142. 143. 144.]]

  [[145. 146. 147. 148.]
   [149. 150. 151. 152.]
   [153. 154. 155. 156.]
   [157. 158. 159. 160.]]

  [[161. 162. 163. 164.]
   [165. 166. 167. 168.]
   [169. 170. 171. 172.]
   [173. 174. 175. 176.]]

  [[177. 178. 179. 180.]
   [181. 182. 183. 184.]
   [185. 186. 187. 188.]
   [189. 190. 191. 192.]]]


 [[[193. 194. 195. 196.]
   [197. 198. 199. 200.]
   [201. 202. 203. 204.]
   [205. 206. 207. 208.]]

  [[209. 210. 211. 212.]
   [213. 214. 215. 216.]
   [217. 218. 219. 220.]
   [221. 222. 223. 224.]]

  [[225. 226. 227. 228.]
   [229. 230. 231. 232.]
   [233. 234. 235. 236.]
   [237. 238. 239. 240.]]

  [[241. 242. 243. 244.]
   [245. 246. 247. 248.]
   [249. 250. 251. 252.]
   [253. 254. 255. 256.]]]]
t4.shape:  (4, 4, 4, 4)
t4.ndim :  4