—– for keras —–
open the Anaconda prompt
pip install keras
# CPU-only version of TensorFlow
pip install --upgrade tensorflow
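to confirm the installs worked, a quick version check (a sketch; version numbers will vary by machine):
import tensorflow as tf
import keras
print(tf.__version__, keras.__version__)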
—– for Visualizing Model Structures in Keras ——–
download Graphviz and install it on Windows
open the Anaconda prompt
conda install graphviz
conda install pip # update pip in conda
pip install graphviz # need both conda and pip install for graphviz to work
conda install pydot
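to verify that the graphviz and pydot packages are importable from python (a quick sanity check, assuming releases recent enough to expose __version__):
import graphviz
import pydot
print(graphviz.__version__, pydot.__version__)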
cheat sheet from DataCamp: http://datacamp-community.s3.amazonaws.com/94fc681d-5422-40cb-a129-2218e9522f17?imm_mid=0f769c&cmp=em-data-na-na-newsltr_20171025
#---------------Load Data -----------------
import numpy as np
# fix random seed for reproducibility
np.random.seed(7)
# load the Pima Indians diabetes dataset; data is from: http://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data
dataset = np.loadtxt("pima-indians-diabetes.data.txt", delimiter=",")
print(dataset[0:3])
## [[ 6. 148. 72. 35. 0. 33.6 0.627 50. 1. ]
## [ 1. 85. 66. 29. 0. 26.6 0.351 31. 0. ]
## [ 8. 183. 64. 0. 0. 23.3 0.672 32. 1. ]]
# split into input (X) and output (Y) variables
X = dataset[:,0:8]
Y = dataset[:,8]
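a quick shape check on the split (the file has 768 rows: 8 input features plus the class label):
print(X.shape)  # (768, 8)
print(Y.shape)  # (768,)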
#-------------- start Keras ----------------------
# install theano (optional)
# import theano
# install tensorflow
import tensorflow
# tensorflow is used as default backend
## C:\Users\emiewag\AppData\Local\CONTIN~1\ANACON~1\lib\site-packages\h5py\__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
## from ._conv import register_converters as _register_converters
import keras
## Using TensorFlow backend.
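to confirm programmatically which backend Keras picked up:
from keras import backend as K
print(K.backend())  # 'tensorflow'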
from keras.models import Sequential
from keras.layers import Dense
# ------------- create model ------------------------
# create a Sequential model and add layers one at a time until we are happy with our network topology
model = Sequential()
# the input_dim argument is set to 8 for the 8 input variables.
# The first layer has 12 neurons and expects 8 inputs, the second hidden layer has 8 neurons, and the output layer has 1 neuron to predict the class (onset of diabetes or not).
# use the rectifier ('relu') activation function on the two hidden layers and the sigmoid function in the output layer.
# network weights are initialized with the Keras default, 'glorot_uniform' (it shows up as VarianceScaling in the layer configs below).
# passing kernel_initializer='uniform' would instead draw small random numbers from a uniform distribution; another traditional alternative is 'normal', small random numbers from a Gaussian distribution.
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
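model.summary() prints the topology and parameter counts; for this network that is 8*12+12 = 108, 12*8+8 = 104, and 8*1+1 = 9 weights, so 221 trainable parameters in total:
model.summary()
to use the 'uniform' initializer mentioned above instead of the default, pass it explicitly, e.g. model.add(Dense(12, input_dim=8, kernel_initializer='uniform', activation='relu'))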
# ------------- Compile model -----------------------
# Compiling the model uses the efficient numerical libraries under the covers (the so-called backend) such as Theano or TensorFlow
# use logarithmic loss, which for a binary classification problem is defined in Keras as 'binary_crossentropy'.
# Use the efficient gradient descent algorithm 'adam' for no other reason than that it is an efficient default.
# because it is a classification problem, we will collect and report the classification accuracy as the metric.
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
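to tune the optimizer rather than take the string default, an optimizer object can be passed instead (a variant of the line above; lr=0.001 is adam's default):
from keras.optimizers import Adam
model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.001), metrics=['accuracy'])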
# -------------- Fit the model ------------------------
# train or fit our model on our loaded data by calling the fit() function on the model.
# epochs = a fixed number of passes through the dataset
# batch_size = the number of instances per iteration/batch
# these can be chosen experimentally by trial and error.
model.fit(X, Y, epochs=10, batch_size=20)
## Epoch 1/10
##
## 20/768 [..............................] - ETA: 20s - loss: 3.2236 - acc: 0.8000
## 580/768 [=====================>........] - ETA: 0s - loss: 5.0072 - acc: 0.6414
## 768/768 [==============================] - 1s 805us/step - loss: 4.5111 - acc: 0.6406
## Epoch 2/10
##
## 20/768 [..............................] - ETA: 0s - loss: 2.6817 - acc: 0.6000
## 680/768 [=========================>....] - ETA: 0s - loss: 1.7045 - acc: 0.5324
## 768/768 [==============================] - 0s 81us/step - loss: 1.6698 - acc: 0.5456
## Epoch 3/10
##
## 20/768 [..............................] - ETA: 0s - loss: 1.3325 - acc: 0.6000
## 640/768 [========================>.....] - ETA: 0s - loss: 0.9604 - acc: 0.5781
## 768/768 [==============================] - 0s 85us/step - loss: 0.9556 - acc: 0.5794
## Epoch 4/10
##
## 20/768 [..............................] - ETA: 0s - loss: 0.7035 - acc: 0.7000
## 700/768 [==========================>...] - ETA: 0s - loss: 0.7773 - acc: 0.6400
## 768/768 [==============================] - 0s 73us/step - loss: 0.7832 - acc: 0.6419
## Epoch 5/10
##
## 20/768 [..............................] - ETA: 0s - loss: 0.5461 - acc: 0.7000
## 660/768 [========================>.....] - ETA: 0s - loss: 0.7391 - acc: 0.6591
## 768/768 [==============================] - 0s 82us/step - loss: 0.7359 - acc: 0.6562
## Epoch 6/10
##
## 20/768 [..............................] - ETA: 0s - loss: 0.6225 - acc: 0.7000
## 640/768 [========================>.....] - ETA: 0s - loss: 0.6922 - acc: 0.6609
## 768/768 [==============================] - 0s 82us/step - loss: 0.7208 - acc: 0.6484
## Epoch 7/10
##
## 20/768 [..............................] - ETA: 0s - loss: 0.7864 - acc: 0.6500
## 700/768 [==========================>...] - ETA: 0s - loss: 0.6825 - acc: 0.6729
## 768/768 [==============================] - 0s 78us/step - loss: 0.7057 - acc: 0.6680
## Epoch 8/10
##
## 20/768 [..............................] - ETA: 0s - loss: 0.7816 - acc: 0.5000
## 720/768 [===========================>..] - ETA: 0s - loss: 0.6735 - acc: 0.6792
## 768/768 [==============================] - 0s 73us/step - loss: 0.6699 - acc: 0.6849
## Epoch 9/10
##
## 20/768 [..............................] - ETA: 0s - loss: 0.6021 - acc: 0.6500
## 700/768 [==========================>...] - ETA: 0s - loss: 0.6597 - acc: 0.6843
## 768/768 [==============================] - 0s 77us/step - loss: 0.6582 - acc: 0.6823
## Epoch 10/10
##
## 20/768 [..............................] - ETA: 0s - loss: 0.4144 - acc: 0.9000
## 660/768 [========================>.....] - ETA: 0s - loss: 0.6950 - acc: 0.6500
## 768/768 [==============================] - 0s 85us/step - loss: 0.6749 - acc: 0.6615
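fit() can also hold out part of the data to report validation performance each epoch (a variant sketch, not run above; calling fit() again would keep training the same model):
history = model.fit(X, Y, epochs=10, batch_size=20, validation_split=0.2)
print(history.history['val_acc'])  # per-epoch validation accuracy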
# ------------- evaluate the model ----------------------
# call the evaluate() function on the model, passing it the same input and output used to train it.
scores = model.evaluate(X, Y)
##
## 32/768 [>.............................] - ETA: 0s
## 768/768 [==============================] - 0s 72us/step
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
##
## acc: 66.93%
# ----------- predictions ------------------------------
predictions = model.predict(X)
# round predictions
rounded = [round(x[0]) for x in predictions]
print(rounded)
## [1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 
1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0]
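the list comprehension above can also be done vectorized with numpy, thresholding the sigmoid outputs at 0.5:
rounded = np.round(predictions).flatten()
# equivalently: (predictions > 0.5).astype(int).flatten()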
#------------ for Visualizing Model Structures ---------------
import graphviz
# set Graphviz path
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
from keras.utils import plot_model
# plot_model needs a model that has already been built and compiled (done above)
plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True)
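in a Jupyter notebook the saved PNG can be displayed inline (assuming IPython is available):
from IPython.display import Image
Image('model.png')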
# ------------ Model weights -----------------------------
# get weights and biases of all layers
for layer in model.layers:
    print(layer.get_config(), layer.get_weights(), '\n')
## {'name': 'dense_1', 'trainable': True, 'batch_input_shape': (None, 8), 'dtype': 'float32', 'units': 12, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'class_name': 'VarianceScaling', 'config': {'scale': 1.0, 'mode': 'fan_avg', 'distribution': 'uniform', 'seed': None}}, 'bias_initializer': {'class_name': 'Zeros', 'config': {}}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None} [array([[ 0.16575974, 0.20620143, -0.17271052, -0.03720333, 0.11519576,
## -0.0390908 , -0.51982754, -0.00687332, -0.44080356, -0.36889532,
## -0.01298724, -0.39209265],
## [ 0.32381216, -0.38468117, 0.0174976 , 0.4169618 , 0.064748 ,
## 0.05011656, -0.35489193, 0.01127319, 0.0095771 , -0.08171291,
## 0.15330714, -0.17520434],
## [ 0.11438085, -0.05402106, -0.03977703, 0.15809956, 0.131782 ,
## 0.26808137, 0.26937962, 0.18732068, -0.08781412, 0.18577208,
## -0.19688219, -0.47491354],
## [-0.40879145, -0.12509695, -0.268078 , -0.2221998 , 0.2251741 ,
## -0.05531788, -0.0804837 , 0.18472418, -0.20835055, -0.46185768,
## -0.37344125, 0.24130017],
## [ 0.2788149 , -0.08531788, -0.15224488, -0.11078814, -0.38218138,
## 0.10998064, 0.34780902, -0.21086861, -0.23528254, 0.27542713,
## -0.10793687, -0.12594655],
## [ 0.31953168, -0.20449257, 0.01100794, 0.27368912, -0.16293941,
## 0.57351327, -0.2521692 , 0.17130196, 0.36902002, -0.39782533,
## -0.31027526, 0.14674312],
## [ 0.01215864, 0.07205153, -0.405978 , -0.39890528, 0.4684632 ,
## 0.06731576, 0.25042462, -0.33416185, -0.28935003, -0.03822233,
## 0.23282139, 0.02146828],
## [-0.08508507, -0.4926324 , 0.29535913, 0.45315725, 0.33417013,
## -0.34216788, 0.26227653, -0.27202642, -0.4728874 , 0.54339194,
## 0.2214253 , -0.09248918]], dtype=float32), array([-0.03899603, 0. , 0.01484552, -0.03129452, -0.03993011,
## 0.11839017, -0.01340594, -0.01490992, -0.05586267, 0.05895201,
## 0.02291187, 0. ], dtype=float32)]
##
## {'name': 'dense_2', 'trainable': True, 'units': 8, 'activation': 'relu', 'use_bias': True, 'kernel_initializer': {'class_name': 'VarianceScaling', 'config': {'scale': 1.0, 'mode': 'fan_avg', 'distribution': 'uniform', 'seed': None}}, 'bias_initializer': {'class_name': 'Zeros', 'config': {}}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None} [array([[ 0.18743241, -0.04185414, 0.37472188, -0.3654538 , -0.21363303,
## 0.07144991, 0.16571039, -0.22500344],
## [-0.4116977 , 0.4476785 , 0.13495606, 0.38070595, -0.16811815,
## -0.5323667 , -0.41471216, 0.49056184],
## [-0.43843648, 0.00932139, 0.0280931 , 0.16815569, -0.56017697,
## 0.44286513, 0.50288963, 0.47923213],
## [-0.50431 , 0.25779474, -0.28521898, 0.13576676, 0.4172268 ,
## 0.31381568, -0.16047421, 0.35722655],
## [-0.4659909 , 0.14185447, -0.14533973, 0.29335946, 0.00883893,
## 0.17242508, -0.49217325, -0.4362761 ],
## [-0.31586087, 0.4919106 , -0.26366618, -0.23576528, 0.39437434,
## 0.04399974, -0.20102444, -0.4256484 ],
## [-0.32226318, -0.39043745, -0.49873388, -0.31297997, -0.01171019,
## -0.1476473 , -0.48390082, -0.12835097],
## [-0.04223084, -0.32753035, -0.33522242, -0.56292784, 0.01525146,
## -0.43774384, 0.15280312, 0.45185062],
## [-0.07699671, -0.31851077, -0.39811963, 0.46872973, 0.3621878 ,
## -0.3311009 , -0.04505533, -0.07634095],
## [-0.11334842, 0.32040858, 0.03670746, 0.19086178, -0.20515129,
## 0.1222926 , -0.43837327, 0.2850056 ],
## [-0.5030543 , -0.2936506 , -0.38313577, -0.52671105, -0.05850805,
## -0.4583947 , -0.4208508 , -0.14294197],
## [ 0.04435921, 0.545004 , 0.07590699, 0.21470094, -0.46099266,
## -0.25307545, -0.31362575, 0.3284188 ]], dtype=float32), array([ 0. , 0.06168259, -0.02383792, -0.00036601, 0.06202728,
## -0.06051792, 0. , 0.00968476], dtype=float32)]
##
## {'name': 'dense_3', 'trainable': True, 'units': 1, 'activation': 'sigmoid', 'use_bias': True, 'kernel_initializer': {'class_name': 'VarianceScaling', 'config': {'scale': 1.0, 'mode': 'fan_avg', 'distribution': 'uniform', 'seed': None}}, 'bias_initializer': {'class_name': 'Zeros', 'config': {}}, 'kernel_regularizer': None, 'bias_regularizer': None, 'activity_regularizer': None, 'kernel_constraint': None, 'bias_constraint': None} [array([[-0.27390796],
## [-0.42916566],
## [-0.18801507],
## [-0.13445202],
## [-0.04280819],
## [ 0.59109074],
## [ 0.42389166],
## [-0.67783976]], dtype=float32), array([-0.0618575], dtype=float32)]
# the weights of a single layer can also be pulled out directly as numpy arrays:
# get_weights() returns a list [W, b]: the weight matrix W first, then the bias vector b
first_layer_weights = model.layers[0].get_weights()[0]
first_layer_biases = model.layers[0].get_weights()[1]
second_layer_weights = model.layers[1].get_weights()[0]
second_layer_biases = model.layers[1].get_weights()[1]
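the arrays follow the (inputs, units) convention; a quick shape check, plus a manual recomputation of the first hidden layer's relu activations to show how W and b are applied:
print(first_layer_weights.shape)   # (8, 12)
print(first_layer_biases.shape)    # (12,)
hidden1 = np.maximum(0, X.dot(first_layer_weights) + first_layer_biases)  # relu(XW + b)
print(hidden1.shape)               # (768, 12)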
Output image: model.png (the model structure diagram produced by plot_model above)
Reference: http://machinelearningmastery.com/tutorial-first-neural-network-python-keras/