CS256
Chris Pollett
Oct 27, 2021
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras import losses
from keras import optimizers
from keras import metrics
import os

# Silence TensorFlow's info/warning log spam.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

num_classes = 10  # if using MNIST digits 0-9

# Two hidden ReLU layers of 512 units, each followed by 20% dropout,
# then a softmax output over the num_classes digit classes.
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))

# NOTE(review): mean squared error is an unusual loss for a softmax
# classifier (cross-entropy is standard; the later example uses it) --
# kept here exactly as in the original.
model.compile(loss='mean_squared_error',
              optimizer='sgd',
              metrics=[metrics.categorical_accuracy])
# Placeholder training call: x_train/y_train, x_val/y_val, some_num and
# some_num2 are stand-ins to be supplied by the caller (the full MNIST
# example later uses batch_size=128, epochs=20 with concrete data).
model.fit(x_train, y_train, batch_size = some_num, epochs = some_num2, validation_data = (x_val, y_val))
import keras
import tensorflow as tf
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import RMSprop
import numpy as np
import os

# Silence TensorFlow's info/warning log spam.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# x_train is originally 28x28 grayscale uint8's between 0-255.
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Flatten each 28x28 image to a 784-vector and scale pixels to [0, 1].
x_train = x_train.reshape(60000, 784).astype('float32') / 255
x_test = x_test.reshape(10000, 784).astype('float32') / 255

# One-hot encode the digit labels over 10 classes.
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)

# Architecture: 784 -> 512 (ReLU) -> 512 (ReLU) -> 10 (softmax),
# with 20% dropout after each hidden layer.
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])

# If we don't want to see training progress info, set verbose to 0.
history = model.fit(x_train, y_train,
                    batch_size=128,
                    epochs=20,
                    verbose=1,
                    validation_data=(x_test, y_test))
Epoch 1/20 469/469 [==============================] - 4s 7ms/step - loss: 0.2480 - accuracy: 0.9228 - val_loss: 0.1335 - val_accuracy: 0.9592 Epoch 2/20 469/469 [==============================] - 3s 7ms/step - loss: 0.1022 - accuracy: 0.9692 - val_loss: 0.1041 - val_accuracy: 0.9686 Epoch 3/20 ... Epoch 20/20 469/469 [==============================] - 3s 7ms/step - loss: 0.0164 - accuracy: 0.9957 - val_loss: 0.1116 - val_accuracy: 0.9843
# Evaluate on the held-out test set; this defaults to verbose=1.
score = model.evaluate(x_test, y_test)
# For silent evaluation, pass verbose=0 instead:
score = model.evaluate(x_test, y_test, verbose = 0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# BUG FIX: sklearn.cross_validation was removed from scikit-learn;
# StratifiedKFold now lives in sklearn.model_selection.
from sklearn.model_selection import StratifiedKFold
# ...

def create_model():
    """Build and return a fresh, untrained model."""
    # code to create model

def train_and_evaluate(model, train_data, train_labels, test_data, test_labels):
    """Train model on the train split and evaluate it on the test split.

    (The original sketch wrote the parameters as data[train], labels[train],
    ... which is not valid Python syntax for a def.)
    """
    # code to train and fit model

# ...
# Renamed from load_model(): this is meant to return the (x, y) dataset,
# not a saved Keras model.
x_values, y_values = load_data()
five_fold = StratifiedKFold(n_splits=5)
for train, test in five_fold.split(x_values, y_values):
    model = None  # get rid of results of previous training
    model = create_model()
    # Was called train_evaluate() in the original, which did not match the
    # definition's name train_and_evaluate().
    running_totals = train_and_evaluate(model,
                                        x_values[train], y_values[train],
                                        x_values[test], y_values[test])
# use running totals to compute average losses and accuracy
>>> from matplotlib import pyplot as plt
>>> pixels = x_train[0].reshape((28,28))
>>> plt.imshow(pixels, cmap='gray')
>>> plt.show() # this draws the 0th training item, so we can see it is a 5
>>> x = x_train[0]
>>> x = np.expand_dims(x, axis=0)
>>> model.predict(x)
array([[0.0000000e+00, 4.7968505e-27, 3.7603754e-31, 8.6074351e-07,
        0.0000000e+00, 9.9999917e-01, 2.5081769e-35, 2.4804149e-33,
        9.4426353e-29, 6.9787635e-27]], dtype=float32)
Notice that 5 is also the class predicted with the highest probability above.
# Saving a trained model:
model.save("my_model_path")     # this would save in TensorFlow format
model.save("my_model_path.h5")  # would detect .h5 extension and save as h5 format
# or the format can be given explicitly:
# model.save(path, save_format='tf')  # or 'h5'
from keras.models import load_model

del model  # drop the in-memory model so the reload is a genuine restore
model = load_model('my_model_path')  # rebuild the model from the saved files
# Add a 2D convolution layer: 32 filters, 5x5 kernels, stride 1, ReLU activation.
model.add( Conv2D(32, kernel_size = (5, 5), strides = (1, 1), activation ='relu'))
# Downsample the feature maps with 2x2 max pooling.
model.add( MaxPooling2D((2, 2)))