2,415 view times

# Introduction to TensorFlow

deeplearning.ai course note, not a tutorial about TensorFlow.

I do not see a distinctive difference here.

e.g., y = 2x - 1, then use a single neuron to predict the result: 18.9 is close to 19 but not equal to 19.

# Fit a single-neuron linear model to points drawn from y = 2x - 1.
import keras
import numpy as np

# One Dense layer with one unit: learns weight w and bias b in y = w*x + b.
model = keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')

x = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
y = np.array([-3.0, -1.0, 1.0, 3.0, 5.0, 7.0], dtype=float)

model.fit(x, y, epochs=500)
# BUG FIX: pass a numpy array — newer Keras versions reject a plain Python list.
print(model.predict(np.array([10.0])))


result: 18.9

Epoch 500/500
6/6 [==============================] - 0s 166us/step - loss: 2.0329e-05
[[18.986845]]


exercise:

# GRADED FUNCTION: house_model
# GRADED FUNCTION: house_model
def house_model(y_new):
    """Predict a house price (in units of 100k) from the number of bedrooms.

    Training data encodes the rule: price = 0.5 + 0.5 per bedroom, so a
    1-bedroom house is 1.0, a 2-bedroom house is 1.5, and so on.

    y_new: array-like of bedroom counts to predict for.
    Returns the prediction for the first element.
    """
    # NOTE: indentation restored — the pasted snippet had a flat function body.
    xs = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float)  # bedrooms
    ys = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 3.5], dtype=float)  # price / 100k
    # Use tf.keras consistently (original mixed tf.keras and bare keras).
    model = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])])
    model.compile(optimizer='sgd', loss='mse')
    model.fit(xs, ys, epochs=2000)
    return model.predict(y_new)[0]

prediction = house_model([7.0])
print(prediction)


## Introduction to CV

In the previous exercise you saw how to create a neural network that figured out the problem you were trying to solve. This gave an explicit example of learned behavior. Of course, in that instance, it was a bit of overkill because it would have been easier to write the function Y=2x-1 directly, instead of bothering with using Machine Learning to learn the relationship between X and Y for a fixed set of values, and extending that for all values.

But what about a scenario where writing rules like that is much more difficult — for example a computer vision problem? Let’s take a look at a scenario where we can recognize different items of clothing, trained from a dataset containing 10 different types.

import keras
import numpy as np
import os
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from keras import Sequential
from keras.layers import Flatten, Dense
import matplotlib.pyplot as plt

# Mixed precision / GPU session setup (TF1-style API).
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.8  # cap GPU memory usage
set_session(tf.Session(config=config))
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"  # visible GPUs

fashion_mnist = keras.datasets.fashion_mnist
# BUG FIX: the dataset was never loaded — train_images/train_label were undefined.
(train_images, train_label), (test_images, test_label) = fashion_mnist.load_data()

np.set_printoptions(linewidth=200)
plt.imshow(train_images[0])
print(train_label[0])
print(train_images[0])

# Normalize pixel values from [0, 255] to [0, 1].
train_images = train_images / 255.0
test_images = test_images / 255.0

model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(512, activation=tf.nn.relu),
    Dense(10, activation=tf.nn.softmax)])
# BUG FIX: the model.compile(...) call was truncated in the note — restored
# (only the loss/metrics fragments survived the paste).
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(train_images, train_label, epochs=200, validation_data=(test_images, test_label))


early stop:

import tensorflow as tf
print(tf.__version__)

class myCallback(tf.keras.callbacks.Callback):
    """Stop training as soon as the epoch-end loss drops below 0.4."""

    def on_epoch_end(self, epoch, logs=None):
        # Avoid a mutable default argument; Keras passes the logs dict itself.
        logs = logs or {}
        # The course treats loss < 0.4 as a proxy for ~60% accuracy.
        if logs.get('loss') < 0.4:
            print("\nReached 60% accuracy so cancelling training!")
            self.model.stop_training = True

callbacks = myCallback()
mnist = tf.keras.datasets.fashion_mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
training_images = training_images / 255.0
test_images = test_images / 255.0
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
# BUG FIX: model.compile(...) was missing — fit() on an uncompiled model raises.
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.fit(training_images, training_labels, epochs=5, callbacks=[callbacks])


2.2.0
Epoch 1/5
1875/1875 [==============================] - 7s 4ms/step - loss: 0.4728
Epoch 2/5
1865/1875 [============================>.] - ETA: 0s - loss: 0.3610
Reached 60% accuracy so cancelling training!
1875/1875 [==============================] - 7s 4ms/step - loss: 0.3608
<tensorflow.python.keras.callbacks.History at 0x7fb05da98a90>


## Exercise 2

In the course you learned how to do classification using Fashion MNIST, a data set containing items of clothing. There’s another, similar dataset called MNIST which has items of handwriting — the digits 0 through 9.

Write an MNIST classifier that trains to 99% accuracy or above, and does it without a fixed number of epochs — i.e. you should stop training once you reach that level of accuracy.

Some notes:

1. It should succeed in less than 10 epochs, so it is okay to change epochs= to 10, but nothing larger
2. When it reaches 99% or greater it should print out the string “Reached 99% accuracy so cancelling training!”
3. If you add any additional variables, make sure you use the same names as the ones used in the class

I’ve started the code for you below — how would you finish it?

# GRADED FUNCTION: train_mnist
def train_mnist():
    """Train an MNIST classifier, stopping once training accuracy exceeds 99%.

    Returns:
        (history.epoch, final_training_accuracy)
    """
    # YOUR CODE SHOULD START HERE
    class myCallback(tf.keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs=None):
            logs = logs or {}
            # Default 0 guards against a missing 'acc' key (None > float raises).
            if logs.get("acc", 0) > 0.99:
                print("\nReached 99% accuracy so cancelling training!")
                self.model.stop_training = True
    # YOUR CODE SHOULD END HERE

    mnist = tf.keras.datasets.mnist
    # BUG FIX: the dataset was never unpacked — x_train/y_train were undefined.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    x_train = x_train / 255.0
    x_test = x_test / 255.0
    # YOUR CODE SHOULD START HERE
    callbacks = myCallback()
    # YOUR CODE SHOULD END HERE
    model = tf.keras.models.Sequential([
        # YOUR CODE SHOULD START HERE
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(512, activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax')
        # YOUR CODE SHOULD END HERE
    ])

    # BUG FIX: the model.compile(...) call was truncated in the note — restored.
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    # model fitting
    history = model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])
    # model fitting
    return history.epoch, history.history['acc'][-1]


## CNN

import os
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
import matplotlib.pyplot as plt
from tensorflow.keras import models

# Mixed precision / GPU session setup (TF1-style API).
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.8  # cap GPU memory usage
set_session(tf.Session(config=config))
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"  # visible GPUs

mnist = tf.keras.datasets.fashion_mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
# Conv2D expects an explicit channels dimension: (batch, 28, 28, 1).
training_images = training_images.reshape(60000, 28, 28, 1)
training_images = training_images / 255.0
test_images = test_images.reshape(10000, 28, 28, 1)
test_images = test_images / 255.0

model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
# BUG FIX: model.compile(...) was missing — fit() on an uncompiled model raises.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
model.fit(training_images, training_labels, epochs=5)
test_loss = model.evaluate(test_images, test_labels)

# print(test_labels[:100])
# plt.figure(0)
# f, axarr = plt.subplots(3,4)
# FIRST_IMAGE=0
# SECOND_IMAGE=7
# THIRD_IMAGE=26
# CONVOLUTION_NUMBER = 1
# layer_outputs = [layer.output for layer in model.layers]
# activation_model = tf.keras.models.Model(inputs = model.input, outputs = layer_outputs)
# for x in range(0,4):
#   f1 = activation_model.predict(test_images[FIRST_IMAGE].reshape(1, 28, 28, 1))[x]
#   axarr[0,x].imshow(f1[0, : , :, CONVOLUTION_NUMBER], cmap='inferno')
#   axarr[0,x].grid(False)
#   f2 = activation_model.predict(test_images[SECOND_IMAGE].reshape(1, 28, 28, 1))[x]
#   axarr[1,x].imshow(f2[0, : , :, CONVOLUTION_NUMBER], cmap='inferno')
#   axarr[1,x].grid(False)
#   f3 = activation_model.predict(test_images[THIRD_IMAGE].reshape(1, 28, 28, 1))[x]
#   axarr[2,x].imshow(f3[0, : , :, CONVOLUTION_NUMBER], cmap='inferno')
#   axarr[2,x].grid(False)

# plt.figure(1)
# plt.imshow(test_images[FIRST_IMAGE].reshape( 28, 28))
# plt.figure(2)
# plt.imshow(test_images[SECOND_IMAGE].reshape( 28, 28))
# plt.figure(3)
# plt.imshow(test_images[THIRD_IMAGE].reshape( 28, 28))


## Exercise 3

# GRADED FUNCTION: train_mnist_conv
# GRADED FUNCTION: train_mnist_conv
def train_mnist_conv():
    """Train a CNN on MNIST, stopping once training accuracy exceeds 99.8%.

    Returns:
        (history.epoch, final_training_accuracy)
    """
    class myCallback(tf.keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs=None):
            logs = logs or {}
            # Default 0 guards against a missing 'acc' key (None > float raises).
            if logs.get("acc", 0) > 0.998:
                print("\nReached 99.8% accuracy so cancelling training!")
                self.model.stop_training = True

    callbacks = myCallback()
    mnist = tf.keras.datasets.mnist
    # BUG FIX: `path` was undefined in this snippet — use the default location.
    (training_images, training_labels), (test_images, test_labels) = mnist.load_data()
    # Conv2D expects an explicit channels dimension: (batch, 28, 28, 1).
    training_images = training_images.reshape(training_images.shape[0], training_images.shape[1], training_images.shape[2], 1)
    test_images = test_images.reshape(test_images.shape[0], test_images.shape[1], test_images.shape[2], 1)
    training_images = training_images / 255.0
    test_images = test_images / 255.0

    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPooling2D(2, 2),

        tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),

        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax')
    ])

    # BUG FIX: model.compile(...) was missing — fit() on an uncompiled model raises.
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    # model fitting
    history = model.fit(
        training_images, training_labels, epochs=20, callbacks=[callbacks],
        validation_data=(test_images, test_labels)
    )
    # model fitting
    return history.epoch, history.history['acc'][-1]



## Exercise 4 (handling complex images)

# GRADED FUNCTION: train_happy_sad_model

def train_happy_sad_model():
    """Train a CNN on the happy-or-sad image set until accuracy > 99.9%.

    NOTE(review): the snippet ended with a top-level `return` and the
    "GRADED FUNCTION: train_happy_sad_model" header, so the missing
    `def` line is restored here.

    Returns:
        The final training accuracy.
    """
    DESIRED_ACCURACY = 0.999

    class myCallback(tf.keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs=None):
            logs = logs or {}
            if logs.get("acc", 0) > DESIRED_ACCURACY:
                # BUG FIX: the message said 99.8% while the threshold is 99.9%.
                print("\nReached 99.9% accuracy so cancelling training!")
                self.model.stop_training = True

    callbacks = myCallback()

    # Define and compile the model. The exercise states the images are 150x150.
    model = tf.keras.models.Sequential([
        # BUG FIX: the exercise requires 150x150 inputs — the snippet used 300x300.
        # This is the first convolution
        tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(150, 150, 3)),
        tf.keras.layers.MaxPooling2D(2, 2),
        # The second convolution
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        # The third convolution
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        # The fourth convolution
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        # The fifth convolution
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        # Flatten the results to feed into a DNN
        tf.keras.layers.Flatten(),
        # 512 neuron hidden layer
        tf.keras.layers.Dense(512, activation='relu'),
        # Only 1 output neuron: a value in [0, 1], one class per extreme.
        tf.keras.layers.Dense(1, activation='sigmoid')
    ])

    from tensorflow.keras.optimizers import RMSprop

    model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(lr=0.001),
                  metrics=['accuracy'])

    # Create an ImageDataGenerator and a train_generator via flow_from_directory.
    from tensorflow.keras.preprocessing.image import ImageDataGenerator

    train_datagen = ImageDataGenerator(rescale=1 / 255)

    # BUG FIX: target_size now matches the 150x150 input shape above.
    train_generator = train_datagen.flow_from_directory(
        '/tmp/h-or-s/',          # source directory for training images
        target_size=(150, 150),  # all images will be resized to 150x150
        batch_size=16,
        # Since we use binary_crossentropy loss, we need binary labels
        class_mode='binary')
    # Expected output: 'Found 80 images belonging to 2 classes'

    # model fitting
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=1,
        epochs=50,
        verbose=1,
        callbacks=[callbacks])
    # model fitting
    return history.history['acc'][-1]