TensorFlow learning notes: programming a simple neural network and using the MNIST data set

Keywords: neural network, Session

1 Simple neural network for a regression problem

import tensorflow as tf 
import numpy as np 
import matplotlib.pyplot as plt 

# Generate random points 
x_data = np.linspace(-0.5,0.5,200)[:,np.newaxis]
# [:, np.newaxis] adds a dimension to x_data; the values and dtype stay the same
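# (Illustration) np.linspace(-0.5,0.5,200) has shape (200,);
# after [:,np.newaxis] x_data has shape (200,1), matching the [None,1] placeholder defined below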
noise = np.random.normal(0,0.02,x_data.shape)
y_data = np.square(x_data) + noise

# Define two placeholders
x =  tf.placeholder(tf.float32,[None,1])  # [None,1]: None means any number of rows, with 1 column
y =  tf.placeholder(tf.float32,[None,1])

# Build the neural network (the middle layer has 10 neurons)
# Define the middle (hidden) layer; the network parameters are initialized with random values
Weights_L1 = tf.Variable(tf.random.normal([1,10])) # Corresponding to one input and ten outputs
biases_L1 = tf.Variable(tf.random.normal([1,10]))
Wx_plus_b_L1 = tf.matmul(x,Weights_L1) + biases_L1
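# Shape check: x is (N,1) and Weights_L1 is (1,10), so Wx_plus_b_L1 has shape (N,10)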
# L1 is the output of the middle layer; the hyperbolic tangent is used as the activation function
L1 = tf.nn.tanh(Wx_plus_b_L1)

# Define output layer of neural network (initialize network parameters with random values)
Weights_L2 = tf.Variable(tf.random.normal([10,1]))
biases_L2 = tf.Variable(tf.random.normal([1,1]))
Wx_plus_b_L2 = tf.matmul(L1,Weights_L2) + biases_L2
# prediction is the final output; the hyperbolic tangent is again used as the activation function
prediction = tf.nn.tanh(Wx_plus_b_L2)

# Defining cost function and training method
# Quadratic cost function
loss = tf.reduce_mean(tf.square(y - prediction))
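# i.e. loss = mean over all samples of (y - prediction)^2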
# Use gradient descent to minimize the cost function
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as sess: 
    # Variable initialization
    sess.run(tf.global_variables_initializer())
    for _ in range(2000):
        sess.run(train_step,feed_dict = {x:x_data,y:y_data})
    
    # Get the predictions
    prediction_value = sess.run(prediction,feed_dict = {x:x_data})

    # Plot the result
    plt.figure(figsize=(6,6))
    plt.scatter(x_data,y_data)
    plt.plot(x_data,prediction_value,'r-',lw = 5)
    plt.show() 
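
If you want to watch training converge, a small variation (my own sketch, not part of the original program) is to fetch the loss together with the training step and print it every few hundred iterations; it would replace the plain training loop inside the same Session block:

    for step in range(2000):
        _, loss_value = sess.run([train_step, loss], feed_dict = {x:x_data, y:y_data})
        if step % 200 == 0:
            # loss_value is the current mean squared error on the 200 training points
            print("step", step, "loss", loss_value)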

Result: (figure) a scatter plot of the noisy training points with the network's fitted curve drawn as a thick red line.

2 MNIST data set

The data set used below can be downloaded from Baidu Netdisk:
Link: https://pan.baidu.com/s/1 y0rwlj9wujefzt9jme5ug
Extraction code: 6fho
Put it in the same folder as the program.

2.1 Program

import tensorflow as tf 
from tensorflow.examples.tutorials.mnist import input_data

# Load data set
mnist = input_data.read_data_sets("MNIST_data",one_hot=True)
# one_hot=True converts each label into a one-hot vector containing only 0s and 1s
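# For example, the label for the digit 3 becomes the vector [0,0,0,1,0,0,0,0,0,0]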

# Define the size of each batch
batch_size = 100
# Calculate the total number of batches
n_batch = mnist.train.num_examples // batch_size # integer division: number of complete batches
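# e.g. with the tutorial's 55,000 training images and batch_size = 100, this gives n_batch = 550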

# Define placeholder
x = tf.placeholder(tf.float32,[None,784])
y = tf.placeholder(tf.float32,[None,10])

# Creating a simple neural network
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
prediction = tf.nn.softmax(tf.matmul(x,W) + b)
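# softmax turns the 10 raw scores for each image into a probability distribution that sums to 1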

# Define cost function (quadratic)
loss = tf.reduce_mean(tf.square(y - prediction))
# Use gradient descent to minimize the cost function
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

# Initialize the variables
init = tf.global_variables_initializer()

# Define how accuracy is computed
correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))
''' Notes:
    (1) tf.equal() compares two tensors element-wise: equal gives True, different gives False.
        The result is a tensor of booleans.
    (2) tf.argmax(input, axis): axis=0 compares the elements of each column, axis=1 compares the elements of each row.
        Here it returns, for each row, the index of the largest element.
'''
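# Illustration with made-up values: for one-hot labels [[0,0,1],[1,0,0]], tf.argmax(labels,1) gives [2,0];
# comparing them with the predicted indices via tf.equal() then yields something like [True, False]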

# Accuracy rate
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
''' Notes:
    (1) tf.cast() converts the booleans to 32-bit floats: True becomes 1.0, False becomes 0.0.
    (2) tf.reduce_mean() then takes the mean.
        For example, if the list holds 9 True and 1 False, after tf.cast() there are nine 1.0s and one 0.0, so the accuracy is 90%.
'''

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(21):
        for batch in range(n_batch):
            batch_xs,batch_ys = mnist.train.next_batch(batch_size)
            ''' Notes:
                (1) mnist.train.next_batch() fetches the next batch of training images.
                (2) The image data is stored in batch_xs and the labels in batch_ys.
            '''
            sess.run(train_step,feed_dict = {x:batch_xs,y:batch_ys})
        
        # Accuracy on the test set after this epoch
        acc = sess.run(accuracy,feed_dict = {x:mnist.test.images,y:mnist.test.labels})
        print("Iter " + str(epoch) + ".Testing Accuracy " + str(acc))


# Output results: 
'''
Iter 0.Testing Accuracy 0.8316
Iter 1.Testing Accuracy 0.871
Iter 2.Testing Accuracy 0.8819
Iter 3.Testing Accuracy 0.889
Iter 4.Testing Accuracy 0.8943
Iter 5.Testing Accuracy 0.8972
Iter 6.Testing Accuracy 0.9007
Iter 7.Testing Accuracy 0.9024
Iter 8.Testing Accuracy 0.9028
Iter 9.Testing Accuracy 0.9043
Iter 10.Testing Accuracy 0.9064
Iter 11.Testing Accuracy 0.9075
Iter 12.Testing Accuracy 0.9086
Iter 13.Testing Accuracy 0.9091
Iter 14.Testing Accuracy 0.9103
Iter 15.Testing Accuracy 0.911
Iter 16.Testing Accuracy 0.9112
Iter 17.Testing Accuracy 0.9126
Iter 18.Testing Accuracy 0.9128
Iter 19.Testing Accuracy 0.9128
Iter 20.Testing Accuracy 0.9142
'''

2.2 Suggestions on model optimization

(1) The size of each batch
(2) The initial values of the weights
(3) The number of hidden layers and the network topology
(4) The activation function (tanh() and others can be used)
(5) The choice of cost function (see the sketch below)
(6) The learning rate
(7) Replacing plain gradient descent with another optimization method (see the sketch below)
(8) The number of training epochs
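
As a concrete illustration of points (5) and (7), here is a minimal sketch (my own variation, not part of the original program) that replaces the quadratic cost with a softmax cross-entropy cost computed from the raw logits and swaps plain gradient descent for the Adam optimizer; the rest of the program from section 2.1, including the training loop, stays the same:

import tensorflow as tf

# Same setup as in section 2.1, repeated so the sketch stands on its own
x = tf.placeholder(tf.float32,[None,784])
y = tf.placeholder(tf.float32,[None,10])
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))

logits = tf.matmul(x,W) + b
prediction = tf.nn.softmax(logits)
# (5) Cross-entropy cost, computed from the raw logits for numerical stability
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))
# (7) Adam optimizer instead of plain gradient descent
train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

With the training loop unchanged, the cross-entropy cost usually converges faster on this task than the quadratic cost; the exact accuracy still depends on the learning rate and the number of epochs.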

Posted by Loryman on Sun, 09 Feb 2020 06:40:32 -0800