import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# --- Load the dataset: current speed vs. future speed (linear-regression data).
url = "https://raw.githubusercontent.com/ChiHua0826/AI_course_2018/master/Regression/datasets/%E7%95%B6%E4%B8%8B%E8%BB%8A%E9%80%9F%E8%88%87%E6%9C%AA%E4%BE%86%E8%BB%8A%E9%80%9F%E7%B7%9A%E6%80%A7%E8%BF%B4%E6%AD%B8%E6%A8%A1%E5%9E%8B%E8%B3%87%E6%96%99.csv"
c = pd.read_csv(url, header=None)
# Because the file is read with header=None, row 0 contains the textual column
# headers and every column comes back as object/str dtype.  The original only
# dropped row 0 (via chained `c.loc[:][2]` indexing) and left the values as
# strings, which TensorFlow cannot multiply; drop the header row AND convert
# to float in one step.
d = c.iloc[1:, 2].astype(float)  # feature column (presumably current speed — TODO confirm)
e = c.iloc[1:, 1].astype(float)  # target column (presumably future speed — TODO confirm)
rng = np.random
train_X = np.asarray(d)
train_Y = np.asarray(e)
# --- First model: simple linear regression  pred = wei1 * X + bias1  (TF1 graph API).
X = tf.placeholder(tf.float32)  # scalar feature fed per-sample
Y = tf.placeholder(tf.float32)  # scalar target fed per-sample
wei1 = tf.Variable(rng.randn())   # slope, randomly initialised
bias1 = tf.Variable(rng.randn())  # intercept, randomly initialised
pred = tf.add(tf.multiply(X, wei1), bias1)
# Mean-squared-error loss on the linear prediction.
# NOTE(review): the original inserted (twice, verbatim) a tanh hidden layer
# `tf.matmul(L1, wei2) + bias2` referencing the undefined names `L1` and `y`
# — a guaranteed NameError — and its output was never used by the training
# loop below, which runs `train_step` and plots `pred`.  That dead, broken
# code is removed; the loss now targets `pred` via the `Y` placeholder.
loss = tf.reduce_mean(tf.square(Y - pred))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# Op that assigns every variable its initial value; run it first in a session.
ini = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(ini)
    # 2000 passes over the data, one SGD step per (x, y) sample.
    for _ in range(2000):
        for (x, y) in zip(train_X, train_Y):
            sess.run(train_step, feed_dict={X: x, Y: y})
    # Predict on the full training input.
    # BUG FIX: the original fed `feed_dict={x: d}` — lowercase `x` is the
    # (scalar) loop variable left over from the loop above, not the
    # placeholder — and `d` is the raw pandas series.  Feed the placeholder
    # `X` with the numpy array actually used for training.
    pre_value = sess.run(pred, feed_dict={X: train_X})
    # Scatter the data and overlay the fitted line in green.
    plt.figure()
    plt.scatter(d, e)
    plt.plot(d, pre_value, 'g-', lw=4)
    plt.show()
# --- Hyper-parameters for the second, self-contained regression example.
learning_rate = 0.01
training_epochs = 5000
display_step = 50  # kept for parity with the tutorial source; not read below

# Features reuse the CSV column loaded above; targets are hard-coded
# tutorial values.
train_X = np.asarray(d)
train_Y = np.asarray([
    1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
    2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3,
])
n_samples = train_X.shape[0]  # number of training samples
# --- Second model: linear regression with MSE cost (classic TF1 tutorial).
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
# Model weights, randomly initialised.
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# Linear model: pred = W * X + b
pred = tf.add(tf.multiply(X, W), b)
# Mean squared error; the 1/(2n) factor is the tutorial's convention
# (it simplifies the gradient).
cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)
# Gradient descent.  minimize() knows to modify W and b because Variable
# objects are trainable=True by default.  (In the original file this comment
# was split across two source lines so that "trainable=True by default"
# became a bare statement — a SyntaxError.)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Initialize the variables (i.e. assign their default values).
init = tf.global_variables_initializer()
with tf.Session() as sess:
    # Run the variable initialiser.
    sess.run(init)
    # Fit the model: one SGD step per (x, y) training pair, per epoch.
    for epoch in range(training_epochs):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})
    print("Optimization Finished!")
    # Report the final cost over the whole training set and the fitted
    # parameters.
    training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=",
          sess.run(b), '\n')