A)

import numpy as np
import tensorflow as tf

def step(x):
    # maps x > 0 to +1 and x <= 0 to -1
    #print("Start of step function: \n")
    is_greater = tf.greater(x, 0)
    #print(sess.run(is_greater, feed_dict={X: Data}))
    as_float = tf.cast(is_greater, tf.float32)
    #print(as_float)
    doubled = tf.multiply(as_float, 2)
    #print(doubled)
    #print(tf.subtract(doubled, 1))
    return tf.subtract(doubled, 1)

Data = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
Label = tf.constant([0.0, 1.0, 1.0, 1.0])  # logical OR; a constant, so the optimizer cannot update it

X = tf.placeholder(tf.float32, shape=[4, 2])  # placeholder for data
W = tf.Variable(tf.truncated_normal([2, 1], mean=0.0, stddev=1.0))  # weight vector
B = tf.Variable(0.0, name="bias")

Neuron = tf.add(tf.matmul(X, W), B)  # the neuron, shape [4, 1]
Preds = tf.reshape(step(Neuron), [4])
#Preds = tf.round(tf.sigmoid(Neuron))  # activation

# reshape Neuron to [4] so the difference is elementwise; [4, 1] vs [4] would broadcast to [4, 4]
loss = tf.reduce_mean(tf.squared_difference(tf.reshape(Neuron, [4]), Label))
accuracy = tf.reduce_sum(tf.cast(tf.equal(Preds, step(Label)), tf.float32)) / 4
# or: accuracy = tf.reduce_sum(tf.cast(tf.equal(Preds, Label), tf.float32)) / 4  # check accuracy

# Bool equal check - Preds : Label (note: compares ±1 predictions against 0/1 labels)
Bool_equal_counter = tf.equal(Preds, Label)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

# MSE, ACC = sess.run([loss, accuracy], feed_dict={X: Data})
# preds_res, neuron_print, bool_print = sess.run([Preds, Neuron, Bool_equal_counter], feed_dict={X: Data})
# print(preds_res, "\n", neuron_print.T, "\n", bool_print)
# print("\n")

# Optimizer - gradient descent - parameter: learning rate
# (plain gradient descent creates no slot variables, so defining it after init is safe)
optimizer = tf.train.GradientDescentOptimizer(0.1)
train = optimizer.minimize(loss)

for i in range(5):
    train_res = sess.run(train, feed_dict={X: Data})
    MSE, ACC = sess.run([loss, accuracy], feed_dict={X: Data})
    print("MSE, ACC: ", MSE, ACC)
    #print(Preds, Label)
    preds_res, neuron_print, bool_print = sess.run([Preds, Neuron, Bool_equal_counter], feed_dict={X: Data})
    print(preds_res, "\n", neuron_print.T, "\n", bool_print)
    print("Preds : Label")
    print(preds_res[0], "-----", sess.run(Label))
    print("\n")
    # print(preds_res, label_print)

B)

#%%
import tensorflow as tf

features = tf.placeholder(tf.float32, [None, 3])
labels = tf.placeholder(tf.float32, [None, 1])

#%%
# Random weights (dtype must be passed as a keyword; the second positional
# argument of tf.Variable is `trainable`, not the dtype)
W = tf.Variable([[10.0], [0.0], [0.2]], dtype=tf.float32)
init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated

#%%
with tf.Session() as sess:
    sess.run(init)
    predict = tf.nn.sigmoid(tf.matmul(features, W))
    print(sess.run(predict, feed_dict={features: [[0, 1, 1]]}))
    lbls = [[0], [1], [1], [0]]
    print(sess.run(predict, feed_dict={features: [[0, 1, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]],
                                       labels: lbls}))

    # error = labels - predict
    error = tf.reduce_mean((labels - predict) ** 2)  # mean squared error

    # Training (note: 10 is a very aggressive learning rate)
    optimizer = tf.train.GradientDescentOptimizer(10)
    train = optimizer.minimize(error)
    for i in range(100):
        sess.run(train, feed_dict={features: [[0, 1, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]],
                                   labels: lbls})
        training_cost = sess.run(error, feed_dict={features: [[0, 1, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]],
                                                   labels: lbls})
        classe = sess.run(labels - predict, feed_dict={features: [[0, 1, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]],
                                                       labels: lbls})
        print('Training cost = ', training_cost, 'residuals (labels - predict) = ', classe)
    print(sess.run(predict, feed_dict={features: [[0, 1, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]]}))
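Both snippets target the TensorFlow 1.x graph API (tf.placeholder, tf.Session, tf.train.GradientDescentOptimizer), which is no longer exposed at the top level in TensorFlow 2.x. A minimal sketch of running them unchanged on a TF 2.x install via the compat layer (this assumes a TF 2.x environment, which is not part of the original snippets):

# assumption: TensorFlow 2.x is installed; tf.compat.v1 re-exposes the 1.x API surface
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # restore graph mode so Session/placeholder behave as in 1.x

With these two lines in place of the plain `import tensorflow as tf`, both A) and B) should run as written.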