### Own - Conda venv --- dc_info_venv
### main Source --- https://www.tensorflow.org/guide/
#
import tensorflow as tf
#from tf.keras import layers ### Fails -- "tf" is an import alias, not a package name; use "from tensorflow.keras import layers" (we have TF version == 1.5.0)
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
from tensorflow.python.framework import ops
#from tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict
%matplotlib inline
np.random.seed(1)
#
print(tf.VERSION)
print(tf.keras.__version__)
import keras
print('Keras: {}'.format(keras.__version__))
In [2]:
# Constants were created in an earlier cell.
# Now creating placeholders.
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
c = tf.sqrt(tf.add(tf.square(a), tf.square(b)))
print(a, b, c)
sess = tf.Session()
print(*sess.run([a, b, c], feed_dict={a: 4., b: 3.}))
In [4]:
print(*sess.run([a, b, c], feed_dict={a: 18., b: 4.}))
In [3]:
## Create a VARIABLE == count_variable
count_variable = tf.get_variable("count", [])
zero_node = tf.constant(0.)
assign_node = tf.assign(count_variable, zero_node)
sess = tf.Session()
sess.run(assign_node)
print(sess.run(count_variable))
#
In [ ]:
"""
When a variable node is first created, it basically stores “null”, and any attempt to evaluate it
will result in an exception.
We can only evaluate a variable after putting a value into it first.
There are two main ways to put a value into a variable: initializers and tf.assign().
"""
In [ ]:
"""
tf.assign(target, value) is a node that has some unique properties compared to nodes we’ve seen so far:
Identity operation. tf.assign(target, value) does not do any interesting computations,
it is always just equal to value.
Side effects. When computation “flows” through assign_node, side effects happen to other
things in the graph.
In this case, the side effect is to replace the value of count_variable with the value stored in zero_node.
"""
"""
Non-dependent edges. Even though the count_variable node and the assign_node are connected in the graph,
neither is dependent on the other. This means computation will not flow back through that edge when
evaluating either node.
However, assign_node is dependent on zero_node; it needs to know what to assign.
When we call sess.run(assign_node), the computation path goes through assign_node and zero_node.
"""
"""
As computation flows through any node in the graph, it also enacts any side effects controlled by
that node, shown in green.
Due to the particular side effects of tf.assign, the memory associated with count_variable
(which was previously “null”) is now permanently set to equal 0.
This means that when we next call sess.run(count_variable),
we don’t throw any exceptions. Instead, we get a value of 0. Success!
"""
In [8]:
### Initializers ---
const_init_node = tf.constant_initializer(0.)
count_variable = tf.get_variable("count", [], initializer=const_init_node) #
## Above, initializer is a keyword argument of tf.get_variable;
## it has been set to const_init_node.
## This creates a CONNECTION in the GRAPH between the two nodes,
## but the SESSION has not yet been told to actually run that initializer.
## That is done with the line: init = tf.global_variables_initializer()
## Since init is never run in this cell, evaluating count_variable below
## raises an "Attempting to use uninitialized value" FailedPreconditionError.
sess = tf.Session()
print(sess.run([count_variable]))
In [3]:
const_init_node = tf.constant_initializer(0.)
count_variable = tf.get_variable("count", [], initializer=const_init_node)
#count_variable = tf.get_variable("count", [], initializer=const_init_node ,reuse=True)
## reuse=True is not an argument of tf.get_variable itself -- reuse is set on a variable_scope, so it won't work here.
## After restarting the notebook, this cell runs cleanly again, because the variable "count" created in an earlier cell no longer exists in the fresh graph.
init = tf.global_variables_initializer() # Another node with side-effects...
## https://www.tensorflow.org/api_docs/python/tf/initializers/global_variables
## RETURNS -- An Op that initializes global variables in the graph.
## global_variables_initializer looks at the global graph and adds a dependency on every variable initializer it finds,
## so running this single op (re)initializes all of the variables in the graph.
sess = tf.Session()
sess.run(init)
print(sess.run([count_variable]))
In [ ]:
## Variable Sharing --- Source -- https://jacobbuckman.com/post/tensorflow-the-confusing-parts-1/#fnref:1
#
"""
You may encounter Tensorflow code with variable sharing, which involves creating a scope
and setting “reuse=True”.
I strongly recommend that you don’t use this in your own code.
If you want to use a single variable in multiple places, simply keep track of your pointer to that
variable’s node programmatically, and re-use it when you need to.
In other words, you should have only a single call of tf.get_variable() for each parameter you
intend to store in memory.
"""
In [ ]:
## Optimizers -- Source -- https://jacobbuckman.com/post/tensorflow-the-confusing-parts-1/#fnref:1
"""
At last: on to the actual deep learning! If you’re still with me, the remaining concepts should be extremely straightforward.
In deep learning, the typical “inner loop” of training is as follows:
Get an input and true_output
Compute a “guess” based on the input and your parameters
Compute a “loss” based on the difference between your guess and the true_output
Update the parameters according to the gradient of the loss
"""
In [4]:
### build the graph
## first set up the parameters
m = tf.get_variable("m", [], initializer=tf.constant_initializer(0.))
b = tf.get_variable("b", [], initializer=tf.constant_initializer(0.))
init = tf.global_variables_initializer()
## then set up the computations
input_placeholder = tf.placeholder(tf.float32)
output_placeholder = tf.placeholder(tf.float32)
x = input_placeholder
y = output_placeholder
y_guess = m * x + b
loss = tf.square(y - y_guess)
### NOTE --- Don't re-run this cell in the same kernel; restart the notebook first (or see the sketch after this cell's output).
"""
ValueError: Variable m already exists, disallowed.
Did you mean to set reuse=True or reuse=tf.AUTO_REUSE in VarScope? Originally defined at:
"""
Out[4]:
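In [ ]:
## One alternative to restarting the whole notebook (a sketch, not what this
## notebook actually does): wipe the default graph before redefining m and b.
## Note that this invalidates everything built on the old graph, so the loss,
## the session and the init op would all have to be recreated afterwards.
#tf.reset_default_graph()
#m = tf.get_variable("m", [], initializer=tf.constant_initializer(0.))
#b = tf.get_variable("b", [], initializer=tf.constant_initializer(0.))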
In [5]:
## finally, set up the optimizer and minimization node
optimizer = tf.train.GradientDescentOptimizer(1e-3)
train_op = optimizer.minimize(loss)
### start the session
sess = tf.Session()
sess.run(init)
### perform the training loop
import random
## set up problem
true_m = random.random()
true_b = random.random()
In [ ]:
for update_i in range(100000):
    ## (1) get the input and output
    input_data = random.random()
    output_data = true_m * input_data + true_b
    ## (2), (3), and (4) all take place within a single call to sess.run()!
    _loss, _ = sess.run([loss, train_op],
                        feed_dict={input_placeholder: input_data, output_placeholder: output_data})
    #print(update_i, _loss)  ## don't print every step -- 100000 lines of output
### finally, print out the values we learned for our two variables
print("True parameters:    m=%.4f, b=%.4f" % (true_m, true_b))
print("Learned parameters: m=%.4f, b=%.4f" % tuple(sess.run([m, b])))
#
In [ ]:
"""
0 0.8164941
1 1.1643778
2 0.8676618
3 1.1011628
4 1.3437326
5 0.79393685
6 0.7909982
7 0.7743486
8 0.9102865
9 0.9251575
10 1.3213344
"""