tf.constant

tf.constant creates a constant tensor. Use it when you want a tensor whose value is fixed, i.e. not updated by a training algorithm such as backpropagation.

"""Example code for tf.constant(https://www.tensorflow.org/versions/r1.8/api_docs/python/tf/constant)

tf.constant(
    value,
    dtype=None,
    shape=None,
    name='Const',
    verify_shape=False
)
"""

import sys
import tensorflow as tf

print("=== Version checking ===")
print("The version of sys: \n{}".format(sys.version))
print("Tensorflow version: {}".format(tf.__version__))
print("========================")
=== Version checking ===
The version of sys: 
3.5.2 (default, Nov 23 2017, 16:37:01) 
[GCC 5.4.0 20160609]
Tensorflow version: 1.8.0
========================
# Constant 1-D Tensor populated with value list.
tensor1 = tf.constant([1, 2, 3, 4, 5, 6, 7])

# Constant 2-D tensor populated with scalar value -1.
tensor2 = tf.constant(-1.0, shape=[2, 3]) 

print("===== Tensor Shape ======")
print("tensor1: {}".format(tensor1))
print("tensor2: {}".format(tensor2))
===== Tensor Shape ======
tensor1: Tensor("Const:0", shape=(7,), dtype=int32)
tensor2: Tensor("Const_1:0", shape=(2, 3), dtype=float32)
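Note that printing a tensor outside a session only shows its symbolic handle (name, shape, dtype). To get the actual values, run the tensors in a session: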
with tf.Session() as sess:
    print("tensor1: \n{}, The shape: {}".format(sess.run(tensor1), tensor1.shape))
    print("tensor2: \n{}, The shape: {}".format(sess.run(tensor2), tensor2.shape))
tensor1: 
[1 2 3 4 5 6 7], The shape: (7,)
tensor2: 
[[-1. -1. -1.]
 [-1. -1. -1.]], The shape: (2, 3)
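The docstring above also lists the dtype, shape, and verify_shape parameters. Here is a minimal sketch of how they behave in r1.8, based on the linked docs (not on the original code): when value has fewer elements than shape, the last element is repeated to fill, while verify_shape=True should reject the mismatch instead.

# A small sketch of the remaining tf.constant parameters (assumptions
# based on the r1.8 docs linked above).
fill = tf.constant([1, 2], shape=[2, 3])           # last element fills: [[1 2 2] [2 2 2]]
cast = tf.constant([1, 2, 3], dtype=tf.float64)    # dtype overrides the inferred int32
try:
    strict = tf.constant([1, 2], shape=[2, 3], verify_shape=True)
except TypeError as e:
    print("verify_shape=True rejects the mismatch: {}".format(e))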

Let’s check whether the constant is actually updated by backpropagation.

If you try, you will get an error message like:

ValueError: No variables to optimize.

That is because your default graph has no trainable variables.

Let’s check this below.

x = [[1., 2., 3.],
     [4., 5., 6.]]
w = [[1., 2.],
     [3., 4.],
     [5., 6.]]
b = [1., 1.]


label = [[1., 0.], [0., 1.]]

with tf.name_scope("Constant_variables") as scope:
    x_var = tf.constant(x, dtype=tf.float32)
    # weight and bias are plain constants here, so this graph
    # has no trainable variables and minimize() below will fail
    weight = tf.constant(w, dtype=tf.float32)
    bias = tf.constant(b, dtype=tf.float32)
    ground_truths = tf.constant(label, dtype=tf.float32)

with tf.name_scope("Name_scope") as scope:
    output = tf.add(tf.matmul(x_var, weight), bias)
    
with tf.name_scope("Loss") as scope:
    sub_for_loss = tf.subtract(ground_truths, output)
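    # mean squared error (MSE) between the ground truths and the linear output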
    losses = tf.reduce_mean(tf.square(sub_for_loss))
    tf.summary.scalar("Loss", losses)
    
with tf.name_scope("Training") as scope:
    global_step_var = tf.Variable(0, name="Global_setp1", trainable=False)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
    train_op = optimizer.minimize(losses, global_step=global_step_var)
    
init_op = tf.global_variables_initializer()

merged_op = tf.summary.merge_all()

print("===== Tensor Shape ======")
print("x_var: {}".format(x_var))
print("weight: {}".format(weight))
print("bias: {}".format(bias))
print("ground_truths: {}".format(ground_truths))
print("output: {}".format(output))
print("losses: {}".format(losses))
print("global_step_var: {}".format(global_step_var))
print("optimizer: {}".format(optimizer))
print("train_op: {}".format(train_op))
print("init_op: {}".format(init_op))
print("merged_op: {}".format(merged_op))
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-4-d4a7db2e23d8> in <module>()
     28     global_step_var = tf.Variable(0, name="Global_setp1", trainable=False)
     29     optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
---> 30     train_op = optimizer.minimize(losses, global_step=global_step_var)
     31 
     32 init_op = tf.global_variables_initializer()

~/.local/lib/python3.5/site-packages/tensorflow/python/training/optimizer.py in minimize(self, loss, global_step, var_list, gate_gradients, aggregation_method, colocate_gradients_with_ops, name, grad_loss)
    412         aggregation_method=aggregation_method,
    413         colocate_gradients_with_ops=colocate_gradients_with_ops,
--> 414         grad_loss=grad_loss)
    415 
    416     vars_with_grad = [v for g, v in grads_and_vars if g is not None]

~/.local/lib/python3.5/site-packages/tensorflow/python/training/optimizer.py in compute_gradients(self, loss, var_list, gate_gradients, aggregation_method, colocate_gradients_with_ops, grad_loss)
    518     processors = [_get_processor(v) for v in var_list]
    519     if not var_list:
--> 520       raise ValueError("No variables to optimize.")
    521     var_refs = [p.target() for p in processors]
    522     grads = gradients.gradients(

ValueError: No variables to optimize.
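The gradient itself is still mathematically computable with respect to a constant; minimize() fails only because it collects from the TRAINABLE_VARIABLES collection, which is empty here. A minimal standalone sketch of that distinction:

# tf.gradients can differentiate with respect to any tensor,
# including a constant; there is just no Variable to apply it to.
c = tf.constant(2.0)
loss_c = tf.square(c)
print(tf.gradients(loss_c, [c]))                            # [<Tensor ...>], i.e. d(c^2)/dc
print(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))  # [] -> "No variables to optimize."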

You can check in detail what kinds of variables are on your graph with the tf.get_collection function.

The code above only has one variable, named "Training/Global_setp1".

###################################
#  Checking the graph's variables #
###################################
trainable_variable1 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
trainable_variable2 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
trainable_variable3 = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES)
print("===== variable type =====")
print("\ntf.GraphKeys.GLOBAL_VARIABLES: {}".format(trainable_variable1))
print("\ntf.GraphKeys.TRAINABLE_VARIABLES: {}".format(trainable_variable2))
print("\ntf.GraphKeys.LOCAL_VARIABLES: {}".format(trainable_variable3))
print("\n===== all variables =====")
for v in tf.global_variables():
    print(v.name)
===== variable type =====

tf.GraphKeys.GLOBAL_VARIABLES: [<tf.Variable 'Training/Global_setp1:0' shape=() dtype=int32_ref>]

tf.GraphKeys.TRAINABLE_VARIABLES: []

tf.GraphKeys.LOCAL_VARIABLES: []

===== all variables =====
Training/Global_setp1:0
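As a side note, TensorFlow also has shortcut functions for these collections, equivalent to the tf.get_collection calls above:

print(tf.global_variables())     # same as tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
print(tf.trainable_variables())  # same as tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
print(tf.local_variables())      # same as tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES)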

Now, let’s change the graph to be trainable.

First of all, change the weight and bias to tf.Variable, like this:

tf.reset_default_graph()  # start from a fresh graph so the node names below match the output

x = [[1., 2., 3.],
     [4., 5., 6.]]
w = [[1., 2.],
     [3., 4.],
     [5., 6.]]
b = [1., 1.]


label = [[1., 0.], [0., 1.]]

with tf.name_scope("Constant_variables") as scope:
    x_var = tf.constant(x, dtype=tf.float32)
    # weight and bias are now tf.Variable, so the graph
    # has trainable variables and minimize() can run
    weight = tf.Variable(w, dtype=tf.float32)
    bias = tf.Variable(b, dtype=tf.float32)
    ground_truths = tf.constant(label, dtype=tf.float32)

with tf.name_scope("Name_scope") as scope:
    output = tf.add(tf.matmul(x_var, weight), bias)
    
with tf.name_scope("Loss") as scope:
    sub_for_loss = tf.subtract(ground_truths, output)
    losses = tf.reduce_mean(tf.square(sub_for_loss))
    tf.summary.scalar("Loss", losses)
    
with tf.name_scope("Training") as scope:
    global_step_var = tf.Variable(0, name="Global_setp", trainable=False)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    train_op = optimizer.minimize(losses, global_step=global_step_var)
    
init_op = tf.global_variables_initializer()

merged_op = tf.summary.merge_all()

print("===== Tensor Shape ======")
print("x_var: {}".format(x_var))
print("weight: {}".format(weight))
print("bias: {}".format(bias))
print("ground_truths: {}".format(ground_truths))
print("output: {}".format(output))
print("losses: {}".format(losses))
print("global_step_var: {}".format(global_step_var))
print("optimizer: {}".format(optimizer))
print("train_op: {}".format(train_op))
print("init_op: {}".format(init_op))
print("merged_op: {}".format(merged_op))
===== Tensor Shape ======
x_var: Tensor("Constant_variables/Const:0", shape=(2, 3), dtype=float32)
weight: <tf.Variable 'Constant_variables/Variable:0' shape=(3, 2) dtype=float32_ref>
bias: <tf.Variable 'Constant_variables/Variable_1:0' shape=(2,) dtype=float32_ref>
ground_truths: Tensor("Constant_variables/Const_1:0", shape=(2, 2), dtype=float32)
output: Tensor("Name_scope/Add:0", shape=(2, 2), dtype=float32)
losses: Tensor("Loss/Mean:0", shape=(), dtype=float32)
global_step_var: <tf.Variable 'Training/Global_setp:0' shape=() dtype=int32_ref>
optimizer: <tensorflow.python.training.gradient_descent.GradientDescentOptimizer object at 0x7fd90bbb0b00>
train_op: name: "Training/GradientDescent"
op: "AssignAdd"
input: "Training/Global_setp"
input: "Training/GradientDescent/value"
attr {
  key: "T"
  value {
    type: DT_INT32
  }
}
attr {
  key: "_class"
  value {
    list {
      s: "loc:@Training/Global_setp"
    }
  }
}
attr {
  key: "use_locking"
  value {
    b: false
  }
}

init_op: name: "init"
op: "NoOp"
input: "^Constant_variables/Variable/Assign"
input: "^Constant_variables/Variable_1/Assign"
input: "^Training/Global_setp/Assign"

With the tf.get_collection function, you can see which variables will be updated.

###################################
#  Checking the graph's variables #
###################################
trainable_variable1 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
trainable_variable2 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
trainable_variable3 = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES)
print("===== variable type =====")
print("\ntf.GraphKeys.GLOBAL_VARIABLES: {}".format(trainable_variable1))
print("\ntf.GraphKeys.TRAINABLE_VARIABLES: {}".format(trainable_variable2))
print("\ntf.GraphKeys.LOCAL_VARIABLES: {}".format(trainable_variable3))
print("\n===== all variables =====")
for v in tf.global_variables():
    print(v.name)
===== variable type =====

tf.GraphKeys.GLOBAL_VARIABLES: [<tf.Variable 'Constant_variables/Variable:0' shape=(3, 2) dtype=float32_ref>, <tf.Variable 'Constant_variables/Variable_1:0' shape=(2,) dtype=float32_ref>, <tf.Variable 'Training/Global_setp:0' shape=() dtype=int32_ref>]

tf.GraphKeys.TRAINABLE_VARIABLES: [<tf.Variable 'Constant_variables/Variable:0' shape=(3, 2) dtype=float32_ref>, <tf.Variable 'Constant_variables/Variable_1:0' shape=(2,) dtype=float32_ref>]

tf.GraphKeys.LOCAL_VARIABLES: []

===== all variables =====
Constant_variables/Variable:0
Constant_variables/Variable_1:0
Training/Global_setp:0

Note that Training/Global_setp appears under GLOBAL_VARIABLES but not TRAINABLE_VARIABLES, because it was created with trainable=False. If you run the graph, the trainable variables on your default graph are updated once train_op is run.

with tf.Session() as sess:
    sess.run(init_op)
    
    train_writer =  tf.summary.FileWriter("./01-tf.constant", sess.graph)
    for _ in range(1):
        print("x_var, constant: \n{}, The shape: {}".format(sess.run(x_var), x_var.shape))
        print("weight: \n{}, The shape: {}".format(sess.run(weight), weight.shape))
        print("bias: \n{}, The shape: {}".format(sess.run(bias), bias.shape))
        print("ground_truths, constant: \n{}, The shape: {}".format(sess.run(ground_truths), ground_truths.shape))
        print("output: \n{}, The shape: {}".format(sess.run(output), output.shape))
        print("sub_for_loss: \n{}, The shape: {}".format(sess.run(sub_for_loss), sub_for_loss.shape))
        print("losses: \n{}, The shape: {}".format(sess.run(losses), losses.shape))
        print("global_step_var: \n{}, The shape: {}".format(sess.run(global_step_var), global_step_var.shape))
    
        print("================== from now on, print one more time =========================")
        x_var_, weight_, bias_, ground_truths_, output_, sub_for_loss_, losses_, global_step_var_, _ = sess.run([x_var, weight,
                                                                                                         bias, ground_truths,
                                                                                                         output, sub_for_loss,
                                                                                                         losses, global_step_var,
                                                                                                         train_op])
        print("""x_var: \n{}\n, weight: \n{}\n, bias: \n{}\n, 
        ground_truths: \n{}\n, output: \n{}\n, sub_for_loss: \n{}\n, 
        losses: \n{}\n, global_step_var: \n{}\n""".format(x_var_, weight_, bias_, ground_truths_, output_, sub_for_loss_, losses_, global_step_var_))
        print("====================================================")
        x_var_, weight_, bias_, ground_truths_, output_, sub_for_loss_, losses_, global_step_var_ = sess.run([x_var, weight,
                                                                                                         bias, ground_truths,
                                                                                                         output, sub_for_loss,
                                                                                                         losses, global_step_var])
        print("====================== after training ==============================")
        print("""x_var: \n{}\n, weight: \n{}\n, bias: \n{}\n, 
        ground_truths: \n{}\n, output: \n{}\n, sub_for_loss: \n{}\n, 
        losses: \n{}\n, global_step_var: \n{}\n""".format(x_var_, weight_, bias_, ground_truths_, output_, sub_for_loss_, losses_, global_step_var_))
    
    
    train_writer.close()
x_var, constant: 
[[1. 2. 3.]
 [4. 5. 6.]], The shape: (2, 3)
weight: 
[[1. 2.]
 [3. 4.]
 [5. 6.]], The shape: (3, 2)
bias: 
[1. 1.], The shape: (2,)
ground_truths, constant: 
[[1. 0.]
 [0. 1.]], The shape: (2, 2)
output: 
[[23. 29.]
 [50. 65.]], The shape: (2, 2)
sub_for_loss: 
[[-22. -29.]
 [-50. -64.]], The shape: (2, 2)
losses: 
1980.25, The shape: ()
global_step_var: 
0, The shape: ()
================== from now on, print one more time =========================
x_var: 
[[1. 2. 3.]
 [4. 5. 6.]]
, weight: 
[[1. 2.]
 [3. 4.]
 [5. 6.]]
, bias: 
[1. 1.]
, 
        ground_truths: 
[[1. 0.]
 [0. 1.]]
, output: 
[[23. 29.]
 [50. 65.]]
, sub_for_loss: 
[[-22. -29.]
 [-50. -64.]]
, 
        losses: 
1980.25

====================================================
====================== after training ==============================
x_var: 
[[1. 2. 3.]
 [4. 5. 6.]]
, weight: 
[[-10.1      -12.25    ]
 [-11.7      -14.900001]
 [-13.3      -17.550001]]
, bias: 
[-2.6000001 -3.65     ]
, 
        ground_truths: 
[[1. 0.]
 [0. 1.]]
, output: 
[[ -76.        -98.350006]
 [-181.30002  -232.45    ]]
, sub_for_loss: 
[[ 77.        98.350006]
 [181.30002  233.45    ]]
, 
        losses: 
25742.580078125
, global_step_var: 
1
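Notice that the loss actually increased, from 1980.25 to 25742.58: with these inputs, learning_rate=0.1 is large enough that gradient descent diverges on this toy problem. Below is a minimal training-loop sketch; it assumes the trainable graph above was rebuilt with a smaller learning rate (e.g. the 0.001 used in the first, constant-only snippet), in which case the loss should decrease over the steps instead.

# A small training-loop sketch (assumes the trainable graph above was
# rebuilt with a smaller learning rate, e.g. 0.001, so that it converges).
with tf.Session() as sess:
    sess.run(init_op)
    for step in range(10):
        # run one update, then read the loss and the global step counter
        _, loss_val, gs = sess.run([train_op, losses, global_step_var])
        print("step {}: global_step={}, loss={}".format(step, gs, loss_val))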

Reference

tf.constant (TensorFlow r1.8 API docs): https://www.tensorflow.org/versions/r1.8/api_docs/python/tf/constant