tf.constant
This function creates a constant tensor: use it when you want a tensor whose value is fixed and will not be updated by a training algorithm such as backpropagation.
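Before the example, a minimal sketch of my own (not from the API docs) to make the contrast concrete: a tf.Variable exposes an assign operation and can be updated, while a tf.constant has its value baked into the graph.

import tensorflow as tf

c = tf.constant(3.0)       # value is baked into the graph; cannot be reassigned
v = tf.Variable(3.0)       # holds mutable state; optimizers can update it
update_v = v.assign(5.0)   # Variables expose assign ops; constants do not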
"""Example code for tf.constant(https://www.tensorflow.org/versions/r1.8/api_docs/python/tf/constant)
tf.constant(
value,
dtype=None,
shape=None,
name='Const',
verify_shape=False
)
"""
import sys
import tensorflow as tf
print("=== Version checking ===")
print("The version of sys: \n{}".format(sys.version))
print("Tensorflow version: {}".format(tf.__version__))
print("========================")
# Constant 1-D Tensor populated with value list.
tensor1 = tf.constant([1, 2, 3, 4, 5, 6, 7])
# Constant 2-D tensor populated with scalar value -1.
tensor2 = tf.constant(-1.0, shape=[2, 3])
print("===== Tensor Shape ======")
print("tensor1: {}".format(tensor1))
print("tensor2: {}".format(tensor2))
with tf.Session() as sess:
    print("tensor1: \n{}, The shape: {}".format(sess.run(tensor1), tensor1.shape))
    print("tensor2: \n{}, The shape: {}".format(sess.run(tensor2), tensor2.shape))
Let's check whether a constant is updated by backpropagation. If you try to train the graph below, you will get an error message like

ValueError: No variables to optimize.

That is because your default graph has no trainable variables. Let's check the code below.
x = [[1., 2., 3.],
     [4., 5., 6.]]
w = [[1., 2.],
     [3., 4.],
     [5., 6.]]
b = [1., 1.]
label = [[1., 0.], [0., 1.]]

with tf.name_scope("Constant_variables") as scope:
    x_var = tf.constant(x, dtype=tf.float32)
    # weight and bias are plain constants here, so there is nothing for
    # the optimizer to train; change them to tf.Variable to make the
    # minimize() call below succeed.
    weight = tf.constant(w, dtype=tf.float32)
    bias = tf.constant(b, dtype=tf.float32)
    ground_truths = tf.constant(label, dtype=tf.float32)
with tf.name_scope("Name_scope") as scope:
    output = tf.add(tf.matmul(x_var, weight), bias)
with tf.name_scope("Loss") as scope:
    sub_for_loss = tf.subtract(ground_truths, output)
    losses = tf.reduce_mean(tf.square(sub_for_loss))
    tf.summary.scalar("Loss", losses)
with tf.name_scope("Training") as scope:
    global_step_var = tf.Variable(0, name="Global_setp1", trainable=False)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
    # Raises "ValueError: No variables to optimize." because the graph
    # has no trainable variables.
    train_op = optimizer.minimize(losses, global_step=global_step_var)
init_op = tf.global_variables_initializer()
merged_op = tf.summary.merge_all()
print("===== Tensor Shape ======")
print("x_var: {}".format(x_var))
print("weight: {}".format(weight))
print("bias: {}".format(bias))
print("ground_truths: {}".format(ground_truths))
print("output: {}".format(output))
print("losses: {}".format(losses))
print("global_step_var: {}".format(global_step_var))
print("optimizer: {}".format(optimizer))
print("train_op: {}".format(train_op))
print("init_op: {}".format(init_op))
print("merged_op: {}".format(merged_op))
If you check in detail what kind of variables are on your graph with the tf.get_collection function, you will see that the code above has only one variable, named "Training/Global_setp1", and it is not trainable.
###################################
#  Checking the graph's variables #
###################################
trainable_variable1 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
trainable_variable2 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
trainable_variable3 = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES)
print("===== variable type =====")
print("\ntf.GraphKeys.GLOBAL_VARIABLES: {}".format(trainable_variable1))
print("\ntf.GraphKeys.TRAINABLE_VARIABLES: {}".format(trainable_variable2))
print("\ntf.GraphKeys.LOCAL_VARIABLES: {}".format(trainable_variable3))
print("\n===== all variables =====")
for v in tf.global_variables():
    print(v.name)
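One way to see the failure coming (a guard of my own, not part of the original code) is to check the TRAINABLE_VARIABLES collection before calling minimize():

if not tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
    raise RuntimeError("No trainable variables on the default graph; "
                       "optimizer.minimize() would raise "
                       "'ValueError: No variables to optimize.'")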
Now let's change the graph to be trainable. First of all, define the weight and bias with tf.Variable, like this:
x = [[1., 2., 3.],
     [4., 5., 6.]]
w = [[1., 2.],
     [3., 4.],
     [5., 6.]]
b = [1., 1.]
label = [[1., 0.], [0., 1.]]

with tf.name_scope("Constant_variables") as scope:
    x_var = tf.constant(x, dtype=tf.float32)
    # weight and bias are now tf.Variable, so the optimizer has
    # parameters to train and minimize() succeeds.
    weight = tf.Variable(w, dtype=tf.float32)
    bias = tf.Variable(b, dtype=tf.float32)
    ground_truths = tf.constant(label, dtype=tf.float32)
with tf.name_scope("Name_scope") as scope:
    output = tf.add(tf.matmul(x_var, weight), bias)
with tf.name_scope("Loss") as scope:
    sub_for_loss = tf.subtract(ground_truths, output)
    losses = tf.reduce_mean(tf.square(sub_for_loss))
    tf.summary.scalar("Loss", losses)
with tf.name_scope("Training") as scope:
    global_step_var = tf.Variable(0, name="Global_setp", trainable=False)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    train_op = optimizer.minimize(losses, global_step=global_step_var)
init_op = tf.global_variables_initializer()
merged_op = tf.summary.merge_all()
print("===== Tensor Shape ======")
print("x_var: {}".format(x_var))
print("weight: {}".format(weight))
print("bias: {}".format(bias))
print("ground_truths: {}".format(ground_truths))
print("output: {}".format(output))
print("losses: {}".format(losses))
print("global_step_var: {}".format(global_step_var))
print("optimizer: {}".format(optimizer))
print("train_op: {}".format(train_op))
print("init_op: {}".format(init_op))
print("merged_op: {}".format(merged_op))
===== Tensor Shape ======
x_var: Tensor("Constant_variables/Const:0", shape=(2, 3), dtype=float32)
weight: <tf.Variable 'Constant_variables/Variable:0' shape=(3, 2) dtype=float32_ref>
bias: <tf.Variable 'Constant_variables/Variable_1:0' shape=(2,) dtype=float32_ref>
ground_truths: Tensor("Constant_variables/Const_1:0", shape=(2, 2), dtype=float32)
output: Tensor("Name_scope/Add:0", shape=(2, 2), dtype=float32)
losses: Tensor("Loss/Mean:0", shape=(), dtype=float32)
global_step_var: <tf.Variable 'Training/Global_setp:0' shape=() dtype=int32_ref>
optimizer: <tensorflow.python.training.gradient_descent.GradientDescentOptimizer object at 0x7fd90bbb0b00>
train_op: name: "Training/GradientDescent"
op: "AssignAdd"
input: "Training/Global_setp"
input: "Training/GradientDescent/value"
attr {
  key: "T"
  value {
    type: DT_INT32
  }
}
attr {
  key: "_class"
  value {
    list {
      s: "loc:@Training/Global_setp"
    }
  }
}
attr {
  key: "use_locking"
  value {
    b: false
  }
}
init_op: name: "init"
op: "NoOp"
input: "^Constant_variables/Variable/Assign"
input: "^Constant_variables/Variable_1/Assign"
input: "^Training/Global_setp/Assign"
With the tf.get_collection function, you can see which variables will be updated during training.
###################################
#  Checking the graph's variables #
###################################
trainable_variable1 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
trainable_variable2 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
trainable_variable3 = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES)
print("===== variable type =====")
print("\ntf.GraphKeys.GLOBAL_VARIABLES: {}".format(trainable_variable1))
print("\ntf.GraphKeys.TRAINABLE_VARIABLES: {}".format(trainable_variable2))
print("\ntf.GraphKeys.LOCAL_VARIABLES: {}".format(trainable_variable3))
print("\n===== all variables =====")
for v in tf.global_variables():
    print(v.name)
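As a shorthand (assuming the same graph as above), tf.trainable_variables() returns the TRAINABLE_VARIABLES collection directly; after the change it should list the weight and the bias, while the non-trainable global step stays out:

for v in tf.trainable_variables():
    print(v.name, v.shape)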
If you run the graph, the trainable variables on your default graph are updated after running the train_op.
with tf.Session() as sess:
    sess.run(init_op)
    train_writer = tf.summary.FileWriter("./01-tf.constant", sess.graph)
    for _ in range(1):
        print("x_var, constant: \n{}, The shape: {}".format(sess.run(x_var), x_var.shape))
        print("weight: \n{}, The shape: {}".format(sess.run(weight), weight.shape))
        print("bias: \n{}, The shape: {}".format(sess.run(bias), bias.shape))
        print("ground_truths, constant: \n{}, The shape: {}".format(sess.run(ground_truths), ground_truths.shape))
        print("output: \n{}, The shape: {}".format(sess.run(output), output.shape))
        print("sub_for_loss: \n{}, The shape: {}".format(sess.run(sub_for_loss), sub_for_loss.shape))
        print("losses: \n{}, The shape: {}".format(sess.run(losses), losses.shape))
        print("global_step_var: \n{}, The shape: {}".format(sess.run(global_step_var), global_step_var.shape))
        print("================== from now on, print one more time =========================")
        # Running train_op in the same sess.run call updates weight and bias.
        x_var_, weight_, bias_, ground_truths_, output_, sub_for_loss_, losses_, global_step_var_, _ = sess.run(
            [x_var, weight, bias, ground_truths, output, sub_for_loss, losses, global_step_var, train_op])
        print("""x_var: \n{}\n, weight: \n{}\n, bias: \n{}\n,
ground_truths: \n{}\n, output: \n{}\n, sub_for_loss: \n{}\n, losses: \n{}\n,
global_step_var: \n{}\n""".format(x_var_, weight_, bias_, ground_truths_, output_, sub_for_loss_, losses_, global_step_var_))
        print("====================================================")
        x_var_, weight_, bias_, ground_truths_, output_, sub_for_loss_, losses_, global_step_var_ = sess.run(
            [x_var, weight, bias, ground_truths, output, sub_for_loss, losses, global_step_var])
        print("====================== after training ==============================")
        print("""x_var: \n{}\n, weight: \n{}\n, bias: \n{}\n,
ground_truths: \n{}\n, output: \n{}\n, sub_for_loss: \n{}\n, losses: \n{}\n,
global_step_var: \n{}\n""".format(x_var_, weight_, bias_, ground_truths_, output_, sub_for_loss_, losses_, global_step_var_))
    train_writer.close()
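The loop above runs train_op only once; here is a sketch of my own that runs several steps so you can watch the loss and the global step evolve (note that on this tiny example a learning rate of 0.1 may be too large for the loss to decrease steadily):

with tf.Session() as sess:
    sess.run(init_op)
    for _ in range(10):
        # train_op updates weight and bias and increments the global step.
        _, loss_val, step_val = sess.run([train_op, losses, global_step_var])
        print("step {}: loss = {}".format(step_val, loss_val))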
Reference
- tf.constant in the TensorFlow API docs, r1.8: https://www.tensorflow.org/versions/r1.8/api_docs/python/tf/constant