Tensorflow - error about tf.WholeFileReader, coordinator, threads, queue

I am writing simple code that takes an RGB image, converts it to grayscale, and tries to reconstruct the color image. Even if the model does not learn anything useful yet, I would at least like the code to run. I have a question about thread usage (coordinator and queue runners). Below is the code.



    with tf.variable_scope("color"):
        # -> make the variables (conv weights and biases) here, similar to VGG16
        ...

    def conv_layer(x, weights, biases, stride, name="convlayer", padding='SAME'):
        return tf.nn.relu(tf.nn.conv2d(x, weights, strides=stride, padding=padding) + biases,
                          name=name)

    def read_my_file_format(filename_queue, randomize=False):
        reader = tf.WholeFileReader()
        key, file = reader.read(filename_queue)
        uint8image = tf.image.decode_jpeg(file, channels=3)
        uint8image = tf.random_crop(uint8image, (224, 224, 3))
        if randomize:
            uint8image = tf.image.random_flip_left_right(uint8image)
            uint8image = tf.image.random_flip_up_down(uint8image, seed=None)
        float_image = tf.div(tf.cast(uint8image, tf.float32), 255)
        return float_image

    def input_pipeline(filenames, batch_size, num_epochs=None):
        filename_queue = tf.train.string_input_producer(
            filenames, num_epochs=num_epochs, shuffle=False)
        example = read_my_file_format(filename_queue, randomize=False)
        min_after_dequeue = 5
        capacity = min_after_dequeue + 3 * batch_size
        example_batch = tf.train.shuffle_batch(
            [example], batch_size=batch_size, capacity=capacity,
            min_after_dequeue=min_after_dequeue)
        return example_batch

    with tf.name_scope("images_setting"):
        filenames = sorted(glob.glob("C:/example/*.jpg"))
        # filenames = ['C:/example/000005.jpg', 'C:/example/000007.jpg', ...]
        batch_size = 2
        num_epochs = 100

        colorimage = input_pipeline(filenames, batch_size, num_epochs=num_epochs)
        grayscale = tf.image.rgb_to_grayscale(colorimage)

    with tf.name_scope("layer_explain"):
        expand = tf.image.grayscale_to_rgb(grayscale)
        conv1_1 = conv_layer(expand, conv1_1_weights, conv1_1_biases, stride1, 'conv1_1')
        conv1_2 = conv_layer(conv1_1, conv1_2_weights, conv1_2_biases, stride1, 'conv1_2')

        conv2_1 = conv_layer(conv1_2, conv2_1_weights, conv2_1_biases, stride1, 'conv2_1')
        conv2_2 = conv_layer(conv2_1, conv2_2_weights, conv2_2_biases, stride1, 'conv2_2')

        conv3_1 = conv_layer(conv2_2, conv3_1_weights, conv3_1_biases, stride1, 'conv3_1')
        conv3_2 = conv_layer(conv3_1, conv3_2_weights, conv3_2_biases, stride1, 'conv3_2')
        conv3_3 = conv_layer(conv3_2, conv3_3_weights, conv3_3_biases, stride1, 'conv3_3')

        conv4_1 = conv_layer(conv3_3, conv4_1_weights, conv4_1_biases, stride1, 'conv4_1')
        conv4_2 = conv_layer(conv4_1, conv4_2_weights, conv4_2_biases, stride1, 'conv4_2')
        conv4_3 = conv_layer(conv4_2, conv4_3_weights, conv4_3_biases, stride1, 'conv4_3')

        conv5_1 = conv_layer(conv4_3, conv5_1_weights, conv5_1_biases, stride1, 'conv5_1')
        conv5_2 = conv_layer(conv5_1, conv5_2_weights, conv5_2_biases, stride1, 'conv5_2')
        conv5_3 = conv_layer(conv5_2, conv5_3_weights, conv5_3_biases, stride1, 'conv5_3')

    print("conv5_3: ", conv5_3)
    print("colorimage: ", colorimage)
    loss = tf.reduce_mean(tf.square(conv5_3 - colorimage))
    optimizer = tf.train.GradientDescentOptimizer(0.001)
    opt = optimizer.minimize(loss)

    init_global = tf.global_variables_initializer()
    init_local = tf.local_variables_initializer()
    sess = tf.Session()
    sess.run(init_global)
    sess.run(init_local)

    # Start input enqueue threads.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    print("expand: ", expand)
    print("conv1_1: ", conv1_1)

    print("grayscale: ", grayscale)
    print(filenames, '**********************')
    try:
        while not coord.should_stop():
            training_opt = sess.run(opt)

            for i in range(10):
                loss = sess.run(loss)
                print("cost: {}".format(loss))
    except Exception as ex:
        print(ex)
        print("Done training -- epoch limit reached")
    finally:
        coord.request_stop()
        coord.join(threads)
        sess.close()


Error message:




cost: 0.2219611406326294

Fetch argument 0.22196114 has invalid type <class 'numpy.float32'>, must be a string or Tensor. (Can not convert a float32 into a Tensor or Operation.)

--> This is the error. I think the message means something is wrong with the loss function.

Done training -- epoch limit reached
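For reference, the bare message can be reproduced in isolation: asking sess.run to fetch a plain NumPy float instead of a Tensor fails with exactly this text. (The snippet below is only an illustration and is not part of the script above.)

    import numpy as np
    import tensorflow as tf

    with tf.Session() as sess:
        value = np.float32(0.22196114)   # a plain float, not a Tensor
        sess.run(value)                  # TypeError: Fetch argument ... must be a string or Tensor.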

      multithreading tensorflow queue

asked May 30 '17 at 7:42, last edited May 30 '17 at 13:53 · 이주성

1 Answer

          The issue is the following line:



           loss = sess.run(loss)


The first time it runs, loss is a Tensor, so session.run can fetch it and returns its value; but that value is assigned back to the Python variable loss, which is now a Python float and cannot be passed to session.run on the next call.



          Do instead something like



           loss_value = sess.run(loss)


          and you'll be fine.
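Concretely, here is a minimal sketch of the fixed loop, reusing the names from the question (loss_value is just an illustrative name, and catching tf.errors.OutOfRangeError instead of a bare Exception is a common pattern for queue-based input pipelines rather than something required by the fix):

    try:
        while not coord.should_stop():
            # Fetch the train op and the loss Tensor together; the Python name
            # `loss` keeps pointing at the Tensor, only `loss_value` is a float.
            _, loss_value = sess.run([opt, loss])
            print("cost: {}".format(loss_value))
    except tf.errors.OutOfRangeError:
        print("Done training -- epoch limit reached")
    finally:
        coord.request_stop()
        coord.join(threads)
        sess.close()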

answered Nov 15 '18 at 18:21 · Alexandre Passos