Python tensorflow.substr() Examples

The following are 19 code examples of tensorflow.substr(), collected from open-source projects; the project and source file for each one are listed above the example.
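All of the snippets below target the TensorFlow 1.x graph API, where the op is exposed at the top level as tf.substr(input, pos, len); in later releases the same kernel moved to tf.strings.substr. Positions and lengths count bytes, not Unicode characters. A minimal sketch of the semantics (TF 1.x session style):

import tensorflow as tf

strings = tf.constant([b"Hello", b"World"])
clipped = tf.substr(strings, 1, 3)  # byte offsets: -> [b"ell", b"orl"]
with tf.Session() as sess:
    print(sess.run(clipped))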
Example #1
Source File: monodepth_dataloader.py    From Semantic-Mono-Depth with MIT License
def read_image(self, image_path):
        # tf.decode_image does not return the image size, this is an ugly workaround to handle both jpeg and png
        path_length = string_length_tf(image_path)[0]
        file_extension = tf.substr(image_path, path_length - 3, 3)
        file_cond = tf.equal(file_extension, 'jpg')
        
        image  = tf.cond(file_cond, lambda: tf.image.decode_jpeg(tf.read_file(image_path)), lambda: tf.image.decode_png(tf.read_file(image_path)))

        # if the dataset is cityscapes, we crop the last fifth to remove the car hood
        if self.dataset == 'cityscapes':
            o_height    = tf.shape(image)[0]
            crop_height = (o_height * 4) // 5
            image  =  image[:crop_height,:,:]

        image  = tf.image.convert_image_dtype(image,  tf.float32)
        image  = tf.image.resize_images(image,  [self.params.height, self.params.width], tf.image.ResizeMethod.AREA)

        return image 
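For readers on newer TensorFlow, here is a sketch of the same extension check written against the namespaced ops that replaced the flat TF 1.x names (tf.strings.length standing in for the string_length_tf helper, plus tf.strings.substr, tf.io.read_file, and tf.io.decode_jpeg/decode_png):

path_length = tf.strings.length(image_path)
file_extension = tf.strings.substr(image_path, path_length - 3, 3)
is_jpg = tf.equal(file_extension, "jpg")
image = tf.cond(is_jpg,
                lambda: tf.io.decode_jpeg(tf.io.read_file(image_path)),
                lambda: tf.io.decode_png(tf.io.read_file(image_path)))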
Example #2
Source File: substr_op_test.py    From deep_image_model with Apache License 2.0
def _testElementWisePosLen(self, dtype):
    test_string = [[b"ten", b"eleven", b"twelve"],
                   [b"thirteen", b"fourteen", b"fifteen"],
                   [b"sixteen", b"seventeen", b"eighteen"]]
    position = np.array([[1, 2, 3],
                         [1, 2, 3],
                         [1, 2, 3]], dtype)
    length = np.array([[2, 3, 4],
                       [4, 3, 2],
                       [5, 5, 5]], dtype)
    expected_value = [[b"en", b"eve", b"lve"],
                      [b"hirt", b"urt", b"te"],
                      [b"ixtee", b"vente", b"hteen"]]

    substr_op = tf.substr(test_string, position, length)
    with self.test_session():
      substr = substr_op.eval()
      self.assertAllEqual(substr, expected_value) 
Example #3
Source File: monodepth_dataloader.py    From Semantic-Mono-Depth with MIT License
def read_semantic_gt(self, image_path):
        # tf.decode_image does not return the image size, this is an ugly workaround to handle both jpeg and png
        path_length = string_length_tf(image_path)[0]
        file_extension = tf.substr(image_path, path_length - 3, 3)
        file_cond = tf.equal(file_extension, 'png')
        
        image  = tf.cond(file_cond, lambda: tf.image.decode_png(tf.read_file(image_path)), lambda: tf.zeros([self.params.height, self.params.width, 1], tf.uint8))

        # if the dataset is cityscapes, we crop the last fifth to remove the car hood
        if self.dataset == 'cityscapes':
            o_height    = tf.shape(image)[0]
            crop_height = (o_height * 4) // 5
            image  =  image[:crop_height,:,:]

        image = tf.to_int32(tf.image.resize_images(image,  [self.params.height, self.params.width], tf.image.ResizeMethod.NEAREST_NEIGHBOR))
        valid = tf.cond(file_cond, lambda: tf.ones([self.params.height, self.params.width, 1], tf.float32), lambda: tf.zeros([self.params.height, self.params.width, 1], tf.float32))

        return image, valid 
Example #4
Source File: input.py    From UnFlow with MIT License
def _read_flow(filenames, num_epochs=None):
    """Given a list of filenames, constructs a reader op for ground truth flow files."""
    filename_queue = tf.train.string_input_producer(filenames,
        shuffle=False, capacity=len(filenames), num_epochs=num_epochs)
    reader = tf.WholeFileReader()
    _, value = reader.read(filename_queue)
    value = tf.reshape(value, [1])
    value_width = tf.substr(value, 4, 4)
    value_height = tf.substr(value, 8, 4)
    width = tf.reshape(tf.decode_raw(value_width, out_type=tf.int32), [])
    height = tf.reshape(tf.decode_raw(value_height, out_type=tf.int32), [])

    value_flow = tf.substr(value, 12, 8 * 436 * 1024)
    flow = tf.decode_raw(value_flow, out_type=tf.float32)

    return tf.reshape(flow, [436, 1024, 2]) 
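The hard-coded byte offsets follow the Middlebury .flo layout: a 4-byte magic tag, width and height as little-endian int32 at offsets 4 and 8, then interleaved float32 (u, v) pairs; 1024x436 is the fixed MPI Sintel resolution. A plain-Python sketch of the same parse, assuming that layout:

import struct
import numpy as np

def read_flo(path):
    # Middlebury .flo: 4-byte magic (b'PIEH'), int32 width, int32 height,
    # then 2 * width * height little-endian float32 values
    with open(path, "rb") as f:
        assert f.read(4) == b"PIEH", "not a .flo file"
        width, height = struct.unpack("<ii", f.read(8))
        data = np.fromfile(f, dtype=np.float32, count=2 * width * height)
    return data.reshape(height, width, 2)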
Example #5
Source File: substr_op_test.py    From deep_image_model with Apache License 2.0
def _testMismatchPosLenShapes(self, dtype):
    test_string = [[b"ten", b"eleven", b"twelve"],
                   [b"thirteen", b"fourteen", b"fifteen"],
                   [b"sixteen", b"seventeen", b"eighteen"]]
    position = np.array([[1, 2, 3]], dtype)
    length = np.array([2, 3, 4], dtype)
    # Should fail: position/length have different rank
    with self.assertRaises(ValueError):
      substr_op = tf.substr(test_string, position, length)

    position = np.array([[1, 2, 3],
                         [1, 2, 3],
                         [1, 2, 3]], dtype)
    length = np.array([[2, 3, 4]], dtype)
    # Should fail: position/length have mismatched shapes
    with self.assertRaises(ValueError):
      substr_op = tf.substr(test_string, position, length) 
Example #6
Source File: input.py    From UnFlow with MIT License
def _read_flow(filenames, num_epochs=None):
    """Given a list of filenames, constructs a reader op for ground truth flow files."""
    filename_queue = tf.train.string_input_producer(filenames,
        shuffle=False, capacity=len(filenames), num_epochs=num_epochs)
    reader = tf.WholeFileReader()
    _, value = reader.read(filename_queue)
    value = tf.reshape(value, [1])
    value_width = tf.substr(value, 4, 4)
    value_height = tf.substr(value, 8, 4)
    width = tf.reshape(tf.decode_raw(value_width, out_type=tf.int32), [])
    height = tf.reshape(tf.decode_raw(value_height, out_type=tf.int32), [])

    value_flow = tf.substr(value, 12, 8 * width * height)
    flow = tf.decode_raw(value_flow, out_type=tf.float32)
    flow = tf.reshape(flow, [height, width, 2])
    mask = tf.to_float(tf.logical_and(flow[:, :, 0] < 1e9, flow[:, :, 1] < 1e9))
    mask = tf.reshape(mask, [height, width, 1])

    return flow, mask 
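This variant of Example #4 decodes width and height from the file header instead of hard-coding the Sintel resolution, and also returns a validity mask: components of 1e9 or more serve as the dataset's "no ground truth" sentinel. The mask logic in NumPy, for comparison (a sketch):

import numpy as np

def flow_validity_mask(flow):
    # flow: [H, W, 2]; a pixel is valid only if both components are real values
    valid = np.logical_and(flow[:, :, 0] < 1e9, flow[:, :, 1] < 1e9)
    return valid.astype(np.float32)[:, :, np.newaxis]  # [H, W, 1]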
Example #7
Source File: util.py    From mac-graph with The Unlicense
def tf_startswith(tensor, prefix, axis=None):
	return tf.reduce_all(tf.equal(tf.substr(tensor, 0, len(prefix)), prefix), axis=axis)



# --------------------------------------------------------------------------
# File readers and writers
# -------------------------------------------------------------------------- 
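In tf_startswith above, tf.equal(tf.substr(tensor, 0, len(prefix)), prefix) yields one boolean per element, and tf.reduce_all then collapses them (over every axis when axis=None). A usage sketch:

lines = tf.constant([b"Q: hello", b"A: world"])
starts = tf.equal(tf.substr(lines, 0, 3), b"Q: ")  # -> [True, False]
all_start = tf_startswith(lines, b"Q: ")           # -> False (fully reduced)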
Example #8
Source File: common.py    From HyperGAN with MIT License
def __init__(self, config, batch_size, one_hot=False):
        self.lookup = None
        reader = tf.TextLineReader()
        filename_queue = tf.train.string_input_producer(["chargan.txt"])
        key, x = reader.read(filename_queue)
        vocabulary = self.get_vocabulary()

        table = tf.contrib.lookup.string_to_index_table_from_tensor(
            mapping = vocabulary, default_value = 0)

        x = tf.string_join([x, tf.constant(" " * 64)]) 
        x = tf.substr(x, [0], [64])
        x = tf.string_split(x,delimiter='')
        x = tf.sparse_tensor_to_dense(x, default_value=' ')
        x = tf.reshape(x, [64])
        x = table.lookup(x)
        self.one_hot = one_hot
        if one_hot:
            x = tf.one_hot(x, len(vocabulary))
            x = tf.cast(x, dtype=tf.float32)
            x = tf.reshape(x, [1, int(x.get_shape()[0]), int(x.get_shape()[1]), 1])
        else:
            x = tf.cast(x, dtype=tf.float32)
            x -= len(vocabulary)/2.0
            x /= len(vocabulary)/2.0
            x = tf.reshape(x, [1,1, 64, 1])

        num_preprocess_threads = 8

        x = tf.train.shuffle_batch(
          [x],
          batch_size=batch_size,
          num_threads=num_preprocess_threads,
          capacity= 5000,
          min_after_dequeue=500,
          enqueue_many=True)

        self.x = x
        self.table = table 
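The tf.string_join / tf.substr pair right-pads each line with 64 spaces so the slice always yields exactly 64 bytes; tf.substr clamps an over-long len rather than erroring, so without the padding a short line would come back short and the later reshape to [64] would fail. The trick in isolation (a sketch):

line = tf.constant("short line")
padded = tf.string_join([line, tf.constant(" " * 64)])
fixed = tf.substr(padded, 0, 64)  # always exactly 64 bytes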
Example #9
Source File: data_util.py    From reading_comprehension_tf with Apache License 2.0
def generate_subword_feat(sentence,
                          subword_vocab_index,
                          word_max_length,
                          subword_max_length,
                          subword_size,
                          word_sos,
                          word_eos,
                          word_placeholder_enable,
                          subword_pad):
    def word_to_subword(word):
        """generate subwords for word"""
        word_len = tf.size(tf.string_split([word], delimiter=''))
        subwords = tf.substr([word], 0, subword_size)
        for i in range(1, subword_max_length):
            subwords = tf.cond(i+subword_size-1 < word_len,
                lambda: tf.concat([subwords, tf.substr([word], i, subword_size)], 0),
                lambda: subwords)
        
        subwords = tf.concat([subwords[:subword_max_length],
            tf.constant(subword_pad, shape=[subword_max_length])], axis=0)
        subwords = tf.reshape(subwords[:subword_max_length], shape=[subword_max_length])
        
        return subwords
    
    """generate subword feature for sentence"""
    words = tf.string_split([sentence], delimiter=' ').values
    if word_placeholder_enable:
        words = tf.concat([[word_sos], words[:word_max_length], [word_eos],
            tf.constant(subword_pad, shape=[word_max_length])], axis=0)
        word_max_length = word_max_length + 2
    else:
        words = tf.concat([words[:word_max_length],
            tf.constant(subword_pad, shape=[word_max_length])], axis=0)
    
    words = tf.reshape(words[:word_max_length], shape=[word_max_length])
    word_subwords = tf.map_fn(word_to_subword, words)
    word_subwords = tf.cast(subword_vocab_index.lookup(word_subwords), dtype=tf.int32)
    
    return word_subwords 
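word_to_subword slides a window of subword_size bytes across the word, one tf.substr call per offset, then pads or truncates the n-gram list to subword_max_length. The same windowing in plain Python (a sketch):

def word_to_subwords(word, subword_size, subword_max_length, pad=""):
    # character n-grams of width subword_size, padded/truncated to a fixed count
    count = max(len(word) - subword_size + 1, 1)
    grams = [word[i:i + subword_size] for i in range(count)][:subword_max_length]
    return grams + [pad] * (subword_max_length - len(grams))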
Example #10
Source File: preprocessors.py    From mead-baseline with Apache License 2.0
def create_char_vectors_from_post(self, raw_post, mxlen):
        char2index = self.index
        if self.do_lowercase:
            raw_post = self.lowercase(raw_post)
        raw_post = tf.string_split(tf.reshape(raw_post, [-1]))
        culled_word_token_vals = tf.substr(raw_post.values, 0, self.mxwlen)
        char_tokens = tf.string_split(culled_word_token_vals, delimiter='')
        char_indices = char2index.lookup(char_tokens)
        return self.reshape_indices(char_indices, [mxlen, self.mxwlen]) 
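Here tf.substr caps every token at mxwlen bytes before the character split. Because the op clamps an over-long len instead of raising, tokens shorter than mxwlen pass through unchanged (a sketch):

tokens = tf.constant([b"internationalization", b"cat"])
capped = tf.substr(tokens, 0, 5)  # -> [b"inter", b"cat"]; len is clamped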
Example #11
Source File: tokenizeddata.py    From ChatLearner with Apache License 2.0
def _load_corpus(self, corpus_dir):
        for fd in range(2, -1, -1):
            file_list = []
            if fd == 0:
                file_dir = os.path.join(corpus_dir, AUG0_FOLDER)
            elif fd == 1:
                file_dir = os.path.join(corpus_dir, AUG1_FOLDER)
            else:
                file_dir = os.path.join(corpus_dir, AUG2_FOLDER)

            for data_file in sorted(os.listdir(file_dir)):
                full_path_name = os.path.join(file_dir, data_file)
                if os.path.isfile(full_path_name) and data_file.lower().endswith('.txt'):
                    file_list.append(full_path_name)

            assert len(file_list) > 0
            dataset = tf.data.TextLineDataset(file_list)

            src_dataset = dataset.filter(lambda line:
                                         tf.logical_and(tf.size(line) > 0,
                                                        tf.equal(tf.substr(line, 0, 2), tf.constant('Q:'))))
            src_dataset = src_dataset.map(lambda line:
                                          tf.substr(line, 2, MAX_LEN)).prefetch(4096)
            tgt_dataset = dataset.filter(lambda line:
                                         tf.logical_and(tf.size(line) > 0,
                                                        tf.equal(tf.substr(line, 0, 2), tf.constant('A:'))))
            tgt_dataset = tgt_dataset.map(lambda line:
                                          tf.substr(line, 2, MAX_LEN)).prefetch(4096)

            src_tgt_dataset = tf.data.Dataset.zip((src_dataset, tgt_dataset))
            if fd == 1:
                src_tgt_dataset = src_tgt_dataset.repeat(self.hparams.aug1_repeat_times)
            elif fd == 2:
                src_tgt_dataset = src_tgt_dataset.repeat(self.hparams.aug2_repeat_times)

            if self.text_set is None:
                self.text_set = src_tgt_dataset
            else:
                self.text_set = self.text_set.concatenate(src_tgt_dataset) 
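One caveat: tf.size() counts tensor elements, so for a scalar string line it is always 1 and the tf.size(line) > 0 guard never filters anything; testing byte length needs the string-length op instead. A sketch of the source filter with that fix, assuming the tf.strings namespace of later releases:

src_dataset = dataset.filter(
    lambda line: tf.logical_and(
        tf.strings.length(line) > 0,
        tf.equal(tf.strings.substr(line, 0, 2), "Q:")))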
Example #12
Source File: substr_op_test.py    From deep_image_model with Apache License 2.0
def testWrongDtype(self):
    with self.test_session():
      with self.assertRaises(TypeError):
        tf.substr(b"test", 3.0, 1)
      with self.assertRaises(TypeError):
        tf.substr(b"test", 3, 1.0) 
Example #13
Source File: substr_op_test.py    From deep_image_model with Apache License 2.0
def _testMatrixStrings(self, dtype):
    test_string = [[b"ten", b"eleven", b"twelve"],
                   [b"thirteen", b"fourteen", b"fifteen"],
                   [b"sixteen", b"seventeen", b"eighteen"]]
    position = np.array(1, dtype)
    length = np.array(4, dtype)
    expected_value = [[b"en", b"leve", b"welv"],
                      [b"hirt", b"ourt", b"ifte"],
                      [b"ixte", b"even", b"ight"]]

    substr_op = tf.substr(test_string, position, length)
    with self.test_session():
      substr = substr_op.eval()
      self.assertAllEqual(substr, expected_value) 
Example #14
Source File: substr_op_test.py    From deep_image_model with Apache License 2.0
def _testVectorStrings(self, dtype):
    test_string = [b"Hello", b"World"]
    position = np.array(1, dtype)
    length = np.array(3, dtype)
    expected_value = [b"ell", b"orl"]

    substr_op = tf.substr(test_string, position, length)
    with self.test_session():
      substr = substr_op.eval()
      self.assertAllEqual(substr, expected_value) 
Example #15
Source File: substr_op_test.py    From deep_image_model with Apache License 2.0
def _testScalarString(self, dtype):
    test_string = b"Hello"
    position = np.array(1, dtype)
    length = np.array(3, dtype)
    expected_value = b"ell"

    substr_op = tf.substr(test_string, position, length)
    with self.test_session():
      substr = substr_op.eval()
      self.assertAllEqual(substr, expected_value) 
Example #16
Source File: util.py    From shortest-path with The Unlicense
def tf_startswith(tensor, prefix, axis=None):
	return tf.reduce_all(tf.equal(tf.substr(tensor, 0, len(prefix)), prefix), axis=axis)



# --------------------------------------------------------------------------
# File readers and writers
# -------------------------------------------------------------------------- 
Example #17
Source File: substr_op_test.py    From deep_image_model with Apache License 2.0
def _testOutOfRangeError(self, dtype):
    # Scalar/Scalar
    test_string = b"Hello"
    position = np.array(7, dtype)
    length = np.array(3, dtype)
    substr_op = tf.substr(test_string, position, length)
    with self.test_session():
      with self.assertRaises(tf.errors.InvalidArgumentError):
        substr = substr_op.eval()

    # Vector/Scalar
    test_string = [b"good", b"good", b"bad", b"good"]
    position = np.array(3, dtype)
    length = np.array(1, dtype)
    substr_op = tf.substr(test_string, position, length)
    with self.test_session():
      with self.assertRaises(tf.errors.InvalidArgumentError):
        substr = substr_op.eval()

    # Negative pos
    test_string = b"Hello"
    position = np.array(-1, dtype)
    length = np.array(3, dtype)
    substr_op = tf.substr(test_string, position, length)
    with self.test_session():
      with self.assertRaises(tf.errors.InvalidArgumentError):
        substr = substr_op.eval()

    # Matrix/Matrix
    test_string = [[b"good", b"good", b"good"],
                   [b"good", b"good", b"bad"],
                   [b"good", b"good", b"good"]]
    position = np.array([[1, 2, 3],
                         [1, 2, 3],
                         [1, 2, 3]], dtype)
    length = np.array([[3, 2, 1],
                       [1, 2, 3],
                       [2, 2, 2]], dtype)
    substr_op = tf.substr(test_string, position, length)
    with self.test_session():
      with self.assertRaises(tf.errors.InvalidArgumentError):
        substr = substr_op.eval()

    # Broadcast
    test_string = [[b"good", b"good", b"good"],
                   [b"good", b"good", b"bad"]]
    position = np.array([1, 2, 3], dtype)
    length = np.array([1, 2, 3], dtype)
    substr_op = tf.substr(test_string, position, length)
    with self.test_session():
      with self.assertRaises(tf.errors.InvalidArgumentError):
        substr = substr_op.eval() 
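The asymmetry these cases pin down: a pos that is negative or past the end of the string fails at evaluation time with InvalidArgumentError, while an over-long len is silently clamped to the end of the string. A sketch:

clamped = tf.substr(b"Hello", 1, 100)  # -> b"ello": len is clamped
invalid = tf.substr(b"Hello", 7, 3)    # graph builds; eval() raises InvalidArgumentError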
Example #18
Source File: substr_op_test.py    From deep_image_model with Apache License 2.0
def _testBroadcast(self, dtype):
    # Broadcast pos/len onto input string
    test_string = [[b"ten", b"eleven", b"twelve"],
                   [b"thirteen", b"fourteen", b"fifteen"],
                   [b"sixteen", b"seventeen", b"eighteen"],
                   [b"nineteen", b"twenty", b"twentyone"]]
    position = np.array([1, 2, 3], dtype)
    length = np.array([1, 2, 3], dtype)
    expected_value = [[b"e", b"ev", b"lve"],
                      [b"h", b"ur", b"tee"],
                      [b"i", b"ve", b"hte"],
                      [b"i", b"en", b"nty"]]
    substr_op = tf.substr(test_string, position, length)
    with self.test_session():
      substr = substr_op.eval()
      self.assertAllEqual(substr, expected_value)

    # Broadcast input string onto pos/len
    test_string = [b"thirteen", b"fourteen", b"fifteen"]
    position = np.array([[1, 2, 3],
                         [3, 2, 1],
                         [5, 5, 5]], dtype)
    length = np.array([[3, 2, 1],
                       [1, 2, 3],
                       [2, 2, 2]], dtype)
    expected_value = [[b"hir", b"ur", b"t"],
                      [b"r", b"ur", b"ift"],
                      [b"ee", b"ee", b"en"]]
    substr_op = tf.substr(test_string, position, length)
    with self.test_session():
      substr = substr_op.eval()
      self.assertAllEqual(substr, expected_value)

    # Test 1D broadcast
    test_string = b"thirteen"
    position = np.array([1, 5, 7], dtype)
    length = np.array([3, 2, 1], dtype)
    expected_value = [b"hir", b"ee", b"n"]
    substr_op = tf.substr(test_string, position, length)
    with self.test_session():
      substr = substr_op.eval()
      self.assertAllEqual(substr, expected_value) 
Example #19
Source File: datasets.py    From self-supervision with BSD 3-Clause "New" or "Revised" License
def decode_image(contents, channels=None, name=None):
  """Convenience function for `decode_gif`, `decode_jpeg`, and `decode_png`.
  Detects whether an image is a GIF, JPEG, or PNG, and performs the appropriate
  operation to convert the input bytes `string` into a `Tensor` of type `uint8`.

  Note: `decode_gif` returns a 4-D array `[num_frames, height, width, 3]`, as
  opposed to `decode_jpeg` and `decode_png`, which return 3-D arrays
  `[height, width, num_channels]`. Make sure to take this into account when
  constructing your graph if you are intermixing GIF files with JPEG and/or PNG
  files.

  Args:
    contents: 0-D `string`. The encoded image bytes.
    channels: An optional `int`. Defaults to `0`. Number of color channels for
      the decoded image.
    name: A name for the operation (optional)

  Returns:
    `Tensor` with type `uint8` with shape `[height, width, num_channels]` for
      JPEG and PNG images and shape `[num_frames, height, width, 3]` for GIF
      images.
  """
  with ops.name_scope(name, 'decode_image') as scope:
    if channels not in (None, 0, 1, 3):
      raise ValueError('channels must be in (None, 0, 1, 3)')
    substr = tf.substr(contents, 0, 4)

    def _gif():
      # Create assert op to check that bytes are GIF decodable
      is_gif = tf.equal(substr, b'\x47\x49\x46\x38', name='is_gif')
      decode_msg = 'Unable to decode bytes as JPEG, PNG, or GIF'
      assert_decode = control_flow_ops.Assert(is_gif, [decode_msg])
      # Create assert to make sure that channels is not set to 1
      # Already checked above that channels is in (None, 0, 1, 3)
      gif_channels = 0 if channels is None else channels
      good_channels = tf.not_equal(gif_channels, 1, name='check_channels')
      channels_msg = 'Channels must be in (None, 0, 3) when decoding GIF images'
      assert_channels = control_flow_ops.Assert(good_channels, [channels_msg])
      with ops.control_dependencies([assert_decode, assert_channels]):
        return gen_image_ops.decode_gif(contents)

    def _png():
      return gen_image_ops.decode_png(contents, channels)

    def check_png():
      is_png = tf.equal(substr, b'\211PNG', name='is_png')
      return control_flow_ops.cond(is_png, _png, _gif, name='cond_png')

    def _jpeg():
      return gen_image_ops.decode_jpeg(contents, channels)

    is_jpeg = tf.logical_or(tf.equal(substr, b'\xff\xd8\xff\xe0', name='is_jpeg0'),
                            tf.equal(substr, b'\xff\xd8\xff\xe1', name='is_jpeg1'))

    return control_flow_ops.cond(is_jpeg, _jpeg, check_png, name='cond_jpeg')
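The dispatch keys off the first four bytes of the file: b'\x47\x49\x46\x38' is ASCII "GIF8", b'\211PNG' is the PNG signature, and b'\xff\xd8\xff\xe0' / b'\xff\xd8\xff\xe1' are the JFIF and EXIF JPEG headers (a broader JPEG check would accept any b'\xff\xd8\xff' prefix). The same sniff in plain Python (a sketch):

def sniff_image_format(data):
    header = data[:4]
    if header[:3] == b"\xff\xd8\xff":
        return "jpeg"
    if header == b"\x89PNG":  # same bytes as b'\211PNG'
        return "png"
    if header == b"GIF8":
        return "gif"
    return None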