Python tensorflow.python.framework.ops.RegisterGradient() Examples

The following are 24 code examples of tensorflow.python.framework.ops.RegisterGradient(), collected from open-source projects. Each example is preceded by its source file, the project it comes from, and the project's license. You may also want to check out all available functions/classes of the module tensorflow.python.framework.ops.
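RegisterGradient associates a Python gradient function with an op-type name; that name then has to be applied to concrete ops, typically through Graph.gradient_override_map, which is the pattern most of the examples below follow. The snippet here is a minimal sketch of that pattern, assuming TensorFlow 1.x-style graph mode (tf.compat.v1 in TensorFlow 2); the op-type name "ScaledIdentityGrad" and the 0.5 scaling are made up for illustration.

import tensorflow as tf
from tensorflow.python.framework import ops

@ops.RegisterGradient("ScaledIdentityGrad")
def _scaled_identity_grad(op, grad):
    # The forward value is untouched; the incoming gradient is halved.
    return [0.5 * grad]

g = tf.Graph()
with g.as_default():
    x = tf.constant(3.0)
    # Identity ops created in this scope use the custom gradient.
    with g.gradient_override_map({"Identity": "ScaledIdentityGrad"}):
        y = tf.identity(x)
    dy_dx = tf.gradients(y, x)[0]  # evaluates to 0.5 instead of 1.0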
Example #1
Source File: tf_cnnvis.py    From tf_cnnvis with MIT License
def _register_custom_gradients():
    """
    Register Custom Gradients.
    """
    global is_Registered

    if not is_Registered:
        # register LRN gradients
        @ops.RegisterGradient("Customlrn")
        def _CustomlrnGrad(op, grad):
            return grad

        # register Relu gradients
        @ops.RegisterGradient("GuidedRelu")
        def _GuidedReluGrad(op, grad):
            return tf.where(0. < grad, gen_nn_ops.relu_grad(grad, op.outputs[0]), tf.zeros_like(grad))

        is_Registered = True


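Registering "Customlrn" and "GuidedRelu" has no effect on its own; the names only matter once a graph maps existing op types onto them. The mapping tf_cnnvis uses is not part of the excerpt above, so the companion sketch below should be read as an assumption about how the registered names are applied, with `features` standing in for some activation tensor.

# Hypothetical usage: route LRN and Relu ops created in this scope through
# the custom gradients registered above.
graph = tf.get_default_graph()
with graph.gradient_override_map({"LRN": "Customlrn", "Relu": "GuidedRelu"}):
    relu_out = tf.nn.relu(features)
    lrn_out = tf.nn.lrn(relu_out)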
Example #2
Source File: gradients_test.py    From deep_image_model with Apache License 2.0
def testNoGradientForStringOutputs(self):
    with ops.Graph().as_default() as g:
      @ops.RegisterGradient("TestOp")
      def _TestOpGrad(op, float_grad, string_grad):
        """Gradient function for TestOp."""
        self.assertEquals(float_grad.dtype, dtypes.float32)
        self.assertFalse(string_grad)
        return float_grad
      ops.RegisterShape("TestOp")(None)

      c = constant(1.0)
      x, y = g.create_op("TestOp", [c], [dtypes.float32, dtypes.string]).outputs
      z = x * 2.0
      w = z * 3.0
      grads = gradients.gradients(z, [c])
      self.assertTrue(isinstance(grads[0], ops.Tensor)) 
Example #3
Source File: gradient_reversal.py    From neuralmonkey with BSD 3-Clause "New" or "Revised" License
def _reverse_gradient(x: tf.Tensor) -> tf.Tensor:
    """Flips the sign of the incoming gradient during training."""

    grad_name = "gradient_reversal_{}".format(x.name)

    # pylint: disable=unused-variable,invalid-name,unused-argument
    @ops.RegisterGradient(grad_name)
    def _flip_gradients(op, grad):
        return [tf.negative(grad)]
    # pylint: enable=unused-variable,invalid-name,unused-argument

    from neuralmonkey.experiment import Experiment
    graph = Experiment.get_current().graph
    with graph.gradient_override_map({"Identity": grad_name}):
        y = tf.identity(x)

    return y 
Example #4
Source File: flip_gradient.py    From adanet with MIT License
def __call__(self, x, l=1.0):
        grad_name = "FlipGradient%d" % self.num_calls
        @ops.RegisterGradient(grad_name)
        def _flip_gradients(op, grad):
            return [tf.negative(grad) * l]
        
        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": grad_name}):
            y = tf.identity(x)
            
        self.num_calls += 1
        return y 
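The __call__ method above (and in several of the examples that follow) belongs to a small gradient-reversal builder object whose class definition is not included in the excerpt; the class and tensor names in this usage sketch are therefore assumptions. Forward values pass through unchanged while gradients are negated and scaled on the way back, the standard trick for domain-adversarial training.

# Hypothetical usage; FlipGradientBuilder is the assumed class owning __call__
# and shared_features is a placeholder for an encoder output.
flip_gradient = FlipGradientBuilder()                 # num_calls starts at 0
reversed_features = flip_gradient(shared_features, l=0.1)
# Forward pass: reversed_features equals shared_features (tf.identity).
# Backward pass: gradients reaching shared_features are multiplied by -0.1.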
Example #5
Source File: grad_cam.py    From face_classification with MIT License
def register_gradient():
    if "GuidedBackProp" not in ops._gradient_registry._registry:
        @ops.RegisterGradient("GuidedBackProp")
        def _GuidedBackProp(op, gradient):
            dtype = op.inputs[0].dtype
            guided_gradient = (gradient * tf.cast(gradient > 0., dtype) *
                               tf.cast(op.inputs[0] > 0., dtype))
            return guided_gradient 
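As with the other registrations, "GuidedBackProp" only affects ops created under a matching override map, so guided-backprop visualizations rebuild the model inside that scope. The model constructor below is hypothetical; treat this as a sketch of the pattern rather than the project's exact code.

register_gradient()
g = tf.get_default_graph()
with g.gradient_override_map({"Relu": "GuidedBackProp"}):
    guided_model = build_model()                      # assumed model constructor
# Saliency of the output with respect to the input under guided backprop.
saliency = tf.gradients(guided_model.output, guided_model.input)[0]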
Example #6
Source File: flip_gradient.py    From hgail with MIT License
def __call__(self, x, l=1.0):
        grad_name = "FlipGradient%d" % self.num_calls
        @ops.RegisterGradient(grad_name)
        def _flip_gradients(op, grad):
            return [-grad * l]
        
        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": grad_name}):
            y = tf.identity(x)
            
        self.num_calls += 1
        return y 
Example #7
Source File: grad-cam.py    From keras-grad-cam with MIT License
def register_gradient():
    if "GuidedBackProp" not in ops._gradient_registry._registry:
        @ops.RegisterGradient("GuidedBackProp")
        def _GuidedBackProp(op, grad):
            dtype = op.inputs[0].dtype
            return grad * tf.cast(grad > 0., dtype) * \
                tf.cast(op.inputs[0] > 0., dtype) 
Example #8
Source File: dann.py    From ddan with MIT License
def __call__(self, x, l=1.0):
        grad_name = "FlipGradient%d" % self.num_calls
        @ops.RegisterGradient(grad_name)
        def _flip_gradients(op, grad):
            return [tf.negative(grad) * l]
        
        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": grad_name}):
            y = tf.identity(x)
            
        self.num_calls += 1
        return y 
Example #9
Source File: tf_block.py    From x-vector-kaldi-tf with Apache License 2.0
def __call__(self, x, l=1.0):
        grad_name = "FlipGradient%d" % self.num_calls

        @ops.RegisterGradient(grad_name)
        def _flip_gradients(op, grad):
            return [tf.negative(grad) * l]

        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": grad_name}):
            y = tf.identity(x)

        self.num_calls += 1
        return y 
Example #10
Source File: grad_cam.py    From emotion_recognition with MIT License
def register_gradient():
    if "GuidedBackProp" not in ops._gradient_registry._registry:
        @ops.RegisterGradient("GuidedBackProp")
        def _GuidedBackProp(op, gradient):
            dtype = op.inputs[0].dtype
            guided_gradient = (gradient * tf.cast(gradient > 0., dtype) *
                               tf.cast(op.inputs[0] > 0., dtype))
            return guided_gradient 
Example #11
Source File: flip_gradient.py    From active_learning_coreset with MIT License
def __call__(self, x, l=1.0):
        grad_name = "FlipGradient%d" % self.num_calls
        @ops.RegisterGradient(grad_name)
        def _flip_gradients(op, grad):
            return [tf.neg(grad) * l]
        
        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": grad_name}):
            y = tf.identity(x)
            
        self.num_calls += 1
        return y 
Example #12
Source File: flip_gradient.py    From Transferable-E2E-ABSA with MIT License
def __call__(self, x, l=1.0):
        grad_name = "FlipGradient%d" % self.num_calls

        @ops.RegisterGradient(grad_name)
        def _flip_gradients(op, grad):
            return [tf.negative(grad) * l]

        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": grad_name}):
            y = tf.identity(x)

        self.num_calls += 1
        return y 
Example #13
Source File: flip_gradient.py    From HATN with MIT License
def __call__(self, x, l=1.0):
        grad_name = "FlipGradient%d" % self.num_calls
        @ops.RegisterGradient(grad_name)
        def _flip_gradients(op, grad):
            return [tf.negative(grad) * l]
        
        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": grad_name}):
            y = tf.identity(x)
            
        self.num_calls += 1
        return y 
Example #14
Source File: flip_gradient.py    From bier with GNU General Public License v3.0
def __call__(self, x, l=1.0):
        grad_name = "FlipGradient%d" % self.num_calls

        @ops.RegisterGradient(grad_name)
        def _flip_gradients(op, grad):
            return [(-grad) * l]

        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": grad_name}):
            y = tf.identity(x)

        self.num_calls += 1
        return y 
Example #15
Source File: grad_cam.py    From Face-and-Emotion-Recognition with MIT License
def register_gradient():
    if "GuidedBackProp" not in ops._gradient_registry._registry:
        @ops.RegisterGradient("GuidedBackProp")
        def _GuidedBackProp(op, gradient):
            dtype = op.inputs[0].dtype
            guided_gradient = (gradient * tf.cast(gradient > 0., dtype) *
                               tf.cast(op.inputs[0] > 0., dtype))
            return guided_gradient 
Example #16
Source File: token_generator_gumbel.py    From BERT with Apache License 2.0
def __call__(self, x, l=1.0):
		grad_name = "FlipGradient%d" % self.num_calls
		@ops.RegisterGradient(grad_name)
		def _flip_gradients(op, grad):
			return [tf.negative(grad) * l]
		
		g = tf.get_default_graph()
		with g.gradient_override_map({"Identity": grad_name}):
			y = tf.identity(x)
			
		self.num_calls += 1
		return y 
Example #17
Source File: token_generator_igr.py    From BERT with Apache License 2.0
def __call__(self, x, l=1.0):
		grad_name = "FlipGradient%d" % self.num_calls
		@ops.RegisterGradient(grad_name)
		def _flip_gradients(op, grad):
			return [tf.negative(grad) * l]
		
		g = tf.get_default_graph()
		with g.gradient_override_map({"Identity": grad_name}):
			y = tf.identity(x)
			
		self.num_calls += 1
		return y 
Example #18
Source File: discriminator_gumbel_nce.py    From BERT with Apache License 2.0
def __call__(self, x, l=1.0):
		grad_name = "FlipGradient%d" % self.num_calls
		@ops.RegisterGradient(grad_name)
		def _flip_gradients(op, grad):
			return [tf.negative(grad) * l]
		
		g = tf.get_default_graph()
		with g.gradient_override_map({"Identity": grad_name}):
			y = tf.identity(x)
			
		self.num_calls += 1
		return y 
Example #19
Source File: discriminator_relgan_exporter.py    From BERT with Apache License 2.0
def __call__(self, x, l=1.0):
		grad_name = "FlipGradient%d" % self.num_calls
		@ops.RegisterGradient(grad_name)
		def _flip_gradients(op, grad):
			return [tf.negative(grad) * l]
		
		g = tf.get_default_graph()
		with g.gradient_override_map({"Identity": grad_name}):
			y = tf.identity(x)
			
		self.num_calls += 1
		return y 
Example #20
Source File: discriminator_gumbel.py    From BERT with Apache License 2.0
def __call__(self, x, l=1.0):
		grad_name = "FlipGradient%d" % self.num_calls
		@ops.RegisterGradient(grad_name)
		def _flip_gradients(op, grad):
			return [tf.negative(grad) * l]
		
		g = tf.get_default_graph()
		with g.gradient_override_map({"Identity": grad_name}):
			y = tf.identity(x)
			
		self.num_calls += 1
		return y 
Example #21
Source File: flip_gradient.py    From BERT with Apache License 2.0
def __call__(self, x, l=1.0):
		grad_name = "FlipGradient%d" % self.num_calls
		@ops.RegisterGradient(grad_name)
		def _flip_gradients(op, grad):
			return [tf.negative(grad) * l]
		
		g = tf.get_default_graph()
		with g.gradient_override_map({"Identity": grad_name}):
			y = tf.identity(x)
			
		self.num_calls += 1
		return y 
Example #22
Source File: grad_cam.py    From Emotion with MIT License
def register_gradient():
    if "GuidedBackProp" not in ops._gradient_registry._registry:
        @ops.RegisterGradient("GuidedBackProp")
        def _GuidedBackProp(op, gradient):
            dtype = op.inputs[0].dtype
            guided_gradient = (gradient * tf.cast(gradient > 0., dtype) *
                               tf.cast(op.inputs[0] > 0., dtype))
            return guided_gradient 
Example #23
Source File: maxpool_gradgrad.py    From tensorflow-forward-ad with MIT License
@ops.RegisterGradient("MaxPoolGradWithArgmax")
def _MaxPoolGradWithArgmaxGrad(op, grad):
  """The gradients for `MaxPoolGradWithArgmax`.
  Args:
    op: The `MaxPoolGradWithArgmax` `Operation` that we are differentiating,
      which we can use to find the inputs and outputs of the original op.
    grad: Gradient with respect to the output of the `MaxPoolGradWithArgmax` op.
    op.inputs[0]: x
    op.inputs[1]: dl/dy
    op.inputs[2]: argmax_in_x
    op.outputs[0]: dl/dx
  Returns:
    Gradients with respect to the input of `MaxPoolGradWithArgmax`.
  """
  ksize = op.get_attr("ksize")
  strides = op.get_attr("strides")
  padding = op.get_attr("padding")
  return [
      None, _max_pool_grad_grad(
          grad,
          op.inputs[0],
          op.inputs[1],
          ksize,
          strides,
          padding,
          argmax=op.inputs[2]), None
  ]


# @ops.RegisterGradient("MaxPoolGrad")
# def _MaxPoolGradGrad(op, grad):
#   """The gradients for `MaxPoolGrad`.
#   Args:
#     op: The `MaxPoolGrad` `Operation` that we are differentiating, which we can use
#       to find the inputs and outputs of the original op.
#       op.inputs[0]: x
#       op.inputs[1]: y
#       op.inputs[2]: dl/dy
#       op.outputs[0]: dl/dx
#     grad: Gradient with respect to the output of the `MaxPoolGrad` op.
#   Returns:
#     Gradients with respect to the input of `MaxPoolGrad`.
#   """
#   ksize = op.get_attr("ksize")
#   strides = op.get_attr("strides")
#   padding = op.get_attr("padding")
#   return [
#       None, None, _max_pool_grad_grad(grad, op.inputs[0], op.inputs[1], ksize,
#                                       strides, padding)
#   ] 
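Note the return structure: a registered gradient function must return exactly one value per op input, using None for inputs that should receive no gradient (here, x and argmax_in_x). A minimal sketch of that convention for a hypothetical three-input op:

@ops.RegisterGradient("ThreeInputOp")                 # hypothetical op type
def _three_input_op_grad(op, grad):
    # Only the second input receives a gradient; the others get None.
    return [None, grad, None]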
Example #24
Source File: imperative_graph.py    From lambda-packs with MIT License
def __init__(self, parent_graph=None):
    """Initializes an ImperativeGraph.

    Args:
      parent_graph: (Optional) An ImperativeGraph.
    """
    self._parent_graph = parent_graph
    # Whether the create_op function should augment an op with extra logic for
    # imperative execution.
    self._return_as_is = False
    # Operation -> list of Tensors map. Used for overriding the op.outputs
    # property, useful during gradient computation.
    self._outputs_map = {}
    # Operation -> function map. Used for overriding the gradient function
    # for an op.
    self._gradient_function_map = {}
    # Unique name for the graph. Used for naming the container in which
    # temporary variables are placed.
    self._name = uuid.uuid4().hex
    # Names for op types used for marking ops so we can override their
    # gradient functions.
    self._merge_op_type = 'ImperativeMerge' + self._name
    self._imperative_op_type = 'ImperativeOp' + self._name
    # The list of 'assign' ops that initialize variables.
    self._init_ops = []
    # Names of variables whose init ops have been already recorded in _init_ops.
    self._init_variable_names = set()
    # A flag to indicate whether a variable and the corresponding initialization
    # ops are being created. Typically set by the initializer of Variable class.
    self._in_variable_creation = False
    self._variable_cleanup_ops = []
    # Call the parent's initializer.
    super(ImperativeGraph, self).__init__()

    # Register a simple 'pass through' function to be used for ops that have
    # _merge_op_type as the _gradient_op_type attribute.
    ops.RegisterGradient(self._merge_op_type)(
        lambda op, grad, _: [grad] * len(op.inputs))

    # For ops that have _imperative_op_type as their _gradient_op_type attribute,
    # temporarily replace their outputs with the values in _outputs_map before
    # calling the original gradient function.
    def _imperative_op_grad(op, *grad):
      with self.replace_outputs(op):
        return self._gradient_function_map[op.name](op, *grad)

    ops.RegisterGradient(self._imperative_op_type)(_imperative_op_grad)
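RegisterGradient is used here in its plain call form, ops.RegisterGradient(name)(fn), rather than as a decorator; the two are interchangeable, since the decorator simply registers the function under the given name and returns it. A small illustration with made-up op-type names:

def _pass_through_grad(op, grad):
    return [grad] * len(op.inputs)

# Call form, as in the example above.
ops.RegisterGradient("PassThroughA")(_pass_through_grad)

# Decorator form, as in the earlier examples.
@ops.RegisterGradient("PassThroughB")
def _pass_through_grad_b(op, grad):
    return [grad] * len(op.inputs)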