Python cntk.minus() Examples

The following are 10 code examples of cntk.minus(), collected from open-source projects. Each example lists the project and source file it was taken from. You may also want to check out all available functions and classes of the cntk module.
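Before the project examples, here is a minimal, self-contained sketch (not taken from any project below) of what cntk.minus() computes: an element-wise subtraction node that can be evaluated eagerly or composed into a larger graph.

import numpy as np
import cntk as C

# Element-wise subtraction of two constant tensors; C.minus(a, b) builds the
# same node as the overloaded expression a - b.
a = C.constant(np.asarray([4.0, 5.0, 6.0], dtype=np.float32))
b = C.constant(np.asarray([1.0, 2.0, 3.0], dtype=np.float32))
diff = C.minus(a, b)
print(diff.eval())  # expected: [3. 3. 3.]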
Example #1
Source File: cntk_smoothL1_loss.py    From cntk-python-web-service-on-azure with MIT License
def SmoothL1Loss(sigma, bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights):
    """
        From https://github.com/smallcorgi/Faster-RCNN_TF/blob/master/lib/fast_rcnn/train.py

        ResultLoss = outside_weights * SmoothL1(inside_weights * (bbox_pred - bbox_targets))
        SmoothL1(x) = 0.5 * (sigma * x)^2,    if |x| < 1 / sigma^2
                      |x| - 0.5 / sigma^2,    otherwise
    """
    sigma2 = sigma * sigma

    inside_mul_abs = C.abs(C.element_times(bbox_inside_weights, C.minus(bbox_pred, bbox_targets)))

    smooth_l1_sign = C.less(inside_mul_abs, 1.0 / sigma2)
    smooth_l1_option1 = C.element_times(C.element_times(inside_mul_abs, inside_mul_abs), 0.5 * sigma2)
    smooth_l1_option2 = C.minus(inside_mul_abs, 0.5 / sigma2)
    smooth_l1_result = C.plus(C.element_times(smooth_l1_option1, smooth_l1_sign),
                              C.element_times(smooth_l1_option2, C.minus(1.0, smooth_l1_sign)))

    return C.element_times(bbox_outside_weights, smooth_l1_result) 
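A hedged usage sketch for SmoothL1Loss above; the shapes, variable names, and sigma value are illustrative assumptions, not taken from the original project.

import numpy as np
import cntk as C

num_coords = 8  # assumed size, e.g. 2 classes * 4 box coordinates
bbox_pred    = C.input_variable((num_coords,), name='bbox_pred')
bbox_targets = C.input_variable((num_coords,), name='bbox_targets')
inside_w     = C.input_variable((num_coords,), name='bbox_inside_weights')
outside_w    = C.input_variable((num_coords,), name='bbox_outside_weights')

loss = SmoothL1Loss(1.0, bbox_pred, bbox_targets, inside_w, outside_w)

# Feed one random sample per input (batch axis of size 1) and evaluate.
feed = {v: np.random.rand(1, num_coords).astype(np.float32)
        for v in (bbox_pred, bbox_targets, inside_w, outside_w)}
print(loss.eval(feed))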
Example #2
Source File: cntk_backend.py    From GraphicDesignPatternByPython with MIT License
def _moments(x, axes=None, shift=None, keep_dims=False):
    _axes = tuple(axes)
    if shift is None:
        shift = x
        # Compute true mean while keeping the dims for proper broadcasting.
        for axis in _axes:
            shift = C.reduce_mean(shift, axis=axis)

    shift = C.stop_gradient(shift)
    shifted_mean = C.minus(x, shift)
    for axis in _axes:
        shifted_mean = C.reduce_mean(shifted_mean, axis=axis)

    variance_mean = C.square(C.minus(x, shift))
    for axis in _axes:
        variance_mean = C.reduce_mean(variance_mean, axis=axis)

    variance = C.minus(variance_mean, C.square(shifted_mean))
    mean = C.plus(shifted_mean, shift)

    if not keep_dims:
        mean = squeeze(mean, _axes)
        variance = squeeze(variance, _axes)

    return mean, variance 
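This is the Keras CNTK backend helper for computing mean and variance over a set of axes. It relies on the shifted-data identity Var(x) = E[(x - shift)^2] - (E[x - shift])^2, where the shift is the (gradient-stopped) per-axis mean, so the variance is accumulated around a value close to the true mean. Below is a hedged sketch of calling it directly; the input shape and axes are assumptions, and keep_dims=True avoids the backend-internal squeeze helper.

import numpy as np
import cntk as C

x = C.input_variable((3, 4))
mean, variance = _moments(x, axes=[0, 1], keep_dims=True)

sample = np.random.rand(1, 3, 4).astype(np.float32)  # batch of one sample
print(mean.eval({x: sample}))      # mean over both static axes, dims kept
print(variance.eval({x: sample}))  # variance over both static axes, dims kept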
Example #3
Source File: cntk_smoothL1_loss.py    From raster-deep-learning with Apache License 2.0
def SmoothL1Loss(sigma, bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights):
    """
        From https://github.com/smallcorgi/Faster-RCNN_TF/blob/master/lib/fast_rcnn/train.py

        ResultLoss = outside_weights * SmoothL1(inside_weights * (bbox_pred - bbox_targets))
        SmoothL1(x) = 0.5 * (sigma * x)^2,    if |x| < 1 / sigma^2
                      |x| - 0.5 / sigma^2,    otherwise
    """
    sigma2 = sigma * sigma

    inside_mul_abs = C.abs(C.element_times(bbox_inside_weights, C.minus(bbox_pred, bbox_targets)))

    smooth_l1_sign = C.less(inside_mul_abs, 1.0 / sigma2)
    smooth_l1_option1 = C.element_times(C.element_times(inside_mul_abs, inside_mul_abs), 0.5 * sigma2)
    smooth_l1_option2 = C.minus(inside_mul_abs, 0.5 / sigma2)
    smooth_l1_result = C.plus(C.element_times(smooth_l1_option1, smooth_l1_sign),
                              C.element_times(smooth_l1_option2, C.minus(1.0, smooth_l1_sign)))

    return C.element_times(bbox_outside_weights, smooth_l1_result) 
Example #4
Source File: cntk_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def _moments(x, axes=None, shift=None, keep_dims=False):
    _axes = tuple(axes)
    if shift is None:
        shift = x
        # Compute true mean while keeping the dims for proper broadcasting.
        for axis in _axes:
            shift = C.reduce_mean(shift, axis=axis)

    shift = C.stop_gradient(shift)
    shifted_mean = C.minus(x, shift)
    for axis in _axes:
        shifted_mean = C.reduce_mean(shifted_mean, axis=axis)

    variance_mean = C.square(C.minus(x, shift))
    for axis in _axes:
        variance_mean = C.reduce_mean(variance_mean, axis=axis)

    variance = C.minus(variance_mean, C.square(shifted_mean))
    mean = C.plus(shifted_mean, shift)

    if not keep_dims:
        mean = squeeze(mean, _axes)
        variance = squeeze(variance, _axes)

    return mean, variance 
Example #5
Source File: cntk_smoothL1_loss.py    From cntk-hotel-pictures-classificator with MIT License
def SmoothL1Loss(sigma, bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights):
    """
        From https://github.com/smallcorgi/Faster-RCNN_TF/blob/master/lib/fast_rcnn/train.py

        ResultLoss = outside_weights * SmoothL1(inside_weights * (bbox_pred - bbox_targets))
        SmoothL1(x) = 0.5 * (sigma * x)^2,    if |x| < 1 / sigma^2
                      |x| - 0.5 / sigma^2,    otherwise
    """
    sigma2 = sigma * sigma

    inside_mul_abs = C.abs(C.element_times(bbox_inside_weights, C.minus(bbox_pred, bbox_targets)))

    smooth_l1_sign = C.less(inside_mul_abs, 1.0 / sigma2)
    smooth_l1_option1 = C.element_times(C.element_times(inside_mul_abs, inside_mul_abs), 0.5 * sigma2)
    smooth_l1_option2 = C.minus(inside_mul_abs, 0.5 / sigma2)
    smooth_l1_result = C.plus(C.element_times(smooth_l1_option1, smooth_l1_sign),
                              C.element_times(smooth_l1_option2, C.minus(1.0, smooth_l1_sign)))

    return C.element_times(bbox_outside_weights, smooth_l1_result) 
Example #6
Source File: cntk_backend.py    From deepQuest with BSD 3-Clause "New" or "Revised" License
def _moments(x, axes=None, shift=None, keep_dims=False):
    _axes = tuple(axes)
    if shift is None:
        shift = x
        # Compute true mean while keeping the dims for proper broadcasting.
        for axis in _axes:
            shift = C.reduce_mean(shift, axis=axis)

    shift = C.stop_gradient(shift)
    shifted_mean = C.minus(x, shift)
    for axis in _axes:
        shifted_mean = C.reduce_mean(shifted_mean, axis=axis)

    variance_mean = C.square(C.minus(x, shift))
    for axis in _axes:
        variance_mean = C.reduce_mean(variance_mean, axis=axis)

    variance = C.minus(variance_mean, C.square(shifted_mean))
    mean = C.plus(shifted_mean, shift)

    if not keep_dims:
        mean = squeeze(mean, _axes)
        variance = squeeze(variance, _axes)

    return mean, variance 
Example #7
Source File: cntk_backend.py    From keras-lambda with MIT License
def _moments(x, axes=None, shift=None, keep_dims=False):
    _axes = tuple(axes)
    if shift is None:
        shift = x
        # Compute true mean while keeping the dims for proper broadcasting.
        for axis in _axes:
            shift = C.reduce_mean(shift, axis=axis)

    shift = C.stop_gradient(shift)
    shifted_mean = C.minus(x, shift)
    for axis in _axes:
        shifted_mean = C.reduce_mean(shifted_mean, axis=axis)

    variance_mean = C.square(C.minus(x, shift))
    for axis in _axes:
        variance_mean = C.reduce_mean(variance_mean, axis=axis)

    variance = C.minus(variance_mean, C.square(shifted_mean))
    mean = C.plus(shifted_mean, shift)

    if not keep_dims:
        mean = squeeze(mean, _axes)
        variance = squeeze(variance, _axes)

    return mean, variance 
Example #8
Source File: test_ops_binary.py    From ngraph-python with Apache License 2.0
def test_minus_1():
    cntk_op = C.minus([1, 2, 3], [4, 5, 6])
    cntk_ret = cntk_op.eval()

    ng_op, _ = CNTKImporter().import_model(cntk_op)
    ng_ret = ng.transformers.make_transformer().computation(ng_op)()

    assert np.array_equal(cntk_ret, ng_ret) 
Example #9
Source File: test_ops_binary.py    From ngraph-python with Apache License 2.0
def test_minus_2():
    cntk_op = C.minus([[1, 2, 3], [4, 5, 6]], [7, 8, 9])
    cntk_ret = cntk_op.eval()

    ng_op, _ = CNTKImporter().import_model(cntk_op)
    ng_ret = ng.transformers.make_transformer().computation(ng_op)()

    assert np.array_equal(cntk_ret, ng_ret) 
Example #10
Source File: test_ops_binary.py    From ngraph-python with Apache License 2.0
def test_minus_3():
    cntk_op = C.minus([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
    cntk_ret = cntk_op.eval()

    ng_op, _ = CNTKImporter().import_model(cntk_op)
    ng_ret = ng.transformers.make_transformer().computation(ng_op)()

    assert np.array_equal(cntk_ret, ng_ret)
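For reference, the three broadcasting cases exercised by these tests can also be checked directly in CNTK. A small sketch follows; the commented results assume NumPy-style broadcasting, which cntk.minus follows.

import cntk as C

print(C.minus([1, 2, 3], [4, 5, 6]).eval())
# expected: [-3. -3. -3.]

print(C.minus([[1, 2, 3], [4, 5, 6]], [7, 8, 9]).eval())
# expected: [[-6. -6. -6.]
#            [-3. -3. -3.]]

print(C.minus([1, 2, 3], [[4, 5, 6], [7, 8, 9]]).eval())
# expected: [[-3. -3. -3.]
#            [-6. -6. -6.]]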