Python numpy.subtract() Examples

The following are 30 code examples showing how to use numpy.subtract(). These examples are extracted from open source projects; you can go to the original project or source file by following the links above each example.

You may also want to check out all available functions and classes of the numpy module.
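Before the project examples, here is a minimal sketch of the function itself: numpy.subtract performs element-wise subtraction and follows NumPy's broadcasting rules, so the two operands do not need identical shapes. The arrays below are made up purely for illustration.

import numpy as np

a = np.array([[10, 20, 30],
              [40, 50, 60]])
b = np.array([1, 2, 3])              # broadcast against each row of a

print(np.subtract(a, b))             # equivalent to a - b
# [[ 9 18 27]
#  [39 48 57]]

out = np.empty_like(a)
np.subtract(a, b, out=out)           # ufuncs also accept an out= array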

Example 1
Project: openISP   Author: cruxopen   File: nlm.py    License: MIT License
def calWeights(self, img, kernel, y, x):
        wmax = 0
        sweight = 0
        average = 0
        for j in range(2 * self.Ds + 1 - 2 * self.ds - 1):
            for i in range(2 * self.Ds + 1 - 2 * self.ds - 1):
                start_y = y - self.Ds + self.ds + j
                start_x = x - self.Ds + self.ds + i
                neighbour_w = img[start_y - self.ds:start_y + self.ds + 1, start_x - self.ds:start_x + self.ds + 1]
                center_w = img[y-self.ds:y+self.ds+1, x-self.ds:x+self.ds+1]
                if start_y != y or start_x != x:    # skip the patch centered on (y, x) itself
                    sub = np.subtract(neighbour_w, center_w)
                    dist = np.sum(np.multiply(kernel, np.multiply(sub, sub)))
                    w = np.exp(-dist/pow(self.h, 2))    # replaced by look up table
                    if w > wmax:
                        wmax = w
                    sweight = sweight + w
                    average = average + w * img[start_y, start_x]
        return sweight, average, wmax 
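The weight computation above boils down to a kernel-weighted sum of squared differences between two patches. A minimal standalone sketch of that distance, using made-up patch and kernel arrays and a placeholder smoothing parameter h rather than anything from openISP:

import numpy as np

patch_a = np.random.rand(5, 5)
patch_b = np.random.rand(5, 5)
kernel = np.full((5, 5), 1.0 / 25)        # uniform weighting kernel

sub = np.subtract(patch_a, patch_b)       # element-wise difference
dist = np.sum(kernel * sub * sub)         # weighted sum of squared differences
weight = np.exp(-dist / (10.0 ** 2))      # h = 10 as a placeholder smoothing parameter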
Example 2
def load_image(img_path, net_input_shape):
    imgBGR = cv2.imread(img_path)
    img = cv2.resize(imgBGR, net_input_shape)
    # BGR -> RGB
    #img = img[:,:, (2, 1, 0)]

    ## Method 1
    # imgT = np.transpose(img, (2, 0, 1))  # c,w,h
    # imgF = np.asarray(imgT, dtype=np.float32)
    # mean = [[[88.159309]], [[97.966286]], [[103.66106]]] # Caffe image mean
    # imgS = np.subtract(imgF,mean)

    ## Method 2
    imgF = np.asarray(img, dtype=np.float32)
    mean = [128.0, 128.0, 128.0] # Caffe image mean
    # mean = [88.159309, 97.966286, 103.66106] # Caffe image mean
    imgSS = np.subtract(imgF, mean)/128.0
    imgS = np.transpose(imgSS, (2, 0, 1))  # c,w,h

    # RGB_MEAN_PIXELS = np.array([88.159309, 97.966286, 103.66106]).reshape((1,1,1,3)).astype(np.float32)

    return imgBGR, np.ascontiguousarray(imgS, dtype=np.float32) # avoid error: ndarray is not contiguous 
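The mean subtraction in Method 2 relies on broadcasting: a length-3 list is subtracted from every pixel of an H x W x 3 image. A small hedged sketch of the same idea with a dummy image, not tied to the iAI project:

import numpy as np

img = np.random.randint(0, 256, size=(4, 4, 3)).astype(np.float32)  # dummy HWC image
mean = [128.0, 128.0, 128.0]

normalized = np.subtract(img, mean) / 128.0   # per-channel mean removal, scaled to roughly [-1, 1]
chw = np.transpose(normalized, (2, 0, 1))     # HWC -> CHW layout for the inference engine
chw = np.ascontiguousarray(chw, dtype=np.float32)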
Example 3
Project: iAI   Author: aimuch   File: call_engine_to_infer_all.py    License: MIT License
def load_image(img_path, net_input_shape):
    img = cv2.resize(cv2.imread(img_path), net_input_shape)
    # BGR -> RGB
    #img = img[:,:, (2, 1, 0)]

    ## Method 1
    # imgT = np.transpose(img, (2, 0, 1))  # c,w,h
    # imgF = np.asarray(imgT, dtype=np.float32)
    # mean = [[[88.159309]], [[97.966286]], [[103.66106]]] # Caffe image mean
    # imgS = np.subtract(imgF,mean)

    ## Method 2
    imgF = np.asarray(img, dtype=np.float32)
    mean = [88.159309, 97.966286, 103.66106] # Caffe image mean
    imgSS = np.subtract(imgF, mean)
    imgS = np.transpose(imgSS, (2, 0, 1))  # CHW

    # RGB_MEAN_PIXELS = np.array([88.159309, 97.966286, 103.66106]).reshape((1,1,1,3)).astype(np.float32)

    return np.ascontiguousarray(imgS, dtype=np.float32) # avoid error: ndarray is not contiguous 
Example 4
Project: iAI   Author: aimuch   File: call_engine_to_infer_all_analysis_error_6classes.py    License: MIT License
def load_image(img_path, net_input_shape):
    imgBGR = cv2.imread(img_path)
    img = cv2.resize(imgBGR, net_input_shape)
    # BGR -> RGB
    #img = img[:,:, (2, 1, 0)]

    ## Method 1
    # imgT = np.transpose(img, (2, 0, 1))  # c,w,h
    # imgF = np.asarray(imgT, dtype=np.float32)
    # mean = [[[88.159309]], [[97.966286]], [[103.66106]]] # Caffe image mean
    # imgS = np.subtract(imgF,mean)

    ## Method 2
    imgF = np.asarray(img, dtype=np.float32)
    mean = [128.0, 128.0, 128.0] # Caffe image mean
    # mean = [88.159309, 97.966286, 103.66106] # Caffe image mean
    imgSS = np.subtract(imgF, mean)/128.0
    imgS = np.transpose(imgSS, (2, 0, 1))  # c,w,h

    # RGB_MEAN_PIXELS = np.array([88.159309, 97.966286, 103.66106]).reshape((1,1,1,3)).astype(np.float32)

    return imgBGR, np.ascontiguousarray(imgS, dtype=np.float32) # avoid error: ndarray is not contiguous 
Example 5
Project: iAI   Author: aimuch   File: call_engine_to_infer_one.py    License: MIT License
def load_image(img_path, net_input_shape):
    img = cv2.resize(cv2.imread(img_path), net_input_shape)
    # BGR -> RGB
    #img = img[:,:, (2, 1, 0)]

    ## Method 1
    # imgT = np.transpose(img, (2, 0, 1))  # c,w,h
    # imgF = np.asarray(imgT, dtype=np.float32)
    # mean = [[[88.159309]], [[97.966286]], [[103.66106]]] # Caffe image mean
    # imgS = np.subtract(imgF,mean)

    ## Method 2
    imgF = np.asarray(img, dtype=np.float32)
    mean = [88.159309, 97.966286, 103.66106] # Caffe image mean
    imgSS = np.subtract(imgF, mean)
    imgS = np.transpose(imgSS, (2, 0, 1))  # CHW

    # RGB_MEAN_PIXELS = np.array([88.159309, 97.966286, 103.66106]).reshape((1,1,1,3)).astype(np.float32)

    return np.ascontiguousarray(imgS, dtype=np.float32)   # avoid error: ndarray is not contiguous 
Example 6
Project: iAI   Author: aimuch   File: call_engine_to_infer_all_print_predict_on_image.py    License: MIT License
def load_image(img_path, net_input_shape):
    imgBGR = cv2.imread(img_path)
    img = cv2.resize(imgBGR, net_input_shape)
    # BGR -> RGB
    #img = img[:,:, (2, 1, 0)]

    ## Method 1
    # imgT = np.transpose(img, (2, 0, 1))  # c,w,h
    # imgF = np.asarray(imgT, dtype=np.float32)
    # mean = [[[88.159309]], [[97.966286]], [[103.66106]]] # Caffe image mean
    # imgS = np.subtract(imgF,mean)

    ## Method 2
    imgF = np.asarray(img, dtype=np.float32)
    mean = [88.159309, 97.966286, 103.66106] # Caffe image mean
    imgSS = np.subtract(imgF, mean)
    imgS = np.transpose(imgSS, (2, 0, 1))  # c,w,h

    # RGB_MEAN_PIXELS = np.array([88.159309, 97.966286, 103.66106]).reshape((1,1,1,3)).astype(np.float32)

    return imgBGR, np.ascontiguousarray(imgS, dtype=np.float32) # avoid error: ndarray is not contiguous 
Example 7
Project: MaskTrack   Author: omkar13   File: utility_functions.py    License: MIT License
def apply_val_transform_image(image,inputRes=None):
    meanval = (104.00699, 116.66877, 122.67892)

    if inputRes is not None:
        image = sm.imresize(image, inputRes)

    image = np.array(image, dtype=np.float32)
    image = np.subtract(image, np.array(meanval, dtype=np.float32))



    if image.ndim == 2:
        image = image[:, :, np.newaxis]

    # swap color axis because
    # numpy image: H x W x C
    # torch image: C X H X W

    image = image.transpose((2, 0, 1))
    image = torch.from_numpy(image)

    return image 
Example 8
Project: MaskTrack   Author: omkar13   File: davis17_online_data.py    License: MIT License
def make_img_gt_pair(self, idx):
        """
        Make the image-ground-truth pair
        """
        img = cv2.imread(os.path.join(self.db_root_dir, self.img_list[idx]))
        if self.labels[idx] is not None:
            label = cv2.imread(os.path.join(self.db_root_dir, self.labels[idx]), 0)
        else:
            gt = np.zeros(img.shape[:-1], dtype=np.uint8)

        if self.inputRes is not None:
            img = imresize(img, self.inputRes)
            if self.labels[idx] is not None:
                label = imresize(label, self.inputRes, interp='nearest')

        img = np.array(img, dtype=np.float32)
        img = np.subtract(img, np.array(self.meanval, dtype=np.float32))

        if self.labels[idx] is not None:
                gt = np.array(label, dtype=np.float32)
                gt = gt/np.max([gt.max(), 1e-8])

        return img, gt 
Example 9
Project: PoseWarper   Author: facebookresearch   File: eval_helpers.py    License: Apache License 2.0
def VOCap(rec,prec):

    mpre = np.zeros([1,2+len(prec)])
    mpre[0,1:len(prec)+1] = prec
    mrec = np.zeros([1,2+len(rec)])
    mrec[0,1:len(rec)+1] = rec
    mrec[0,len(rec)+1] = 1.0

    for i in range(mpre.size-2,-1,-1):
        mpre[0,i] = max(mpre[0,i],mpre[0,i+1])

    i = np.argwhere( ~np.equal( mrec[0,1:], mrec[0,:mrec.shape[1]-1]) )+1
    i = i.flatten()

    # compute area under the curve
    ap = np.sum( np.multiply( np.subtract( mrec[0,i], mrec[0,i-1]), mpre[0,i] ) )

    return ap 
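The final line approximates the area under the precision-recall curve: np.subtract gives the width of each recall step and np.multiply weights it by the interpolated precision. A tiny illustration with hypothetical recall and precision values, not taken from PoseWarper:

import numpy as np

mrec = np.array([0.0, 0.2, 0.5, 1.0])      # recall break points (hypothetical)
mpre = np.array([1.0, 0.8, 0.6, 0.6])      # interpolated precision at those points

widths = np.subtract(mrec[1:], mrec[:-1])  # step widths: [0.2, 0.3, 0.5]
ap = np.sum(np.multiply(widths, mpre[1:])) # 0.2*0.8 + 0.3*0.6 + 0.5*0.6 = 0.64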
Example 10
Project: recruit   Author: Frank-qlu   File: test_ufunc.py    License: Apache License 2.0
def test_NotImplemented_not_returned(self):
        # See gh-5964 and gh-2091. Some of these functions are not operator
        # related and were fixed for other reasons in the past.
        binary_funcs = [
            np.power, np.add, np.subtract, np.multiply, np.divide,
            np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
            np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
            np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
            np.logical_and, np.logical_or, np.logical_xor, np.maximum,
            np.minimum, np.mod,
            np.greater, np.greater_equal, np.less, np.less_equal,
            np.equal, np.not_equal]

        a = np.array('1')
        b = 1
        c = np.array([1., 2.])
        for f in binary_funcs:
            assert_raises(TypeError, f, a, b)
            assert_raises(TypeError, f, c, a) 
Example 11
Project: recruit   Author: Frank-qlu   File: test_period.py    License: Apache License 2.0
def test_pi_ops_nat(self):
        idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'],
                          freq='M', name='idx')
        expected = PeriodIndex(['2011-03', '2011-04', 'NaT', '2011-06'],
                               freq='M', name='idx')

        self._check(idx, lambda x: x + 2, expected)
        self._check(idx, lambda x: 2 + x, expected)
        self._check(idx, lambda x: np.add(x, 2), expected)

        self._check(idx + 2, lambda x: x - 2, idx)
        self._check(idx + 2, lambda x: np.subtract(x, 2), idx)

        # freq with mult
        idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'],
                          freq='2M', name='idx')
        expected = PeriodIndex(['2011-07', '2011-08', 'NaT', '2011-10'],
                               freq='2M', name='idx')

        self._check(idx, lambda x: x + 3, expected)
        self._check(idx, lambda x: 3 + x, expected)
        self._check(idx, lambda x: np.add(x, 3), expected)

        self._check(idx + 3, lambda x: x - 3, idx)
        self._check(idx + 3, lambda x: np.subtract(x, 3), idx) 
Example 12
Project: recruit   Author: Frank-qlu   File: test_period.py    License: Apache License 2.0
def test_pi_ops_array_int(self):

        idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'],
                          freq='M', name='idx')
        f = lambda x: x + np.array([1, 2, 3, 4])
        exp = PeriodIndex(['2011-02', '2011-04', 'NaT', '2011-08'],
                          freq='M', name='idx')
        self._check(idx, f, exp)

        f = lambda x: np.add(x, np.array([4, -1, 1, 2]))
        exp = PeriodIndex(['2011-05', '2011-01', 'NaT', '2011-06'],
                          freq='M', name='idx')
        self._check(idx, f, exp)

        f = lambda x: x - np.array([1, 2, 3, 4])
        exp = PeriodIndex(['2010-12', '2010-12', 'NaT', '2010-12'],
                          freq='M', name='idx')
        self._check(idx, f, exp)

        f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))
        exp = PeriodIndex(['2010-10', '2010-12', 'NaT', '2011-06'],
                          freq='M', name='idx')
        self._check(idx, f, exp) 
Example 13
Project: plydata   Author: has2k1   File: test_dataframe.py    License: BSD 3-Clause "New" or "Revised" License
def test_mutate_all():
    df = pd.DataFrame({
        'alpha': list('aaabbb'),
        'beta': list('babruq'),
        'theta': list('cdecde'),
        'x': [1, 2, 3, 4, 5, 6],
        'y': [6, 5, 4, 3, 2, 1],
        'z': [7, 9, 11, 8, 10, 12]
    })

    result = (df
              >> group_by('alpha')
              >> select('x', 'y', 'z')
              >> mutate_all((np.add, np.subtract), 10)
              )
    assert 'alpha' in result 
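Here np.subtract is passed to mutate_all as an ordinary callable. Because NumPy ufuncs are plain Python objects, the same pattern works outside plydata as well; a minimal sketch with an arbitrary array:

import numpy as np

ops = (np.add, np.subtract)          # ufuncs are first-class callables
x = np.array([1, 2, 3])

results = {op.__name__: op(x, 10) for op in ops}
# {'add': array([11, 12, 13]), 'subtract': array([-9, -8, -7])}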
Example 14
Project: Emotion-Recogniton-from-EEG-Signals   Author: akshat1706   File: entropy_akshat.py    License: MIT License
def app_entropy(x, order=2, metric='chebyshev'):
    """Approximate Entropy
    Parameters
    ----------
    x : list or np.array
        One-dimensional time series of shape (n_times)
    order : int (default: 2)
        Embedding dimension.
    metric : str (default: chebyshev)
        Name of the metric function used with
        :class:`~sklearn.neighbors.KDTree`. The list of available
        metric functions is given by: ``KDTree.valid_metrics``.
    Returns
    -------
    ae : float
        Approximate Entropy.
 
    """
    phi = _app_samp_entropy(x, order=order, metric=metric, approximate=True)
    return np.subtract(phi[0], phi[1]) 
Example 15
Project: mars   Author: mars-project   File: histogram.py    License: Apache License 2.0
def _unsigned_subtract(a, b):
    """
    Subtract two values where a >= b, and produce an unsigned result

    This is needed when finding the difference between the upper and lower
    bound of an int16 histogram
    """
    # coerce to a single type
    signed_to_unsigned = {
        np.byte: np.ubyte,
        np.short: np.ushort,
        np.intc: np.uintc,
        np.int_: np.uint,
        np.longlong: np.ulonglong
    }
    dt = np.result_type(a, b)
    try:
        dt = signed_to_unsigned[dt.type]
    except KeyError:  # pragma: no cover
        return np.subtract(a, b, dtype=dt)
    else:
        # we know the inputs are integers, and we are deliberately casting
        # signed to unsigned
        return np.subtract(a, b, casting='unsafe', dtype=dt) 
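The reason for the unsigned cast is that the span of a signed integer range can overflow its own type. A short demonstration of the failure mode and the fix, using standalone int16 scalars rather than histogram bounds:

import numpy as np

hi = np.int16(32000)
lo = np.int16(-32000)

np.subtract(hi, lo)                                      # wraps around within int16 (typically -1536, with an overflow warning)
np.subtract(hi, lo, casting='unsafe', dtype=np.uint16)   # 64000, which fits in the unsigned type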
Example 16
Project: TNT   Author: GaoangW   File: facenet.py    License: GNU General Public License v3.0
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper
    
    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
  
    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
        
        basic_loss = tf.add(tf.subtract(pos_dist,neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
      
    return loss 
Example 17
Project: TNT   Author: GaoangW   File: facenet.py    License: GNU General Public License v3.0
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper
    
    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
  
    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
        
        basic_loss = tf.add(tf.subtract(pos_dist,neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
      
    return loss 
Example 18
Project: TNT   Author: GaoangW   File: facenet.py    License: GNU General Public License v3.0
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper
    
    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
  
    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
        
        basic_loss = tf.add(tf.subtract(pos_dist,neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
      
    return loss 
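The three FaceNet snippets above use tf.subtract, but the same distance computation translates directly to NumPy. A hedged sketch with random embeddings, not part of the TNT project:

import numpy as np

anchor = np.random.rand(8, 128)      # batch of 8 embeddings, dimension 128
positive = np.random.rand(8, 128)
negative = np.random.rand(8, 128)
alpha = 0.2

pos_dist = np.sum(np.square(np.subtract(anchor, positive)), axis=1)
neg_dist = np.sum(np.square(np.subtract(anchor, negative)), axis=1)
loss = np.mean(np.maximum(pos_dist - neg_dist + alpha, 0.0))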
Example 19
Project: lambda-packs   Author: ryfeus   File: histograms.py    License: MIT License
def _unsigned_subtract(a, b):
    """
    Subtract two values where a >= b, and produce an unsigned result

    This is needed when finding the difference between the upper and lower
    bound of an int16 histogram
    """
    # coerce to a single type
    signed_to_unsigned = {
        np.byte: np.ubyte,
        np.short: np.ushort,
        np.intc: np.uintc,
        np.int_: np.uint,
        np.longlong: np.ulonglong
    }
    dt = np.result_type(a, b)
    try:
        dt = signed_to_unsigned[dt.type]
    except KeyError:
        return np.subtract(a, b, dtype=dt)
    else:
        # we know the inputs are integers, and we are deliberately casting
        # signed to unsigned
        return np.subtract(a, b, casting='unsafe', dtype=dt) 
Example 20
def update(self, embeddings0, embeddings1, labels):
        embeddings0, embeddings1, labels = map(
            to_numpy, (embeddings0, embeddings1, labels))
        if self.dist_type == 'euclidean':
            diff = np.subtract(embeddings0, embeddings1)
            dists = np.sqrt(np.sum(np.square(diff), 1))
        else:
            dists = 1 - np.sum(np.multiply(embeddings0,
                                           embeddings1),
                               axis=1) / (np.linalg.norm(embeddings0,
                                                         axis=1) * np.linalg.norm(embeddings1,
                                                                                  axis=1))

        self.dists.extend(dists)
        self.issame.extend(labels) 
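Both branches reduce to standard distance formulas. A self-contained sketch of the Euclidean and cosine distances between two batches of embeddings, with random placeholder data:

import numpy as np

e0 = np.random.rand(4, 16)
e1 = np.random.rand(4, 16)

diff = np.subtract(e0, e1)
euclidean = np.sqrt(np.sum(np.square(diff), axis=1))

cosine = 1 - np.sum(e0 * e1, axis=1) / (
    np.linalg.norm(e0, axis=1) * np.linalg.norm(e1, axis=1))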
Example 21
Project: tensortrade   Author: tensortrade-org   File: node.py    License: Apache License 2.0
def __sub__(self, other):
        if np.isscalar(other):
            other = Constant(other, "Constant({})".format(other))
            name = "Subtract({},{})".format(self.name, other.name)
            return BinOp(np.subtract, name)(self, other)
        assert isinstance(other, Node)
        name = "Subtract({},{})".format(self.name, other.name)
        return BinOp(np.subtract, name)(self, other) 
Example 22
Project: tensortrade   Author: tensortrade-org   File: node.py    License: Apache License 2.0
def __rsub__(self, other):
        if not np.isscalar(other):
            raise Exception("Invalid node operation.")
        other = Constant(other, "Constant({})".format(other))
        name = "Subtract({},{})".format(other.name, self.name)
        return BinOp(np.subtract, name)(other, self) 
Example 23
Project: NeuroKit   Author: neuropsychology   File: tests_complexity.py    License: MIT License
def entropy_app_entropy(x, order=2, metric="chebyshev"):
    phi = entropy_app_samp_entropy(x, order=order, metric=metric, approximate=True)
    return np.subtract(phi[0], phi[1]) 
Example 24
Project: kivy-smoothie-host   Author: wolfmanjm   File: __init__.py    License: GNU General Public License v3.0
def draw(self, *args):
        super(ContourPlot, self).draw(*args)
        data = self.data
        xdim, ydim = data.shape

        # Find the minimum and maximum z values
        zmax = data.max()
        zmin = data.min()
        rgb_scale_factor = 1.0 / (zmax - zmin) * 255
        # Scale the z values into RGB data
        buf = np.array(data, dtype=float, copy=True)
        np.subtract(buf, zmin, out=buf)
        np.multiply(buf, rgb_scale_factor, out=buf)
        # Duplicate into 3 dimensions (RGB) and convert to byte array
        buf = np.asarray(buf, dtype=np.uint8)
        buf = np.expand_dims(buf, axis=2)
        buf = np.concatenate((buf, buf, buf), axis=2)
        buf = np.reshape(buf, (xdim, ydim, 3))

        charbuf = bytearray(np.reshape(buf, (buf.size)))
        self._texture = Texture.create(size=(xdim, ydim), colorfmt='rgb')
        self._texture.blit_buffer(charbuf, colorfmt='rgb', bufferfmt='ubyte')
        image = self._image
        image.texture = self._texture

        x_px = self.x_px()
        y_px = self.y_px()
        bl = x_px(self.xrange[0]), y_px(self.yrange[0])
        tr = x_px(self.xrange[1]), y_px(self.yrange[1])
        image.pos = bl
        w = tr[0] - bl[0]
        h = tr[1] - bl[1]
        image.size = (w, h) 
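np.subtract(buf, zmin, out=buf) is used here for its out= argument, which writes the result back into buf without allocating a new array. A brief illustration of the same in-place scaling on a tiny made-up grid:

import numpy as np

data = np.array([[2.0, 4.0], [6.0, 8.0]])
zmin, zmax = data.min(), data.max()

buf = np.array(data, dtype=float, copy=True)
np.subtract(buf, zmin, out=buf)                    # shift so the minimum becomes 0, in place
np.multiply(buf, 255.0 / (zmax - zmin), out=buf)   # scale to the 0-255 range, in place
buf = buf.astype(np.uint8)                         # [[0, 85], [170, 255]]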
Example 25
Project: insightface   Author: deepinsight   File: verification.py    License: MIT License
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10):
    assert(embeddings1.shape[0] == embeddings2.shape[0])
    assert(embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = LFold(n_splits=nrof_folds, shuffle=False)
    
    val = np.zeros(nrof_folds)
    far = np.zeros(nrof_folds)
    
    diff = np.subtract(embeddings1, embeddings2)
    dist = np.sum(np.square(diff),1)
    indices = np.arange(nrof_pairs)
    
    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
      
        # Find the threshold that gives FAR = far_target
        far_train = np.zeros(nrof_thresholds)
        for threshold_idx, threshold in enumerate(thresholds):
            _, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
        if np.max(far_train)>=far_target:
            f = interpolate.interp1d(far_train, thresholds, kind='slinear')
            threshold = f(far_target)
        else:
            threshold = 0.0
    
        val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])
  
    val_mean = np.mean(val)
    far_mean = np.mean(far)
    val_std = np.std(val)
    return val_mean, val_std, far_mean 
Example 26
Project: insightface   Author: deepinsight   File: lfw.py    License: MIT License
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10):
    assert(embeddings1.shape[0] == embeddings2.shape[0])
    assert(embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = KFold(n_splits=nrof_folds, shuffle=False)
    
    val = np.zeros(nrof_folds)
    far = np.zeros(nrof_folds)
    
    diff = np.subtract(embeddings1, embeddings2)
    dist = np.sum(np.square(diff),1)
    indices = np.arange(nrof_pairs)
    
    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
      
        # Find the threshold that gives FAR = far_target
        far_train = np.zeros(nrof_thresholds)
        for threshold_idx, threshold in enumerate(thresholds):
            _, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
        if np.max(far_train)>=far_target:
            f = interpolate.interp1d(far_train, thresholds, kind='slinear')
            threshold = f(far_target)
        else:
            threshold = 0.0
    
        val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])
  
    val_mean = np.mean(val)
    far_mean = np.mean(far)
    val_std = np.std(val)
    return val_mean, val_std, far_mean 
Example 27
Project: insightface   Author: deepinsight   File: face_model.py    License: MIT License
def is_same_id(self, source_img, target_img_list):
    source_face = self.get_aligned_face(source_img, True)
    print('source face', source_face.shape)
    target_face_list = []
    pp = 0
    for img in target_img_list:
      target_force = False
      if pp==len(target_img_list)-1 and len(target_face_list)==0:
        target_force = True
      target_face = self.get_aligned_face(img, target_force)
      if target_face is not None:
        target_face_list.append(target_face)
      pp+=1
    print('target face', len(target_face_list)) 
    source_feature = self.get_feature(source_face, True)
    target_feature = None
    for target_face in target_face_list:
      _feature = self.get_feature(target_face, False)
      if target_feature is None:
        target_feature = _feature
      else:
        target_feature += _feature
    target_feature = sklearn.preprocessing.normalize(target_feature)
    #sim = np.dot(source_feature, target_feature.T)
    diff = np.subtract(source_feature, target_feature)
    dist = np.sum(np.square(diff),1)
    print('dist', dist)
    #print(sim, dist)
    if dist<=self.threshold:
      return True
    else:
      return False 
Example 28
Project: insightface   Author: deepinsight   File: verification.py    License: MIT License
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10):
    assert(embeddings1.shape[0] == embeddings2.shape[0])
    assert(embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = LFold(n_splits=nrof_folds, shuffle=False)
    
    val = np.zeros(nrof_folds)
    far = np.zeros(nrof_folds)
    
    diff = np.subtract(embeddings1, embeddings2)
    dist = np.sum(np.square(diff),1)
    indices = np.arange(nrof_pairs)
    
    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
      
        # Find the threshold that gives FAR = far_target
        far_train = np.zeros(nrof_thresholds)
        for threshold_idx, threshold in enumerate(thresholds):
            _, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
        if np.max(far_train)>=far_target:
            f = interpolate.interp1d(far_train, thresholds, kind='slinear')
            threshold = f(far_target)
        else:
            threshold = 0.0
    
        val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])
  
    val_mean = np.mean(val)
    far_mean = np.mean(far)
    val_std = np.std(val)
    return val_mean, val_std, far_mean 
Example 29
Project: insightface   Author: deepinsight   File: lfw.py    License: MIT License
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10):
    assert(embeddings1.shape[0] == embeddings2.shape[0])
    assert(embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = KFold(n_splits=nrof_folds, shuffle=False)
    
    val = np.zeros(nrof_folds)
    far = np.zeros(nrof_folds)
    
    diff = np.subtract(embeddings1, embeddings2)
    dist = np.sum(np.square(diff),1)
    indices = np.arange(nrof_pairs)
    
    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
      
        # Find the threshold that gives FAR = far_target
        far_train = np.zeros(nrof_thresholds)
        for threshold_idx, threshold in enumerate(thresholds):
            _, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
        if np.max(far_train)>=far_target:
            f = interpolate.interp1d(far_train, thresholds, kind='slinear')
            threshold = f(far_target)
        else:
            threshold = 0.0
    
        val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])
  
    val_mean = np.mean(val)
    far_mean = np.mean(far)
    val_std = np.std(val)
    return val_mean, val_std, far_mean 
Example 30
Project: speech_separation   Author: bill9800   File: evaluation.py    License: MIT License
def mse(true_path,pred_path):
    true_data,_ = librosa.load(true_path)
    true_stft = utils.stft(true_data)
    pred_data,_ = librosa.load(pred_path)
    pred_stft = utils.stft(pred_data)
    return np.mean(np.square(np.subtract(true_stft,pred_stft)))
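The return value is a plain mean-squared error over the two spectrograms. The same expression with ordinary arrays, as a minimal sketch using made-up values:

import numpy as np

true_stft = np.array([1.0, 2.0, 3.0])
pred_stft = np.array([1.5, 1.5, 3.0])

mse = np.mean(np.square(np.subtract(true_stft, pred_stft)))  # (0.25 + 0.25 + 0.0) / 3 = 0.1666...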