Python numpy.invert() Examples

The following are 30 code examples of numpy.invert(), collected from open-source projects. The source file and originating project for each example are noted above it. You may also want to check out all available functions and classes of the numpy module.
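As a minimal, self-contained illustration (not taken from any of the projects below), np.invert computes the elementwise bitwise NOT of an integer array and the logical NOT of a boolean array; the ~ operator behaves the same way:

import numpy as np

a = np.array([0, 1, 254], dtype=np.uint8)
print(np.invert(a))      # [255 254   1] -- bitwise NOT, identical to ~a

mask = np.array([True, False, True])
print(np.invert(mask))   # [False  True False] -- logical NOT for boolean arrays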
Example #1
Source File: timit_for_srnn.py    From srnn with MIT License
def create_test_set(x_lst):
    n = len(x_lst)
    x_lens = np.array([len(x) for x in x_lst])  # explicit list so this also works in Python 3
    max_len = max(map(len, x_lst)) - 1
    u_out = np.full((n, max_len, OUTDIM), np.nan, dtype='float32')
    x_out = np.full((n, max_len, OUTDIM), np.nan, dtype='float32')
    for row, vec in enumerate(x_lst):
        l = len(vec) - 1
        u = vec[:-1]  # all but last element
        x = vec[1:]   # all but first element

        x_out[row, :l] = x
        u_out[row, :l] = u

    mask = np.invert(np.isnan(x_out))
    x_out[np.isnan(x_out)] = 0
    u_out[np.isnan(u_out)] = 0
    mask = mask[:, :, 0]
    assert np.all((mask.sum(axis=1)+1) == x_lens)
    return u_out, x_out, mask.astype('float32') 
Example #2
Source File: test_mem_overlap.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_ufunc_at_manual(self):
        def check(ufunc, a, ind, b=None):
            a0 = a.copy()
            if b is None:
                ufunc.at(a0, ind.copy())
                c1 = a0.copy()
                ufunc.at(a, ind)
                c2 = a.copy()
            else:
                ufunc.at(a0, ind.copy(), b.copy())
                c1 = a0.copy()
                ufunc.at(a, ind, b)
                c2 = a.copy()
            assert_array_equal(c1, c2)

        # Overlap with index
        a = np.arange(10000, dtype=np.int16)
        check(np.invert, a[::-1], a)

        # Overlap with second data array
        a = np.arange(100, dtype=np.int16)
        ind = np.arange(0, 100, 2, dtype=np.int16)
        check(np.add, a, ind, a[25:75]) 
Example #3
Source File: rattlesnake.py    From rattlesnake with MIT License
def invert(data):
    """
    Inverts the byte data it received utilizing an XOR operation.

    :param data: A chunk of byte data
    :return inverted: The same size of chunked data inverted bitwise
    """

    # Interpret the bytestring as an array of 32-bit integers
    # (np.frombuffer replaces the deprecated np.fromstring)
    intwave = np.frombuffer(data, np.int32)
    # Invert the integer
    intwave = np.invert(intwave)
    # Reinterpret the inverted integers as raw bytes
    inverted = np.frombuffer(intwave, np.byte)
    # Return the inverted audio data
    return inverted 
Example #4
Source File: metrics.py    From timeception with GNU General Public License v3.0
def map_charades(y_true, y_pred):
    """ Returns mAP """
    m_aps = []
    n_classes = y_pred.shape[1]
    for oc_i in range(n_classes):
        pred_row = y_pred[:, oc_i]
        sorted_idxs = np.argsort(-pred_row)
        true_row = y_true[:, oc_i]
        tp = true_row[sorted_idxs] == 1
        fp = np.invert(tp)
        n_pos = tp.sum()
        if n_pos < 0.1:
            m_aps.append(float('nan'))
            continue
        f_pcs = np.cumsum(fp)
        t_pcs = np.cumsum(tp)
        prec = t_pcs / (f_pcs + t_pcs).astype(float)
        avg_prec = 0
        for i in range(y_pred.shape[0]):
            if tp[i]:
                avg_prec += prec[i]
        m_aps.append(avg_prec / n_pos.astype(float))
    m_aps = np.array(m_aps)
    m_ap = np.mean(m_aps)
    return m_ap 
Example #5
Source File: metrics.py    From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License
def map_charades(y_true, y_pred):
    """ Returns mAP """
    m_aps = []
    n_classes = y_pred.shape[1]
    for oc_i in range(n_classes):
        pred_row = y_pred[:, oc_i]
        sorted_idxs = np.argsort(-pred_row)
        true_row = y_true[:, oc_i]
        tp = true_row[sorted_idxs] == 1
        fp = np.invert(tp)
        n_pos = tp.sum()
        if n_pos < 0.1:
            m_aps.append(float('nan'))
            continue
        f_pcs = np.cumsum(fp)
        t_pcs = np.cumsum(tp)
        prec = t_pcs / (f_pcs + t_pcs).astype(float)
        avg_prec = 0
        for i in range(y_pred.shape[0]):
            if tp[i]:
                avg_prec += prec[i]
        m_aps.append(avg_prec / n_pos.astype(float))
    m_aps = np.array(m_aps)
    m_ap = np.mean(m_aps)
    return m_ap 
Example #6
Source File: dataset.py    From AugmentedAutoencoder with MIT License
def augment_occlusion_mask(self, masks, verbose=False, min_trans=0.2, max_trans=0.7, max_occl=0.25, min_occl=0.0):


        new_masks = np.zeros_like(masks, dtype=bool)
        occl_masks_batch = self.random_syn_masks[np.random.choice(len(self.random_syn_masks),len(masks))]
        for idx,mask in enumerate(masks):
            occl_mask = occl_masks_batch[idx]
            while True:
                trans_x = int(np.random.choice([-1,1])*(np.random.rand()*(max_trans-min_trans) + min_trans)*occl_mask.shape[0])
                trans_y = int(np.random.choice([-1,1])*(np.random.rand()*(max_trans-min_trans) + min_trans)*occl_mask.shape[1])
                M = np.float32([[1,0,trans_x],[0,1,trans_y]])

                transl_occl_mask = cv2.warpAffine(occl_mask,M,(occl_mask.shape[0],occl_mask.shape[1]))

                overlap_matrix = np.invert(mask.astype(bool)) * transl_occl_mask.astype(bool)
                overlap = len(overlap_matrix[overlap_matrix==True])/float(len(mask[mask==0]))

                if overlap < max_occl and overlap > min_occl:
                    new_masks[idx,...] = np.logical_xor(mask.astype(bool), overlap_matrix)
                    if verbose:
                        print('overlap is ', overlap)
                    break

        return new_masks 
Example #7
Source File: stylecloud.py    From stylecloud with MIT License
def gen_mask_array(icon_dir: str, invert_mask: bool, size: int):
    """Generates a numpy array of an icon mask."""
    icon = Image.open(os.path.join(icon_dir, "icon.png"))

    if isinstance(size, int):
        size = (size, size)

    # https://stackoverflow.com/a/2563883
    icon_w, icon_h = icon.size
    icon_mask = Image.new("RGBA", icon.size, (255, 255, 255, 255))
    icon_mask.paste(icon, icon)
    mask = Image.new("RGBA", size, (255, 255, 255, 255))
    mask_w, mask_h = mask.size
    offset = ((mask_w - icon_w) // 2, (mask_h - icon_h) // 2)
    mask.paste(icon_mask, offset)
    mask_array = np.array(mask, dtype="uint8")

    if invert_mask:
        mask_array = np.invert(mask_array)

    return mask_array 
Example #8
Source File: test_mem_overlap.py    From recruit with Apache License 2.0
def test_ufunc_at_manual(self):
        def check(ufunc, a, ind, b=None):
            a0 = a.copy()
            if b is None:
                ufunc.at(a0, ind.copy())
                c1 = a0.copy()
                ufunc.at(a, ind)
                c2 = a.copy()
            else:
                ufunc.at(a0, ind.copy(), b.copy())
                c1 = a0.copy()
                ufunc.at(a, ind, b)
                c2 = a.copy()
            assert_array_equal(c1, c2)

        # Overlap with index
        a = np.arange(10000, dtype=np.int16)
        check(np.invert, a[::-1], a)

        # Overlap with second data array
        a = np.arange(100, dtype=np.int16)
        ind = np.arange(0, 100, 2, dtype=np.int16)
        check(np.add, a, ind, a[25:75]) 
Example #9
Source File: utility.py    From isp with MIT License
def degamma_srgb(self, clip_range=[0, 65535]):

        # bring data in range 0 to 1
        data = np.clip(self.data, clip_range[0], clip_range[1])
        data = np.divide(data, clip_range[1])

        data = np.asarray(data)
        mask = data > 0.04045

        # basically, if data[x, y, c] > 0.04045, data[x, y, c] = ( (data[x, y, c] + 0.055) / 1.055 ) ^ 2.4
        #            else, data[x, y, c] = data[x, y, c] / 12.92
        data[mask] += 0.055
        data[mask] /= 1.055
        data[mask] **= 2.4

        data[np.invert(mask)] /= 12.92

        # rescale
        return np.clip(data * clip_range[1], clip_range[0], clip_range[1]) 
Example #10
Source File: utility.py    From isp with MIT License
def gamma_srgb(self, clip_range=[0, 65535]):

        # bring data in range 0 to 1
        data = np.clip(self.data, clip_range[0], clip_range[1])
        data = np.divide(data, clip_range[1])

        data = np.asarray(data)
        mask = data > 0.0031308

        # basically, if data[x, y, c] > 0.0031308, data[x, y, c] = 1.055 * ( var_R(i, j) ^ ( 1 / 2.4 ) ) - 0.055
        #            else, data[x, y, c] = data[x, y, c] * 12.92
        data[mask] **= 0.4167
        data[mask] *= 1.055
        data[mask] -= 0.055

        data[np.invert(mask)] *= 12.92

        # rescale
        return np.clip(data * clip_range[1], clip_range[0], clip_range[1]) 
Example #11
Source File: test_mem_overlap.py    From vnpy_crypto with MIT License
def test_ufunc_at_manual(self):
        def check(ufunc, a, ind, b=None):
            a0 = a.copy()
            if b is None:
                ufunc.at(a0, ind.copy())
                c1 = a0.copy()
                ufunc.at(a, ind)
                c2 = a.copy()
            else:
                ufunc.at(a0, ind.copy(), b.copy())
                c1 = a0.copy()
                ufunc.at(a, ind, b)
                c2 = a.copy()
            assert_array_equal(c1, c2)

        # Overlap with index
        a = np.arange(10000, dtype=np.int16)
        check(np.invert, a[::-1], a)

        # Overlap with second data array
        a = np.arange(100, dtype=np.int16)
        ind = np.arange(0, 100, 2, dtype=np.int16)
        check(np.add, a, ind, a[25:75]) 
Example #12
Source File: utility.py    From isp with MIT License
def xyz2lab(self, cie_version="1931", illuminant="d65"):

        xyz_reference = helpers().get_xyz_reference(cie_version, illuminant)

        data = self.data
        data[:, :, 0] = data[:, :, 0] / xyz_reference[0]
        data[:, :, 1] = data[:, :, 1] / xyz_reference[1]
        data[:, :, 2] = data[:, :, 2] / xyz_reference[2]

        data = np.asarray(data)

        # if data[x, y, c] > 0.008856, data[x, y, c] = data[x, y, c] ^ (1/3)
        # else, data[x, y, c] = 7.787 * data[x, y, c] + 16/116
        mask = data > 0.008856
        data[mask] **= 1./3.
        data[np.invert(mask)] *= 7.787
        data[np.invert(mask)] += 16./116.

        data = np.float32(data)
        output = np.empty(np.shape(self.data), dtype=np.float32)
        output[:, :, 0] = 116. * data[:, :, 1] - 16.
        output[:, :, 1] = 500. * (data[:, :, 0] - data[:, :, 1])
        output[:, :, 2] = 200. * (data[:, :, 1] - data[:, :, 2])

        return output 
Example #13
Source File: utility.py    From isp with MIT License
def lab2xyz(self, cie_version="1931", illuminant="d65"):

        output = np.empty(np.shape(self.data), dtype=np.float32)

        output[:, :, 1] = (self.data[:, :, 0] + 16.) / 116.
        output[:, :, 0] = (self.data[:, :, 1] / 500.) + output[:, :, 1]
        output[:, :, 2] = output[:, :, 1] - (self.data[:, :, 2] / 200.)

        # if output[x, y, c] > 0.008856, output[x, y, c] ^ 3
        # else, output[x, y, c] = ( output[x, y, c] - 16/116 ) / 7.787
        output = np.asarray(output)
        mask = output > 0.008856
        output[mask] **= 3.
        output[np.invert(mask)] -= 16/116
        output[np.invert(mask)] /= 7.787

        xyz_reference = helpers().get_xyz_reference(cie_version, illuminant)

        output = np.float32(output)
        output[:, :, 0] = output[:, :, 0] * xyz_reference[0]
        output[:, :, 1] = output[:, :, 1] * xyz_reference[1]
        output[:, :, 2] = output[:, :, 2] * xyz_reference[2]

        return output 
Example #14
Source File: train_CRF_LC.py    From sato with Apache License 2.0
def eval_batch(table_batch, label_batch, mask_batch):
    # reshape (table_batch * table_size * features)
    for f_g in table_batch:
        table_batch[f_g] = table_batch[f_g].view(batch_size * MAX_COL_COUNT, -1)

    emissions = classifier(table_batch).view(batch_size, MAX_COL_COUNT, -1)
    pred = model.decode(emissions, mask_batch)

    pred = np.concatenate(pred)
    labels = label_batch.view(-1).cpu().numpy()
    masks = mask_batch.view(-1).cpu().numpy()
    invert_masks = np.invert(masks==1)
    
    return pred, ma.array(labels, mask=invert_masks).compressed()

# randomly shuffle the orders of columns in a table batch 
Example #15
Source File: interactive.py    From pyiron with BSD 3-Clause "New" or "Revised" License
def _extend_species_elements(struct_species, species_array):
        if not all(np.isin(struct_species, species_array)):
            new_elements_index = np.invert(np.isin(struct_species, species_array))
            species_array = np.append(species_array, struct_species[new_elements_index])
        return species_array

    # Functions which have to be implemented by the fin 
Example #16
Source File: test_extras.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_in1d_invert(self):
        # Test in1d's invert parameter
        a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
        assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))

        a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1])
        b = array([1, 5, -1], mask=[0, 0, 1])
        assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))

        assert_array_equal([], in1d([], [], invert=True)) 
Example #17
Source File: dataset.py    From AugmentedAutoencoder with MIT License
def augment_squares(self, masks, rand_idcs, max_occl=0.25):
        new_masks = np.invert(masks)

        idcs = np.arange(len(masks))
        while len(idcs) > 0:
            new_masks[idcs] = self._aug_occl.augment_images(np.invert(masks[idcs]))
            new_noof_obj_pixels = np.count_nonzero(new_masks,axis=(1,2))
            idcs = np.where(new_noof_obj_pixels/self.noof_obj_pixels[rand_idcs].astype(np.float32) < 1-max_occl)[0]
            print(idcs)
        return np.invert(new_masks) 
Example #18
Source File: test_mem_overlap.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_unary_ufunc_where_same(self):
        # Check behavior at wheremask overlap
        ufunc = np.invert

        def check(a, out, mask):
            c1 = ufunc(a, out=out.copy(), where=mask.copy())
            c2 = ufunc(a, out=out, where=mask)
            assert_array_equal(c1, c2)

        # Check behavior with same input and output arrays
        x = np.arange(100).astype(np.bool_)
        check(x, x, x)
        check(x, x.copy(), x)
        check(x, x, x.copy()) 
Example #19
Source File: base_dataset.py    From iSketchNFill with GNU General Public License v3.0
def __binarize(img):
    img = ImageOps.invert(img)
    img = img.convert('1')
    #img_np = np.array(img)
    ##img_np_inverted = np.invert(img_np)
    ##final_img=Image.fromarray(img_np_inverted)
    #final_img=Image.fromarray(img_np)
    return img 
Example #20
Source File: obsdata.py    From eht-imaging with GNU General Public License v3.0
def flag_uvdist(self, uv_min=0.0, uv_max=1e12, output='kept'):
        """Flag data points outside a given uv range

           Args:
               uv_min (float): remove points with uvdist less than this
               uv_max (float): remove points with uvdist greater than this
               output (str): returns 'kept', 'flagged', or 'both' (a dictionary)

           Returns:
               (Obsdata): an observation object with flagged data points removed
        """

        uvdist_list = self.unpack('uvdist')['uvdist']
        mask = np.array([uv_min <= uvdist_list[j] <= uv_max for j in range(len(uvdist_list))])
        datatable_kept = self.data.copy()
        datatable_flagged = self.data.copy()

        datatable_kept = datatable_kept[mask]
        datatable_flagged = datatable_flagged[np.invert(mask)]
        print('U-V flagged %d/%d visibilities' % (len(datatable_flagged), len(self.data)))

        obs_kept = self.copy()
        obs_flagged = self.copy()
        obs_kept.data = datatable_kept
        obs_flagged.data = datatable_flagged

        if output == 'flagged':
            return obs_flagged
        elif output == 'both':
            return {'kept': obs_kept, 'flagged': obs_flagged}
        else:
            return obs_kept 
Example #21
Source File: obsdata.py    From eht-imaging with GNU General Public License v3.0
def flag_large_fractional_pol(self, max_fractional_pol=1.0, output='kept'):
        """Flag visibilities for which the fractional polarization is above a specified threshold

           Args:
               max_fractional_pol (float): Maximum fractional polarization
               output (str): returns 'kept', 'flagged', or 'both' (a dictionary)

           Returns:
               (Obsdata): an observation object with flagged data points removed
        """

        m = np.nan_to_num(self.unpack(['mamp'])['mamp'])
        mask = m < max_fractional_pol

        datatable_kept = self.data.copy()
        datatable_flagged = self.data.copy()

        datatable_kept = datatable_kept[mask]
        datatable_flagged = datatable_flagged[np.invert(mask)]
        print('Flagged %d/%d visibilities' % (len(datatable_flagged), len(self.data)))

        obs_kept = self.copy()
        obs_flagged = self.copy()
        obs_kept.data = datatable_kept
        obs_flagged.data = datatable_flagged

        if output == 'flagged':
            return obs_flagged
        elif output == 'both':
            return {'kept': obs_kept, 'flagged': obs_flagged}
        else:
            return obs_kept 
Example #22
Source File: obsdata.py    From eht-imaging with GNU General Public License v3.0
def flag_elev(self, elev_min=0.0, elev_max=90, output='kept'):
        """Flag visibilities for which either station is outside a stated elevation range

           Args:
               elev_min (float): Minimum elevation (deg)
               elev_max (float): Maximum elevation (deg)
               output (str): returns 'kept', 'flagged', or 'both' (a dictionary)

           Returns:
               (Obsdata): an observation object with flagged data points removed
        """

        el_pairs = self.unpack(['el1', 'el2'])
        mask = (np.min((el_pairs['el1'], el_pairs['el2']), axis=0) > elev_min)
        mask *= (np.max((el_pairs['el1'], el_pairs['el2']), axis=0) < elev_max)

        datatable_kept = self.data.copy()
        datatable_flagged = self.data.copy()

        datatable_kept = datatable_kept[mask]
        datatable_flagged = datatable_flagged[np.invert(mask)]
        print('Flagged %d/%d visibilities' % (len(datatable_flagged), len(self.data)))

        obs_kept = self.copy()
        obs_flagged = self.copy()
        obs_kept.data = datatable_kept
        obs_flagged.data = datatable_flagged

        if output == 'flagged':
            return obs_flagged
        elif output == 'both':
            return {'kept': obs_kept, 'flagged': obs_flagged}
        else:
            return obs_kept 
Example #23
Source File: test_extras.py    From Computable with MIT License
def test_in1d_invert(self):
        "Test in1d's invert parameter"
        a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
        assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))

        a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1])
        b = array([1, 5, -1], mask=[0, 0, 1])
        assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))

        assert_array_equal([], in1d([], [], invert=True)) 
Example #24
Source File: test_arraysetops.py    From Computable with MIT License
def test_in1d_invert(self):
        "Test in1d's invert parameter"
        # We use two different sizes for the b array here to test the
        # two different paths in in1d().
        for mult in (1, 10):
            a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
            b = [2, 3, 4] * mult
            assert_array_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) 
Example #25
Source File: _conversion.py    From OpenFermion with Apache License 2.0
def _iterate_basis_order_(reference_determinant, order):
    """A helper for iterating over determinants of a fixed excitation rank.

    Args:
        reference_determinant(list(bool)): The reference state with respect to
            which we are iterating over excited determinants.
        order(int): The number of excitations from the modes which are occupied
            in the reference_determinant.

    Yields:
        Lists of bools which indicate which orbitals are occupied and which are
            unoccupied in the current determinant.
        """
    occupied_indices = numpy.where(reference_determinant)[0]
    unoccupied_indices = numpy.where(numpy.invert(reference_determinant))[0]

    for occ_ind, unocc_ind in itertools.product(
            itertools.combinations(occupied_indices, order),
            itertools.combinations(unoccupied_indices, order)):
        basis_state = reference_determinant.copy()

        occ_ind = list(occ_ind)
        unocc_ind = list(unocc_ind)

        basis_state[occ_ind] = False
        basis_state[unocc_ind] = True

        yield basis_state 
Example #26
Source File: Preprocess.py    From ALPR-Indonesia with MIT License
def preprocess(imgOriginal):
    #coba = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2HSV)
    #cv2.imshow("hsv", coba )
    imgGrayscale = extractValue(imgOriginal)
    #cv2.imshow("imgGrayscale", imgGrayscale )

    imgGrayscale = np.invert(imgGrayscale) # last best use this
    #cv2.imshow("invert", imgGrayscale )
    imgMaxContrastGrayscale = maximizeContrast(imgGrayscale)
    #cv2.imshow("imgMaxContrastGrayscale", imgMaxContrastGrayscale )
    #imgMaxContrastGrayscale = np.invert(imgMaxContrastGrayscale)
    height, width = imgGrayscale.shape

    imgBlurred = np.zeros((height, width, 1), np.uint8)
    #cv2.imshow("c_3", imgBlurred )

    imgBlurred = cv2.GaussianBlur(imgMaxContrastGrayscale, GAUSSIAN_SMOOTH_FILTER_SIZE, 0)
    #cv2.imshow("imgBlurred", imgBlurred )
    #imgBlurred = np.invert(imgBlurred)
    imgThresh = cv2.adaptiveThreshold(imgBlurred, THRESHOLD_VALUE, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, ADAPTIVE_THRESH_BLOCK_SIZE, ADAPTIVE_THRESH_WEIGHT)
    #imgThresh = np.invert(imgThresh)
    #cv2.imshow("cobaaa", imgThresh)

    return imgGrayscale, imgThresh
# end function

################################################################################################### 
Example #27
Source File: test_mem_overlap.py    From vnpy_crypto with MIT License
def test_unary_ufunc_call_fuzz(self):
        self.check_unary_fuzz(np.invert, None, np.int16) 
Example #28
Source File: invert_imageData.py    From ALPR-Indonesia with MIT License
def main():
    ap = argparse.ArgumentParser()
    ap.add_argument("-d", "--image_train",
            help = "path for the images that you're going to invert")
    args = vars(ap.parse_args())
    if args.get("image", True):
        imgTrainingNumbers = cv2.imread(args["image_train"]) # read in training numbers image
        if imgTrainingNumbers is None:
            print "error: image not read from file \n\n"        # print error message to std out
            os.system("pause")                                  # pause so user can see error message
            return
    else:
        print("Please add -d or --image_train argument")

    imgGray = cv2.cvtColor(imgTrainingNumbers, cv2.COLOR_BGR2GRAY)          # get grayscale image
    imgBlurred = cv2.GaussianBlur(imgGray, (5,5), 0)                        # blur

                                                        # filter image from grayscale to black and white
    imgThresh = cv2.adaptiveThreshold(imgBlurred,                           # input image
                                      255,                                # maxValue: pixels that pass the threshold become full white
                                      cv2.ADAPTIVE_THRESH_GAUSSIAN_C,       # use gaussian rather than mean, seems to give better results
                                      cv2.THRESH_BINARY_INV,                # invert so foreground will be white, background will be black
                                      11,                                   # size of a pixel neighborhood used to calculate threshold value
                                      2)                                    # constant subtracted from the mean or weighted mean

    imgTrainingNumbers = np.invert(imgTrainingNumbers)
    cv2.imwrite("invert_"+args["image_train"],imgTrainingNumbers)
    cv2.imwrite("imgThresh_"+args["image_train"],imgThresh)
    return

################################################################################################### 
Example #29
Source File: extreme_learning_machine.py    From brainforge with GNU General Public License v3.0
def solve_with_covariance_matrices(self, Z, Y):
        A = np.cov(Z.T)
        B = np.cov(Z.T, Y.T)
        W = np.linalg.inv(A) @ B  # matrix inverse of the covariance; np.invert is an elementwise bitwise NOT and fails on float arrays
        self.layers[-1].set_weights([W, np.array([0] * self.layers[-1].neurons)], fold=False) 
Example #30
Source File: train_resnet.py    From ngraph-python with Apache License 2.0
def loop_eval(dataset, input_ops, metric_names, computation, en_top5=False):
    # Reset validation set
    dataset.reset()
    all_results = None

    eval_bsz = dataset.config['batch_size']
    eval_iter = int(dataset.ndata // eval_bsz)
    if dataset.ndata % eval_bsz != 0:
        logger.warning("Eval dataset isn't divisible by eval batch size (%d/%d),"
                       " some data will be ignored", dataset.ndata, eval_bsz)

    # Iterating over the dataset
    for step in range(eval_iter):
        feed_dict = fill_feed_dict(input_or_ph_ops=input_ops, dataset=dataset)
        # Tuple of results from computation
        predictions, miss_class, labels = computation(feed_dict=feed_dict)
        # Collect top5 and top1 results
        top5 = np.argsort(predictions, axis=0)[-5:]
        top1 = top5[-1:]
        # Get ground truth labels
        correct_labels = labels.T
        # Compare if any of the top5 matches with labels
        top5_results = np.any(np.equal(correct_labels, top5), axis=0)
        # Invert for mis-classification
        top5_results = np.invert(top5_results)
        # Compare which are not equal
        top1_results = np.not_equal(correct_labels, top1)
        # Make a list of results
        if en_top5:
            total_results = [miss_class, top5_results, top1_results]
        else:
            total_results = [miss_class, top1_results]
        # Accumulate results
        if all_results is None:
            all_results = {name: list(res) for name, res in zip(metric_names, total_results)}
        else:
            for name, res in zip(metric_names, total_results):
                all_results[name].extend(list(res))
    # Take mean of results
    reduced_results = {k: np.mean(v[:dataset.ndata]) for k, v in all_results.items()}
    return reduced_results