Python numpy.multiply() Examples

The following are 28 code examples of numpy.multiply(), collected from open-source projects. Each example notes its source file, the project it comes from, and that project's license.
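Before the project examples, a minimal standalone sketch of numpy.multiply itself: it multiplies element-wise and follows NumPy's broadcasting rules, and np.multiply(a, b) is the function form of a * b for ndarrays.

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([4.0, 5.0, 6.0])

# Element-wise product of two same-shaped arrays.
print(np.multiply(a, b))   # [ 4. 10. 18.]

# Function form of the * operator for ndarrays.
print(a * b)               # same result

# Broadcasting: scalars and compatible shapes are stretched automatically.
m = np.arange(6).reshape(2, 3)
print(np.multiply(m, 10))  # scales every element
print(np.multiply(m, a))   # the length-3 vector multiplies each row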
Example #1
Source File: captcha_generator.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def image(self, captcha_str):
        """
        Generate a greyscale captcha image representing the number string

        Parameters
        ----------
        captcha_str: str
            string of characters to render in the captcha image

        Returns
        -------
        numpy.ndarray
            Generated greyscale image as a float numpy.ndarray with values normalized to [0, 1]
        """
        img = self.captcha.generate(captcha_str)
        img = np.frombuffer(img.getvalue(), dtype='uint8')  # np.fromstring is deprecated for binary data
        img = cv2.imdecode(img, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (self.h, self.w))
        img = img.transpose(1, 0)
        img = np.multiply(img, 1 / 255.0)
        return img 
Example #2
Source File: fm.py    From Jtyoui with MIT License
def get_prediction(data, w0, w, v):
    """预测值

    :param data: 特征
    :param w0: 一次项权重
    :param w: 常数项权重
    :param v: 交叉项权重
    :return: 预测结果
    """
    m = np.shape(data)[0]
    result = []
    for x in range(m):
        inter_1 = data[x] * v
        inter_2 = np.multiply(data[x], data[x]) * np.multiply(v, v)
        inter = np.sum(np.multiply(inter_1, inter_1) - inter_2) / 2.
        p = w0 + data[x] * w + inter
        pre = sigmoid(p[0, 0])
        result.append(pre)
    return result 
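For reference, inter above implements the standard factorization machine identity (Rendle, 2010), which rewrites the O(n^2) pairwise interaction sum as a difference of squares computable in O(kn):

$$\sum_{i=1}^{n}\sum_{j=i+1}^{n} \langle v_i, v_j \rangle x_i x_j = \frac{1}{2} \sum_{f=1}^{k} \left[ \left( \sum_{i=1}^{n} v_{i,f}\, x_i \right)^{2} - \sum_{i=1}^{n} v_{i,f}^{2}\, x_i^{2} \right]$$

Here inter_1 = data[x] * v is the inner sum over x and v for each factor f, and inter_2 holds the squared terms subtracted inside the brackets.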
Example #3
Source File: 3_logistic_regression.py    From deep-learning-note with MIT License
def gradientReg(theta, X, y, learningRate):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    
    parameters = int(theta.ravel().shape[1])
    grad = np.zeros(parameters)
    
    error = sigmoid(X * theta.T) - y
    
    for i in range(parameters):
        term = np.multiply(error, X[:,i])
        
        if (i == 0):
            grad[i] = np.sum(term) / len(X)
        else:
            grad[i] = (np.sum(term) / len(X)) + ((learningRate / len(X)) * theta[:,i])
    
    return grad 
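For reference, the loop computes the regularized logistic regression gradient; the learningRate argument actually plays the role of the regularization strength λ, and the bias term (j = 0) is left unregularized:

$$\frac{\partial J}{\partial \theta_j} = \frac{1}{m} \sum_{i=1}^{m} \left( h_\theta(x^{(i)}) - y^{(i)} \right) x_j^{(i)} + \frac{\lambda}{m} \theta_j \qquad (j \ge 1)$$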
Example #4
Source File: 9_anomaly_and_rec.py    From deep-learning-note with MIT License
def cost(params, Y, R, num_features):
    Y = np.matrix(Y)  # (1682, 943)
    R = np.matrix(R)  # (1682, 943)
    num_movies = Y.shape[0]
    num_users = Y.shape[1]
    
    # reshape the parameter array into parameter matrices
    X = np.matrix(np.reshape(params[:num_movies * num_features], (num_movies, num_features)))  # (1682, 10)
    Theta = np.matrix(np.reshape(params[num_movies * num_features:], (num_users, num_features)))  # (943, 10)
    
    # initializations
    J = 0
    
    # compute the cost
    error = np.multiply((X * Theta.T) - Y, R)  # (1682, 943)
    squared_error = np.power(error, 2)  # (1682, 943)
    J = (1. / 2) * np.sum(squared_error)
    
    return J 
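In matrix form, the cost above is the squared reconstruction error restricted to observed ratings, with R acting as an element-wise mask:

$$J = \frac{1}{2} \sum_{i,j} R_{ij} \left( (X \Theta^{T})_{ij} - Y_{ij} \right)^{2}$$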
Example #5
Source File: 5_nueral_network.py    From deep-learning-note with MIT License
def cost0(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)
    
    # reshape the parameter array into parameter matrices for each layer
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))
    
    # run the feed-forward pass
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
    
    # compute the cost
    J = 0
    for i in range(m):
        first_term = np.multiply(-y[i,:], np.log(h[i,:]))
        second_term = np.multiply((1 - y[i,:]), np.log(1 - h[i,:]))
        J += np.sum(first_term - second_term)
    
    J = J / m
    
    return J 
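The loop accumulates the unregularized multi-class cross-entropy over the m training examples:

$$J = \frac{1}{m} \sum_{i=1}^{m} \sum_{k=1}^{K} \left[ -y_k^{(i)} \log h_k^{(i)} - \left(1 - y_k^{(i)}\right) \log\left(1 - h_k^{(i)}\right) \right]$$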
Example #6
Source File: 5_nueral_network.py    From deep-learning-note with MIT License
def cost(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)
    
    # reshape the parameter array into parameter matrices for each layer
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))
    
    # run the feed-forward pass
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
    
    # compute the cost
    J = 0
    for i in range(m):
        first_term = np.multiply(-y[i,:], np.log(h[i,:]))
        second_term = np.multiply((1 - y[i,:]), np.log(1 - h[i,:]))
        J += np.sum(first_term - second_term)
    
    J = J / m
    
    # add the cost regularization term
    J += (float(learning_rate) / (2 * m)) * (np.sum(np.power(theta1[:,1:], 2)) + np.sum(np.power(theta2[:,1:], 2)))
    
    return J 
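This version adds L2 regularization on top of the cross-entropy; slicing theta1[:,1:] and theta2[:,1:] excludes the bias columns, and learning_rate again serves as the regularization strength λ:

$$J_{reg} = J + \frac{\lambda}{2m} \left( \sum_{j} \sum_{k \ge 1} \left(\Theta^{(1)}_{j,k}\right)^{2} + \sum_{j} \sum_{k \ge 1} \left(\Theta^{(2)}_{j,k}\right)^{2} \right)$$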
Example #7
Source File: system_eq.py    From BiblioPixelAnimations with MIT License
def get_audio_data(self):
        frames = self.rec.get_frames()
        result = [0] * self.bins
        if len(frames) > 0:
            # keeps only the last frame
            current_frame = frames[-1]
            # plots the time signal
            # self.line_top.set_data(self.time_vect, current_frame)
            # computes and plots the fft signal
            fft_frame = np.fft.rfft(current_frame)
            if self.auto_gain:
                fft_frame /= np.abs(fft_frame).max()
            else:
                fft_frame *= (1 + self.gain) / 5000000.

            fft_frame = np.abs(fft_frame)
            if self.log_scale:
                fft_frame = np.log10(np.add(1, np.multiply(10, fft_frame)))

            result = [min(int(max(i, 0.) * 1023), 1023) for i in fft_frame][0:self.bins]

        return result 
Example #8
Source File: ocr_predict.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def forward_ocr(self, img_):
        img_ = cv2.resize(img_, (80, 30))
        img_ = img_.transpose(1, 0)
        print(img_.shape)
        img_ = img_.reshape((1, 80, 30))
        print(img_.shape)
        # img_ = img_.reshape((80 * 30))
        img_ = np.multiply(img_, 1 / 255.0)
        self.predictor.forward(data=img_, **self.init_state_dict)
        prob = self.predictor.get_output(0)
        label_list = []
        for p in prob:
            print(np.argsort(p))
            max_index = np.argsort(p)[::-1][0]
            label_list.append(max_index)
        return self.__get_string(label_list) 
Example #9
Source File: minitaur.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def getMotorAngles(self):
    motorAngles = []
    for i in range(self.nMotors):
      jointState = p.getJointState(self.quadruped, self.motorIdList[i])
      motorAngles.append(jointState[0])
    motorAngles = np.multiply(motorAngles, self.motorDir)
    return motorAngles 
Example #10
Source File: bnf.py    From openISP with MIT License
def execute(self):
        img_pad = self.padding()
        img_pad = img_pad.astype(np.uint16)
        raw_h = self.img.shape[0]
        raw_w = self.img.shape[1]
        bnf_img = np.empty((raw_h, raw_w), np.uint16)
        rdiff = np.zeros((5,5), dtype='uint16')
        for y in range(img_pad.shape[0] - 4):
            for x in range(img_pad.shape[1] - 4):
                for i in range(5):
                    for j in range(5):
                        rdiff[i,j] = abs(int(img_pad[y+i,x+j]) - int(img_pad[y+2, x+2]))  # cast before subtracting to avoid uint16 wrap-around
                        if rdiff[i,j] >= self.rthres[0]:
                            rdiff[i,j] = self.rw[0]
                        elif rdiff[i,j] < self.rthres[0] and rdiff[i,j] >= self.rthres[1]:
                            rdiff[i,j] = self.rw[1]
                        elif rdiff[i,j] < self.rthres[1] and rdiff[i,j] >= self.rthres[2]:
                            rdiff[i,j] = self.rw[2]
                        elif rdiff[i,j] < self.rthres[2]:
                            rdiff[i,j] = self.rw[3]
                weights = np.multiply(rdiff, self.dw)
                bnf_img[y,x] = np.sum(np.multiply(img_pad[y:y+5,x:x+5], weights[:,:])) / np.sum(weights)
        self.img = bnf_img
        return self.clipping() 
Example #11
Source File: ciftify_PINT_vertices.py    From ciftify with MIT License
def roi_surf_data(df, vertex_colname, surf, hemisphere, roi_radius):
    '''
    uses wb_command -surface-geodesic-rois to build rois (3D files)
    then loads and collapses them into a 1D array
    '''
    ## write the L and R hemisphere vertices from the table out to a temp text file
    with ciftify.utils.TempDir() as lil_tmpdir:
        ## write a temp vertex list file
        vertex_list = os.path.join(lil_tmpdir, 'vertex_list.txt')
        df.loc[df.hemi == hemisphere, vertex_colname].to_csv(vertex_list,sep='\n',index=False, header=False)

        ## from the temp text build - func masks and target masks
        roi_surf = os.path.join(lil_tmpdir,'roi_surf.func.gii')
        docmd(['wb_command', '-surface-geodesic-rois', surf,
            str(roi_radius),  vertex_list, roi_surf,
            '-overlap-logic', 'EXCLUDE'])
        rois_data = ciftify.niio.load_gii_data(roi_surf)

    ## multiply by labels and reduce to 1 vector
    vlabels = df[df.hemi == hemisphere].roiidx.tolist()
    rois_data = np.multiply(rois_data, vlabels)
    rois_data1D = np.max(rois_data, axis=1)

    return rois_data1D 
Example #12
Source File: mnist_input_data.py    From python-esppy with Apache License 2.0
def __init__(self, images, labels, fake_data=False, one_hot=False):
    """Construct a DataSet. one_hot arg is used only if fake_data is true."""

    if fake_data:
      self._num_examples = 10000
      self.one_hot = one_hot
    else:
      assert images.shape[0] == labels.shape[0], (
          'images.shape: %s labels.shape: %s' % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]

      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      # Convert from [0, 255] -> [0.0, 1.0].
      images = images.astype(numpy.float32)
      images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0 
Example #13
Source File: edge_smooth.py    From CartoonGAN-Tensorflow with MIT License
def make_edge_smooth(dataset_name, img_size) :
    check_folder('./dataset/{}/{}'.format(dataset_name, 'trainB_smooth'))

    file_list = glob('./dataset/{}/{}/*.*'.format(dataset_name, 'trainB'))
    save_dir = './dataset/{}/trainB_smooth'.format(dataset_name)

    kernel_size = 5
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    gauss = cv2.getGaussianKernel(kernel_size, 0)
    gauss = gauss * gauss.transpose(1, 0)

    for f in tqdm(file_list) :
        file_name = os.path.basename(f)

        bgr_img = cv2.imread(f)
        gray_img = cv2.imread(f, 0)

        bgr_img = cv2.resize(bgr_img, (img_size, img_size))
        pad_img = np.pad(bgr_img, ((2, 2), (2, 2), (0, 0)), mode='reflect')
        gray_img = cv2.resize(gray_img, (img_size, img_size))

        edges = cv2.Canny(gray_img, 100, 200)
        dilation = cv2.dilate(edges, kernel)

        gauss_img = np.copy(bgr_img)
        idx = np.where(dilation != 0)
        for i in range(np.sum(dilation != 0)):
            gauss_img[idx[0][i], idx[1][i], 0] = np.sum(
                np.multiply(pad_img[idx[0][i]:idx[0][i] + kernel_size, idx[1][i]:idx[1][i] + kernel_size, 0], gauss))
            gauss_img[idx[0][i], idx[1][i], 1] = np.sum(
                np.multiply(pad_img[idx[0][i]:idx[0][i] + kernel_size, idx[1][i]:idx[1][i] + kernel_size, 1], gauss))
            gauss_img[idx[0][i], idx[1][i], 2] = np.sum(
                np.multiply(pad_img[idx[0][i]:idx[0][i] + kernel_size, idx[1][i]:idx[1][i] + kernel_size, 2], gauss))

        cv2.imwrite(os.path.join(save_dir, file_name), gauss_img) 
Example #14
Source File: fisheye.py    From DualFisheye with MIT License
def get_uv(self, xyz_vec):
        # Extract lens parameters of interest.
        fov_rad = self.lens.fov_deg * pi / 180
        fov_scale = np.float32(2 * self.lens.radius_px / fov_rad)
        # Normalize the input vector and rotate to match lens reference axes.
        xyz_rot = get_rotation_matrix(self.lens.center_qq) * matrix_norm(xyz_vec)
        # Convert to polar coordinates relative to lens boresight.
        # (In lens coordinates, unit vector's X axis gives boresight angle;
        #  normalize Y/Z to get a planar unit vector for the bearing.)
        # Note: Image +Y maps to 3D +Y, and image +X maps to 3D +Z.
        theta_rad = np.arccos(xyz_rot[0,:])
        proj_vec = matrix_norm(np.concatenate((xyz_rot[2,:], xyz_rot[1,:])))
        # Fisheye lens maps 3D angle to focal-plane radius.
        # TODO: Do we need a better model for lens distortion?
        rad_px = theta_rad * fov_scale
        # Convert back to focal-plane rectangular coordinates.
        uv = np.multiply(rad_px, proj_vec) + self.lens.center_px
        return np.asarray(uv + 0.5, dtype=int)

    # Given a 2xN array of UV pixel coordinates, check if each pixel is
    # within the fisheye field of view. Returns N-element boolean mask. 
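The mapping rad_px = theta_rad * fov_scale is the equidistant fisheye model: focal-plane radius grows linearly with the angle θ from the lens boresight,

$$r = \theta \cdot \frac{2\, R_{px}}{\mathrm{FOV}_{rad}}$$

so a ray at half the field of view (θ = FOV/2) lands exactly at the lens radius.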
Example #15
Source File: fisheye.py    From DualFisheye with MIT License
def add_pixels(self, uv_px, img1d, weight=None):
        # Lookup row & column for each in-bounds coordinate.
        mask = self.get_mask(uv_px)
        xx = uv_px[0,mask]
        yy = uv_px[1,mask]
        # Update matrix according to assigned weight.
        if weight is None:
            img1d[mask] = self.img[yy,xx]
        elif np.isscalar(weight):
            img1d[mask] += self.img[yy,xx] * weight
        else:
            w1 = np.asmatrix(weight, dtype='float32')
            w3 = w1.transpose() * np.ones((1,3))
            img1d[mask] += np.multiply(self.img[yy,xx], w3[mask])


# A panorama image made from several FisheyeImage sources.
# TODO: Add support for supersampled anti-aliasing filters. 
Example #16
Source File: EasyTL.py    From transferlearning with MIT License
def get_cosine_dist(A, B):
    B = np.reshape(B, (1, -1))
    
    if A.shape[1] == 1:
        A = np.hstack((A, np.zeros((A.shape[0], 1))))
        B = np.hstack((B, np.zeros((B.shape[0], 1))))
    
    aa = np.sum(np.multiply(A, A), axis=1).reshape(-1, 1)
    bb = np.sum(np.multiply(B, B), axis=1).reshape(-1, 1)
    ab = A @ B.T
    
    # to avoid NaN for zero norm
    aa[aa==0] = 1
    bb[bb==0] = 1
    
    D = np.real(np.ones((A.shape[0], B.shape[0])) - np.multiply((1/np.sqrt(np.kron(aa, bb.T))), ab))
    
    return D 
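The function returns pairwise cosine distances; np.kron(aa, bb.T) builds the matrix of products of squared norms, whose square root normalizes the dot products:

$$D_{ij} = 1 - \frac{A_i \cdot B_j}{\lVert A_i \rVert \, \lVert B_j \rVert}$$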
Example #17
Source File: nlm.py    From openISP with MIT License
def calWeights(self, img, kernel, y, x):
        wmax = 0
        sweight = 0
        average = 0
        for j in range(2 * self.Ds + 1 - 2 * self.ds - 1):
            for i in range(2 * self.Ds + 1 - 2 * self.ds - 1):
                start_y = y - self.Ds + self.ds + j
                start_x = x - self.Ds + self.ds + i
                neighbour_w = img[start_y - self.ds:start_y + self.ds + 1, start_x - self.ds:start_x + self.ds + 1]
                center_w = img[y-self.ds:y+self.ds+1, x-self.ds:x+self.ds+1]
                if j != y or i != x:
                    sub = np.subtract(neighbour_w, center_w)
                    dist = np.sum(np.multiply(kernel, np.multiply(sub, sub)))
                    w = np.exp(-dist/pow(self.h, 2))    # replaced by look up table
                    if w > wmax:
                        wmax = w
                    sweight = sweight + w
                    average = average + w * img[start_y, start_x]
        return sweight, average, wmax 
Example #18
Source File: test_transform.py    From tensortrade with Apache License 2.0
def test_select():

    a3 = Stream([3, 2, 1]).rename("a3")

    with Module("world") as a:
        a1 = Stream([7, 8, 9]).rename("a1")
        a2 = Stream([3, 2, 1]).rename("a2")

    t1 = BinOp(np.multiply)(a1, a3).rename("t1")

    s = Select("world:/a1")(t1, a)
    feed = DataFeed([s])

    assert feed.next() == {"world:/a1": 7} 
Example #19
Source File: test_transform.py    From tensortrade with Apache License 2.0
def test_namespace():

    a = Stream([1, 2, 3]).rename("a")

    with Module("world") as world:
        a1 = Stream([4, 5, 6]).rename("a1")
        a2 = Stream([7, 8, 9]).rename("a2")

        with Module("sub-world") as sub_world:
            a3 = Stream([10, 11, 12]).rename("a3")
            a4 = Stream([13, 14, 15]).rename("a4")

            t3 = BinOp(np.add)(a2, a4).rename("t3")

    t1 = BinOp(np.multiply)(a, t3).rename("t1")

    feed = DataFeed([t1, world, sub_world])

    assert feed.next() == {
        "world:/a1": 4,
        "world:/a2": 7,
        "world:/sub-world:/a3": 10,
        "world:/sub-world:/a4": 13,
        "world:/sub-world:/t3": 20,
        "t1": 20
    }
    feed.reset()
    assert feed.next() == {
        "world:/a1": 4,
        "world:/a2": 7,
        "world:/sub-world:/a3": 10,
        "world:/sub-world:/a4": 13,
        "world:/sub-world:/t3": 20,
        "t1": 20
    } 
Example #20
Source File: nettack.py    From nettack with MIT License
def compute_cooccurrence_constraint(self, nodes):
        """
        Co-occurrence constraint as described in the paper.

        Parameters
        ----------
        nodes: np.array
            Nodes whose features are considered for change

        Returns
        -------
        np.array [len(nodes), D], dtype bool
            Binary matrix of dimension len(nodes) x D. A 1 in entry n,d indicates that
            we are allowed to add feature d to the features of node n.

        """

        words_graph = self.cooc_matrix.copy()
        D = self.X_obs.shape[1]
        words_graph.setdiag(0)
        words_graph = (words_graph > 0)
        word_degrees = np.sum(words_graph, axis=0).A1

        inv_word_degrees = np.reciprocal(word_degrees.astype(float) + 1e-8)

        sd = np.zeros([self.N])
        for n in range(self.N):
            n_idx = self.X_obs[n, :].nonzero()[1]
            sd[n] = np.sum(inv_word_degrees[n_idx.tolist()])

        scores_matrix = sp.lil_matrix((self.N, D))

        for n in nodes:
            common_words = words_graph.multiply(self.X_obs[n])
            idegs = inv_word_degrees[common_words.nonzero()[1]]
            nnz = common_words.nonzero()[0]
            scores = np.array([idegs[nnz == ix].sum() for ix in range(D)])
            scores_matrix[n] = scores
        self.cooc_constraint = sp.csr_matrix(scores_matrix - 0.5 * sd[:, None] > 0) 
Example #21
Source File: prepare.py    From DeepLung with GNU General Public License v3.0
def binarize_per_slice(image, spacing, intensity_th=-600, sigma=1, area_th=30, eccen_th=0.99, bg_patch_size=10):
    bw = np.zeros(image.shape, dtype=bool)
    
    # prepare a mask, with all corner values set to nan
    image_size = image.shape[1]
    grid_axis = np.linspace(-image_size/2+0.5, image_size/2-0.5, image_size)
    x, y = np.meshgrid(grid_axis, grid_axis)
    d = (x**2+y**2)**0.5
    nan_mask = (d<image_size/2).astype(float)
    nan_mask[nan_mask == 0] = np.nan
    for i in range(image.shape[0]):
        # Check if corner pixels are identical; if so, apply the circular nan mask to the slice before Gaussian filtering
        if len(np.unique(image[i, 0:bg_patch_size, 0:bg_patch_size])) == 1:
            current_bw = scipy.ndimage.filters.gaussian_filter(np.multiply(image[i].astype('float32'), nan_mask), sigma, truncate=2.0) < intensity_th
        else:
            current_bw = scipy.ndimage.filters.gaussian_filter(image[i].astype('float32'), sigma, truncate=2.0) < intensity_th
        
        # select proper components
        label = measure.label(current_bw)
        properties = measure.regionprops(label)
        valid_label = set()
        for prop in properties:
            if prop.area * spacing[1] * spacing[2] > area_th and prop.eccentricity < eccen_th:
                valid_label.add(prop.label)
        current_bw = np.in1d(label, list(valid_label)).reshape(label.shape)
        bw[i] = current_bw
        
    return bw 
Example #22
Source File: data_process.py    From nlp-tensorflow with MIT License
def sentence_to_tfidf_pos(lines, vocab, IDF):
    vocab_size = len(vocab)
    doc_size = len(lines)
    
    assert isinstance(lines, (list, tuple)), "Input type must be list or tuple."

    tf_idfs = []
    for line in lines:
        tokens = pos_extractor(line)
        freq = dict()
        TF = np.zeros(vocab_size, dtype=float)
        for token in tokens:
            if token in vocab.keys():
                if token in freq.keys():
                    freq[token] += 1
                else:
                    freq[token] = 1
        if len(freq) == 0:
            max_tf = 0
        else:
            max_tf = max(freq.values())
        tokens = set(tokens)
        for token in tokens:
            if token in vocab.keys():
                TF[vocab[token]] = 0.5 + 0.5 * freq[token] / max_tf
        tf_idf = np.multiply(TF, IDF)
        tf_idfs.append(tf_idf)
    tf_idfs = np.asarray(tf_idfs)

    return tf_idfs 
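The weighting used here is the augmented term frequency, which damps the bias toward documents with many repeated tokens, multiplied element-wise by a precomputed IDF vector:

$$\mathrm{TF}(t, d) = 0.5 + 0.5 \cdot \frac{f_{t,d}}{\max_{t'} f_{t',d}}, \qquad \mathrm{tfidf}(t, d) = \mathrm{TF}(t, d) \cdot \mathrm{IDF}(t)$$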
Example #23
Source File: node.py    From tensortrade with Apache License 2.0
def __mul__(self, other):
        if np.isscalar(other):
            other = Constant(other, "Constant({})".format(other))
            name = "Multiply({},{})".format(self.name, other.name)
            return BinOp(np.multiply, name)(self, other)
        assert isinstance(other, Node)
        name = "Multiply({},{})".format(self.name, other.name)
        return BinOp(np.multiply, name)(self, other) 
Example #24
Source File: data_process.py    From nlp-tensorflow with MIT License
def sentence_to_tfidf_morphs(lines, vocab, IDF):
    vocab_size = len(vocab)
    doc_size = len(lines)
    
    assert isinstance(lines, (list, tuple)), "Input type must be list or tuple."
    
    tf_idfs = []
    for line in lines:
        tokens = morphs_extractor(line)
        freq = dict()
        TF = np.zeros(vocab_size, dtype=float)
        for token in tokens:
            if token in vocab.keys():
                if token in freq.keys():
                    freq[token] += 1
                else:
                    freq[token] = 1
        if len(freq) == 0:
            max_tf = 0
        else:
            max_tf = max(freq.values())
        tokens = set(tokens)
        for token in tokens:
            if token in vocab.keys():
                TF[vocab[token]] = 0.5 + 0.5 * freq[token] / max_tf
        tf_idf = np.multiply(TF, IDF)
        tf_idfs.append(tf_idf)
    tf_idfs = np.asarray(tf_idfs)

    return tf_idfs 
Example #25
Source File: nettack.py    From nettack with MIT License
def feature_scores(self):
        """
        Compute feature scores for all possible feature changes.
        """

        if self.cooc_constraint is None:
            self.compute_cooccurrence_constraint(self.influencer_nodes)
        logits = self.compute_logits()
        best_wrong_class = self.strongest_wrong_class(logits)
        gradient = self.gradient_wrt_x(self.label_u) - self.gradient_wrt_x(best_wrong_class)
        surrogate_loss = logits[self.label_u] - logits[best_wrong_class]

        gradients_flipped = (gradient * -1).tolil()
        gradients_flipped[self.X_obs.nonzero()] *= -1

        X_influencers = sp.lil_matrix(self.X_obs.shape)
        X_influencers[self.influencer_nodes] = self.X_obs[self.influencer_nodes]
        gradients_flipped = gradients_flipped.multiply((self.cooc_constraint + X_influencers) > 0)
        nnz_ixs = np.array(gradients_flipped.nonzero()).T

        sorting = np.argsort(gradients_flipped[tuple(nnz_ixs.T)]).A1
        sorted_ixs = nnz_ixs[sorting]
        grads = gradients_flipped[tuple(nnz_ixs[sorting].T)]

        scores = surrogate_loss - grads
        return sorted_ixs[::-1], scores.A1[::-1] 
Example #26
Source File: nettack.py    From nettack with MIT License
def update_Sx(S_old, n_old, d_old, d_new, d_min):
    """
    Update the sum of log degrees S_d and the count n for the degree distribution that results from inserting or deleting
    a single edge.

    Parameters
    ----------
    S_old: float
         Sum of log degrees in the distribution that are larger than or equal to d_min.

    n_old: int
        Number of entries in the old distribution that are larger than or equal to d_min.

    d_old: np.array, shape [N,] dtype int
        The old degree sequence.

    d_new: np.array, shape [N,] dtype int
        The new degree sequence.

    d_min: int
        The minimum degree of nodes to consider

    Returns
    -------
    new_S_d: float, the updated sum of log degrees in the distribution that are larger than or equal to d_min.
    new_n: int, the updated number of entries in the old distribution that are larger than or equal to d_min.
    """

    old_in_range = d_old >= d_min
    new_in_range = d_new >= d_min

    d_old_in_range = np.multiply(d_old, old_in_range)
    d_new_in_range = np.multiply(d_new, new_in_range)

    new_S_d = S_old - np.log(np.maximum(d_old_in_range, 1)).sum(1) + np.log(np.maximum(d_new_in_range, 1)).sum(1)
    new_n = n_old - np.sum(old_in_range, 1) + np.sum(new_in_range, 1)

    return new_S_d, new_n 
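Because np.multiply with the boolean masks zeroes out degrees below d_min, and np.maximum(…, 1) makes the log contribution of those zeroed entries vanish, the update reduces to (assuming d_min ≥ 1):

$$S_{new} = S_{old} - \sum_{i:\, d^{old}_i \ge d_{min}} \log d^{old}_i + \sum_{i:\, d^{new}_i \ge d_{min}} \log d^{new}_i, \qquad n_{new} = n_{old} - \left|\{i : d^{old}_i \ge d_{min}\}\right| + \left|\{i : d^{new}_i \ge d_{min}\}\right|$$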
Example #27
Source File: qchem_inter_rf.py    From pyscf with Apache License 2.0
def kernel_qchem_inter_rf(self, **kw):
    from pyscf.gw.gw import rpa_AB_matrices
    """ This is constructing A B matrices and diagonalizes the problem """

    nf = self.nfermi[0]
    nv = self.norbs-self.vstart[0]
    vs = self.vstart[0]
    
    x = self.mo_coeff[0,0,:,:,0]
    pab2v = self.pb.get_ac_vertex_array()
    self.pmn2v = pmn2v = einsum('nb,pmb->pmn', x[:nf,:], einsum('ma,pab->pmb', x[vs:,:], pab2v))
    pmn2c = einsum('qp,pmn->qmn', self.hkernel_den, pmn2v)
    meri = einsum('pmn,pik->mnik', pmn2c, pmn2v)
    #meri.fill(0.0)

    A = (diagflat( self.FmE ).reshape([nv,nf,nv,nf]) + meri).reshape([nv*nf,nv*nf])
    B = meri.reshape([nv*nf,nv*nf])

    assert np.allclose(A, A.transpose())
    assert np.allclose(B, B.transpose())

    AmB = A-B
    n = len(AmB)
    print(__name__)
    for i in range(n): print(i, AmB[i,i])
    
    ham_rpa = np.multiply(self.sqrt_FmE[:,None], np.multiply(A+B, self.sqrt_FmE))
    esq, self.s2z = np.linalg.eigh(ham_rpa)
    self.s2omega = np.sqrt(esq)
    print(self.s2omega*27.2114)

    self.s2z = self.s2z.T
    self.s2xpy = np.zeros_like(self.s2z)
    for s,(e2,z) in enumerate(zip(esq, self.s2z)):
      w = np.sqrt(e2)
      self.s2xpy[s] = np.multiply(self.sqrt_FmE, self.s2z[s])/np.sqrt(w)
      #print(e2, abs(np.dot(ham_rpa,z)-e2*z).sum())
    return self.s2omega,self.s2z 
Example #28
Source File: data_process.py    From nlp-tensorflow with MIT License
def sentence_to_tfidf(lines, vocab, IDF):
    vocab_size = len(vocab)
    doc_size = len(lines)
    
    assert isinstance(lines, (list, tuple)), "Input type must be list or tuple."

    tf_idfs = []
    for line in lines:
        tokens = tokenizer(line)
        freq = dict()
        TF = np.zeros(vocab_size, dtype=float)
        for token in tokens:
            if token in vocab.keys():
                if token in freq.keys():
                    freq[token] += 1
                else:
                    freq[token] = 1
        if len(freq) == 0:
            max_tf = 0
        else:
            max_tf = max(freq.values())
        tokens = set(tokens)
        for token in tokens:
            if token in vocab.keys():
                TF[vocab[token]] = 0.5 + 0.5 * freq[token] / max_tf
        tf_idf = np.multiply(TF, IDF)
        tf_idfs.append(tf_idf)
    tf_idfs = np.asarray(tf_idfs)

    return tf_idfs 