Python numpy.multiply() Examples

The following are code examples showing how to use numpy.multiply(), extracted from open source projects. The originating project, author, source file, and license are noted above each example where available.

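Before the project examples, a minimal sketch of the basic behaviour (our own illustration, not taken from any of the projects below): numpy.multiply() is the elementwise product with standard NumPy broadcasting. For plain ndarrays it is equivalent to the * operator; for np.matrix objects, which several examples below use, * means matrix multiplication, so np.multiply is the way to get an elementwise product.

import numpy as np

a = np.array([[1.0, 2.0, 3.0],
              [4.0, 5.0, 6.0]])
b = np.array([10.0, 20.0, 30.0])

# Elementwise product; b is broadcast across the rows of a.
print(np.multiply(a, b))       # [[ 10.  40.  90.]
                               #  [ 40. 100. 180.]]

# Scaling by a scalar, as several examples below do with 1 / 255.0.
print(np.multiply(a, 0.5))     # same as a * 0.5

# For np.matrix, * is matrix multiplication; np.multiply stays elementwise.
m = np.matrix(a)
print(np.multiply(m, m))       # squares each entry, unlike the matrix product m * m.T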

Example 1
def image(self, captcha_str):
        """
        Generate a greyscale captcha image representing the given number string

        Parameters
        ----------
        captcha_str: str
            string of characters for the captcha image

        Returns
        -------
        numpy.ndarray
            Generated greyscale image in np.ndarray float type with values normalized to [0, 1]
        """
        img = self.captcha.generate(captcha_str)
        img = np.frombuffer(img.getvalue(), dtype='uint8')  # np.fromstring is deprecated for binary data
        img = cv2.imdecode(img, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (self.h, self.w))
        img = img.transpose(1, 0)
        img = np.multiply(img, 1 / 255.0)
        return img 
Example 2
Project: BiblioPixelAnimations   Author: ManiacalLabs   File: system_eq.py    License: MIT License
def get_audio_data(self):
        frames = self.rec.get_frames()
        result = [0] * self.bins
        if len(frames) > 0:
            # keeps only the last frame
            current_frame = frames[-1]
            # plots the time signal
            # self.line_top.set_data(self.time_vect, current_frame)
            # computes and plots the fft signal
            fft_frame = np.fft.rfft(current_frame)
            if self.auto_gain:
                fft_frame /= np.abs(fft_frame).max()
            else:
                fft_frame *= (1 + self.gain) / 5000000.

            fft_frame = np.abs(fft_frame)
            if self.log_scale:
                fft_frame = np.log10(np.add(1, np.multiply(10, fft_frame)))

            result = [min(int(max(i, 0.) * 1023), 1023) for i in fft_frame][0:self.bins]

        return result 
Example 4
Project: deep-learning-note   Author: wdxtub   File: 5_nueral_network.py    License: MIT License
def cost0(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)
    
    # reshape the parameter array into parameter matrices for each layer
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))
    
    # run the feed-forward pass
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
    
    # compute the cost
    J = 0
    for i in range(m):
        first_term = np.multiply(-y[i,:], np.log(h[i,:]))
        second_term = np.multiply((1 - y[i,:]), np.log(1 - h[i,:]))
        J += np.sum(first_term - second_term)
    
    J = J / m
    
    return J 
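For reference, the per-sample loop in cost0 above can be collapsed into a single vectorized expression built from the same np.multiply calls. A minimal sketch under the same assumptions as the example (cross_entropy_cost is our own name, not part of the project; h and y are the arrays produced by forward_propagate):

import numpy as np

def cross_entropy_cost(h, y):
    # Mean binary cross-entropy over all samples and output units,
    # equivalent to the per-row loop in cost0.
    m = h.shape[0]
    first = np.multiply(-y, np.log(h))
    second = np.multiply(1 - y, np.log(1 - h))
    return np.sum(first - second) / m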
Example 5
Project: deep-learning-note   Author: wdxtub   File: 5_nueral_network.py    License: MIT License
def cost(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)
    
    # reshape the parameter array into parameter matrices for each layer
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))
    
    # run the feed-forward pass
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
    
    # compute the cost
    J = 0
    for i in range(m):
        first_term = np.multiply(-y[i,:], np.log(h[i,:]))
        second_term = np.multiply((1 - y[i,:]), np.log(1 - h[i,:]))
        J += np.sum(first_term - second_term)
    
    J = J / m
    
    # add the cost regularization term
    J += (float(learning_rate) / (2 * m)) * (np.sum(np.power(theta1[:,1:], 2)) + np.sum(np.power(theta2[:,1:], 2)))
    
    return J 
Example 6
Project: deep-learning-note   Author: wdxtub   File: 9_anomaly_and_rec.py    License: MIT License
def cost(params, Y, R, num_features):
    Y = np.matrix(Y)  # (1682, 943)
    R = np.matrix(R)  # (1682, 943)
    num_movies = Y.shape[0]
    num_users = Y.shape[1]
    
    # reshape the parameter array into parameter matrices
    X = np.matrix(np.reshape(params[:num_movies * num_features], (num_movies, num_features)))  # (1682, 10)
    Theta = np.matrix(np.reshape(params[num_movies * num_features:], (num_users, num_features)))  # (943, 10)
    
    # initializations
    J = 0
    
    # compute the cost
    error = np.multiply((X * Theta.T) - Y, R)  # (1682, 943)
    squared_error = np.power(error, 2)  # (1682, 943)
    J = (1. / 2) * np.sum(squared_error)
    
    return J 
Example 7
Project: deep-learning-note   Author: wdxtub   File: 3_logistic_regression.py    License: MIT License
def gradientReg(theta, X, y, learningRate):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    
    parameters = int(theta.ravel().shape[1])
    grad = np.zeros(parameters)
    
    error = sigmoid(X * theta.T) - y
    
    for i in range(parameters):
        term = np.multiply(error, X[:,i])
        
        if (i == 0):
            grad[i] = np.sum(term) / len(X)
        else:
            grad[i] = (np.sum(term) / len(X)) + ((learningRate / len(X)) * theta[:,i])
    
    return grad 
Example 8
def forward_ocr(self, img_):
        img_ = cv2.resize(img_, (80, 30))
        img_ = img_.transpose(1, 0)
        print(img_.shape)
        img_ = img_.reshape((1, 80, 30))
        print(img_.shape)
        # img_ = img_.reshape((80 * 30))
        img_ = np.multiply(img_, 1 / 255.0)
        self.predictor.forward(data=img_, **self.init_state_dict)
        prob = self.predictor.get_output(0)
        label_list = []
        for p in prob:
            print(np.argsort(p))
            max_index = np.argsort(p)[::-1][0]
            label_list.append(max_index)
        return self.__get_string(label_list) 
Example 9
Project: transferlearning   Author: jindongwang   File: EasyTL.py    License: MIT License
def get_cosine_dist(A, B):
    B = np.reshape(B, (1, -1))
    
    if A.shape[1] == 1:
        A = np.hstack((A, np.zeros((A.shape[0], 1))))
        B = np.hstack((B, np.zeros((B.shape[0], 1))))
    
    aa = np.sum(np.multiply(A, A), axis=1).reshape(-1, 1)
    bb = np.sum(np.multiply(B, B), axis=1).reshape(-1, 1)
    ab = A @ B.T
    
    # to avoid NaN for zero norm
    aa[aa==0] = 1
    bb[bb==0] = 1
    
    D = np.real(np.ones((A.shape[0], B.shape[0])) - np.multiply((1/np.sqrt(np.kron(aa, bb.T))), ab))
    
    return D 
Example 10
Project: openISP   Author: cruxopen   File: nlm.py    License: MIT License
def calWeights(self, img, kernel, y, x):
        wmax = 0
        sweight = 0
        average = 0
        for j in range(2 * self.Ds + 1 - 2 * self.ds - 1):
            for i in range(2 * self.Ds + 1 - 2 * self.ds - 1):
                start_y = y - self.Ds + self.ds + j
                start_x = x - self.Ds + self.ds + i
                neighbour_w = img[start_y - self.ds:start_y + self.ds + 1, start_x - self.ds:start_x + self.ds + 1]
                center_w = img[y-self.ds:y+self.ds+1, x-self.ds:x+self.ds+1]
                if j != y or i != x:
                    sub = np.subtract(neighbour_w, center_w)
                    dist = np.sum(np.multiply(kernel, np.multiply(sub, sub)))
                    w = np.exp(-dist/pow(self.h, 2))    # replaced by look up table
                    if w > wmax:
                        wmax = w
                    sweight = sweight + w
                    average = average + w * img[start_y, start_x]
        return sweight, average, wmax 
Example 11
Project: openISP   Author: cruxopen   File: bnf.py    License: MIT License
def execute(self):
        img_pad = self.padding()
        img_pad = img_pad.astype(np.uint16)
        raw_h = self.img.shape[0]
        raw_w = self.img.shape[1]
        bnf_img = np.empty((raw_h, raw_w), np.uint16)
        rdiff = np.zeros((5,5), dtype='uint16')
        for y in range(img_pad.shape[0] - 4):
            for x in range(img_pad.shape[1] - 4):
                for i in range(5):
                    for j in range(5):
                        rdiff[i,j] = abs(img_pad[y+i,x+j] - img_pad[y+2, x+2])
                        if rdiff[i,j] >= self.rthres[0]:
                            rdiff[i,j] = self.rw[0]
                        elif rdiff[i,j] < self.rthres[0] and rdiff[i,j] >= self.rthres[1]:
                            rdiff[i,j] = self.rw[1]
                        elif rdiff[i,j] < self.rthres[1] and rdiff[i,j] >= self.rthres[2]:
                            rdiff[i,j] = self.rw[2]
                        elif rdiff[i,j] < self.rthres[2]:
                            rdiff[i,j] = self.rw[3]
                weights = np.multiply(rdiff, self.dw)
                bnf_img[y,x] = np.sum(np.multiply(img_pad[y:y+5,x:x+5], weights[:,:])) / np.sum(weights)
        self.img = bnf_img
        return self.clipping() 
Example 12
Project: ciftify   Author: edickie   File: ciftify_PINT_vertices.py    License: MIT License
def roi_surf_data(df, vertex_colname, surf, hemisphere, roi_radius):
    '''
    uses wb_command -surface-geodesic-rois to build rois (3D files)
    then load and collapse that into a 1D array
    '''
    ## write the L and R hemisphere vertices from the table out to a temp text file
    with ciftify.utils.TempDir() as lil_tmpdir:
        ## write a temp vertex list file
        vertex_list = os.path.join(lil_tmpdir, 'vertex_list.txt')
        df.loc[df.hemi == hemisphere, vertex_colname].to_csv(vertex_list,sep='\n',index=False, header=False)

        ## from the temp text build - func masks and target masks
        roi_surf = os.path.join(lil_tmpdir,'roi_surf.func.gii')
        docmd(['wb_command', '-surface-geodesic-rois', surf,
            str(roi_radius),  vertex_list, roi_surf,
            '-overlap-logic', 'EXCLUDE'])
        rois_data = ciftify.niio.load_gii_data(roi_surf)

    ## multiply by labels and reduce to 1 vector
    vlabels = df[df.hemi == hemisphere].roiidx.tolist()
    rois_data = np.multiply(rois_data, vlabels)
    rois_data1D = np.max(rois_data, axis=1)

    return rois_data1D 
Example 13
Project: python-esppy   Author: sassoftware   File: mnist_input_data.py    License: Apache License 2.0
def __init__(self, images, labels, fake_data=False, one_hot=False):
    """Construct a DataSet. one_hot arg is used only if fake_data is true."""

    if fake_data:
      self._num_examples = 10000
      self.one_hot = one_hot
    else:
      assert images.shape[0] == labels.shape[0], (
          'images.shape: %s labels.shape: %s' % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]

      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      # Convert from [0, 255] -> [0.0, 1.0].
      images = images.astype(numpy.float32)
      images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0 
Example 14
Project: CartoonGAN-Tensorflow   Author: taki0112   File: edge_smooth.py    License: MIT License
def make_edge_smooth(dataset_name, img_size) :
    check_folder('./dataset/{}/{}'.format(dataset_name, 'trainB_smooth'))

    file_list = glob('./dataset/{}/{}/*.*'.format(dataset_name, 'trainB'))
    save_dir = './dataset/{}/trainB_smooth'.format(dataset_name)

    kernel_size = 5
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    gauss = cv2.getGaussianKernel(kernel_size, 0)
    gauss = gauss * gauss.transpose(1, 0)

    for f in tqdm(file_list) :
        file_name = os.path.basename(f)

        bgr_img = cv2.imread(f)
        gray_img = cv2.imread(f, 0)

        bgr_img = cv2.resize(bgr_img, (img_size, img_size))
        pad_img = np.pad(bgr_img, ((2, 2), (2, 2), (0, 0)), mode='reflect')
        gray_img = cv2.resize(gray_img, (img_size, img_size))

        edges = cv2.Canny(gray_img, 100, 200)
        dilation = cv2.dilate(edges, kernel)

        gauss_img = np.copy(bgr_img)
        idx = np.where(dilation != 0)
        for i in range(np.sum(dilation != 0)):
            gauss_img[idx[0][i], idx[1][i], 0] = np.sum(
                np.multiply(pad_img[idx[0][i]:idx[0][i] + kernel_size, idx[1][i]:idx[1][i] + kernel_size, 0], gauss))
            gauss_img[idx[0][i], idx[1][i], 1] = np.sum(
                np.multiply(pad_img[idx[0][i]:idx[0][i] + kernel_size, idx[1][i]:idx[1][i] + kernel_size, 1], gauss))
            gauss_img[idx[0][i], idx[1][i], 2] = np.sum(
                np.multiply(pad_img[idx[0][i]:idx[0][i] + kernel_size, idx[1][i]:idx[1][i] + kernel_size, 2], gauss))

        cv2.imwrite(os.path.join(save_dir, file_name), gauss_img) 
Example 15
Project: DualFisheye   Author: ooterness   File: fisheye.py    License: MIT License
def get_uv(self, xyz_vec):
        # Extract lens parameters of interest.
        fov_rad = self.lens.fov_deg * pi / 180
        fov_scale = np.float32(2 * self.lens.radius_px / fov_rad)
        # Normalize the input vector and rotate to match lens reference axes.
        xyz_rot = get_rotation_matrix(self.lens.center_qq) * matrix_norm(xyz_vec)
        # Convert to polar coordinates relative to lens boresight.
        # (In lens coordinates, unit vector's X axis gives boresight angle;
        #  normalize Y/Z to get a planar unit vector for the bearing.)
        # Note: Image +Y maps to 3D +Y, and image +X maps to 3D +Z.
        theta_rad = np.arccos(xyz_rot[0,:])
        proj_vec = matrix_norm(np.concatenate((xyz_rot[2,:], xyz_rot[1,:])))
        # Fisheye lens maps 3D angle to focal-plane radius.
        # TODO: Do we need a better model for lens distortion?
        rad_px = theta_rad * fov_scale
        # Convert back to focal-plane rectangular coordinates.
        uv = np.multiply(rad_px, proj_vec) + self.lens.center_px
        return np.asarray(uv + 0.5, dtype=int)

    # Given a 2xN array of UV pixel coordinates, check if each pixel is
    # within the fisheye field of view. Returns N-element boolean mask. 
Example 16
Project: DualFisheye   Author: ooterness   File: fisheye.py    License: MIT License
def add_pixels(self, uv_px, img1d, weight=None):
        # Lookup row & column for each in-bounds coordinate.
        mask = self.get_mask(uv_px)
        xx = uv_px[0,mask]
        yy = uv_px[1,mask]
        # Update matrix according to assigned weight.
        if weight is None:
            img1d[mask] = self.img[yy,xx]
        elif np.isscalar(weight):
            img1d[mask] += self.img[yy,xx] * weight
        else:
            w1 = np.asmatrix(weight, dtype='float32')
            w3 = w1.transpose() * np.ones((1,3))
            img1d[mask] += np.multiply(self.img[yy,xx], w3[mask])


# A panorama image made from several FisheyeImage sources.
# TODO: Add support for supersampled anti-aliasing filters. 
Example 17
Project: Jtyoui   Author: jtyoui   File: fm.py    License: MIT License
def get_prediction(data, w0, w, v):
    """预测值

    :param data: 特征
    :param w0: 一次项权重
    :param w: 常数项权重
    :param v: 交叉项权重
    :return: 预测结果
    """
    m = np.shape(data)[0]
    result = []
    for x in range(m):
        inter_1 = data[x] * v
        inter_2 = np.multiply(data[x], data[x]) * np.multiply(v, v)
        inter = np.sum(np.multiply(inter_1, inter_1) - inter_2) / 2.
        p = w0 + data[x] * w + inter
        pre = sigmoid(p[0, 0])
        result.append(pre)
    return result 
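The inter_1 / inter_2 lines above use the standard factorization-machine trick: the sum of pairwise feature interactions equals half of the squared linear terms minus the sum of squared individual terms. The following quick numeric check of that identity is our own sketch on random toy data, not part of the project:

import numpy as np

# Identity being checked:
#   sum_{i<j} x_i * x_j * <v_i, v_j>
#     = 0.5 * sum_f [ (sum_i x_i v_{i,f})^2 - sum_i x_i^2 v_{i,f}^2 ]
rng = np.random.default_rng(0)
x = np.matrix(rng.normal(size=(1, 4)))   # one sample, 4 features
v = np.matrix(rng.normal(size=(4, 3)))   # 3 latent factors per feature

inter_1 = x * v
inter_2 = np.multiply(x, x) * np.multiply(v, v)
fast = np.sum(np.multiply(inter_1, inter_1) - inter_2) / 2.

# Brute-force sum over feature pairs i < j of x_i * x_j * <v_i, v_j>
slow = sum(x[0, i] * x[0, j] * (v[i] * v[j].T)[0, 0]
           for i in range(4) for j in range(i + 1, 4))

print(np.isclose(fast, slow))  # True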
Example 18
Project: deep-learning-note   Author: wdxtub   File: 4_multi_classification.py    License: MIT License
def cost(theta, X, y, learningRate):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
    reg = (learningRate / (2 * len(X))) * np.sum(np.power(theta[:,1:theta.shape[1]], 2))
    return np.sum(first - second) / len(X) + reg 
Example 19
Project: deep-learning-note   Author: wdxtub   File: 4_multi_classification.py    License: MIT License
def gradient(theta, X, y, learningRate):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    
    parameters = int(theta.ravel().shape[1])
    error = sigmoid(X * theta.T) - y
    
    grad = ((X.T * error) / len(X)).T + ((learningRate / len(X)) * theta)
    
    # intercept gradient is not regularized
    grad[0, 0] = np.sum(np.multiply(error, X[:,0])) / len(X)
    
    return np.array(grad).ravel() 
Example 20
Project: deep-learning-note   Author: wdxtub   File: 5_nueral_network.py    License: MIT License
def sigmoid_gradient(z):
    return np.multiply(sigmoid(z), (1 - sigmoid(z))) 
Example 21
Project: deep-learning-note   Author: wdxtub   File: 2_linear_regression.py    License: MIT License
def gradientDescent(X, y, theta, alpha, iters):
    temp = np.matrix(np.zeros(theta.shape))
    parameters = int(theta.ravel().shape[1])
    cost = np.zeros(iters)

    for i in range(iters):
        error = (X * theta.T) - y

        for j in range(parameters):
            term = np.multiply(error, X[:, j])
            temp[0,j] = theta[0,j] - ((alpha / len(X)) * np.sum(term))
        
        theta = temp
        cost[i] = computeCost(X, y, theta)
    return theta, cost 
Example 22
Project: deep-learning-note   Author: wdxtub   File: 9_anomaly_and_rec.py    License: MIT License
def cost0(params, Y, R, num_features):
    Y = np.matrix(Y)  # (1682, 943)
    R = np.matrix(R)  # (1682, 943)
    num_movies = Y.shape[0]
    num_users = Y.shape[1]
    
    # reshape the parameter array into parameter matrices
    X = np.matrix(np.reshape(params[:num_movies * num_features], (num_movies, num_features)))  # (1682, 10)
    Theta = np.matrix(np.reshape(params[num_movies * num_features:], (num_users, num_features)))  # (943, 10)
    
    # initializations
    J = 0
    X_grad = np.zeros(X.shape)  # (1682, 10)
    Theta_grad = np.zeros(Theta.shape)  # (943, 10)
    
    # compute the cost
    error = np.multiply((X * Theta.T) - Y, R)  # (1682, 943)
    squared_error = np.power(error, 2)  # (1682, 943)
    J = (1. / 2) * np.sum(squared_error)
    
    # calculate the gradients
    X_grad = error * Theta
    Theta_grad = error.T * X
    
    # unravel the gradient matrices into a single array
    grad = np.concatenate((np.ravel(X_grad), np.ravel(Theta_grad)))
    
    return J, grad 
Example 23
Project: deep-learning-note   Author: wdxtub   File: 9_anomaly_and_rec.py    License: MIT License
def cost1(params, Y, R, num_features, learning_rate):
    Y = np.matrix(Y)  # (1682, 943)
    R = np.matrix(R)  # (1682, 943)
    num_movies = Y.shape[0]
    num_users = Y.shape[1]
    
    # reshape the parameter array into parameter matrices
    X = np.matrix(np.reshape(params[:num_movies * num_features], (num_movies, num_features)))  # (1682, 10)
    Theta = np.matrix(np.reshape(params[num_movies * num_features:], (num_users, num_features)))  # (943, 10)
    
    # initializations
    J = 0
    X_grad = np.zeros(X.shape)  # (1682, 10)
    Theta_grad = np.zeros(Theta.shape)  # (943, 10)
    
    # compute the cost
    error = np.multiply((X * Theta.T) - Y, R)  # (1682, 943)
    squared_error = np.power(error, 2)  # (1682, 943)
    J = (1. / 2) * np.sum(squared_error)
    
    # add the cost regularization
    J = J + ((learning_rate / 2) * np.sum(np.power(Theta, 2)))
    J = J + ((learning_rate / 2) * np.sum(np.power(X, 2)))
    
    # calculate the gradients with regularization
    X_grad = (error * Theta) + (learning_rate * X)
    Theta_grad = (error.T * X) + (learning_rate * Theta)
    
    # unravel the gradient matrices into a single array
    grad = np.concatenate((np.ravel(X_grad), np.ravel(Theta_grad)))
    
    return J, grad 
Example 24
Project: deep-learning-note   Author: wdxtub   File: 3_logistic_regression.py    License: MIT License
def cost(theta, X, y):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
    return np.sum(first - second) / (len(X)) 
Example 25
Project: deep-learning-note   Author: wdxtub   File: 3_logistic_regression.py    License: MIT License
def costReg(theta, X, y, learningRate):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
    reg = (learningRate / (2 * len(X))) * np.sum(np.power(theta[:,1:theta.shape[1]], 2))
    return np.sum(first - second) / len(X) + reg 
Example 26
Project: NiBetaSeries   Author: HBClab   File: conftest.py    License: MIT License
def sub_events(bids_dir, sub_metadata, preproc_file,
               bids_events_fname=bids_events_fname):
    events_file = bids_dir.ensure(bids_events_fname)
    # read in subject metadata to get the TR
    with open(str(sub_metadata), 'r') as md:
        bold_metadata = json.load(md)
    tr = bold_metadata["RepetitionTime"]
    # time_points
    tp = nib.load(str(preproc_file)).shape[-1]
    # create voxel timeseries
    task_onsets = np.zeros(tp)
    # add waffles at every 40 time points
    task_onsets[0::40] = 1
    # add fries at every 40 time points starting at 3
    task_onsets[3::40] = 1
    # add milkshakes at every 40 time points starting at 6
    task_onsets[6::40] = 1
    # create event tsv
    num_trials = np.where(task_onsets == 1)[0].shape[0]
    onsets = np.multiply(np.where(task_onsets == 1), tr).reshape(num_trials)
    durations = [1] * num_trials
    num_conds = 3
    trial_types = ['waffle', 'fry', 'milkshake'] * int((num_trials / num_conds))
    events_df = pd.DataFrame.from_dict({'onset': onsets,
                                        'duration': durations,
                                        'trial_type': trial_types})
    # reorder columns
    events_df = events_df[['onset', 'duration', 'trial_type']]
    # save the events_df to file
    events_df.to_csv(str(events_file), index=False, sep='\t')
    return events_file 
Example 27
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: gradcam.py    License: Apache License 2.0
def get_guided_grad_cam(cam, imggrad):
    """Compute Guided Grad-CAM. Refer section 3 of https://arxiv.org/abs/1610.02391 for details"""
    return np.multiply(cam, imggrad) 
Example 28
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: randomproj.py    License: Apache License 2.0
def _get_mask(self, idx, in_data):
        """Returns the mask by which to multiply the parts of the embedding layer.
        In this version, we have no weights to apply.
        """
        mask = idx >= 0  # bool False for -1 values that should be removed. shape=(b,mnz)
        mask = np.expand_dims(mask,2) # shape = (b,mnz,1)
        mask = np.repeat(mask, self._proj_dim, axis=2) # shape = (b,mnz,d)
        return mask 
Example 29
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: randomproj.py    License: Apache License 2.0
def forward(self, is_train, req, in_data, out_data, aux):
        #Note: see this run in notebooks/howto-numpy-random-proj.ipynb
        # Notation for shapes: b = batch_size, mnz = max_nonzero, d = proj_dim
        idx = in_data[0].asnumpy().astype('int32') # shape=(b,mnz)

        wd = self.W[idx]  # shape= (b,mnz,d)
        mask = self._get_mask(idx, in_data)
        wd = np.multiply(wd,mask)  # shape=(b,mnz,d), but zero'd out non-masked
        y = np.sum(wd,axis=1)  # shape=(b,d)
        mxy = mx.nd.array(y)  #NOTE: this hangs if the environment variables aren't set correctly
        # See https://github.com/dmlc/mxnet/issues/3813
        self.assign(out_data[0], req[0], mxy) 
Example 30
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: VAE.py    License: Apache License 2.0
def sampler(mu, logvar):
        z = mu + np.multiply(np.exp(0.5*logvar), np.random.normal(loc=0, scale=1,size=np.shape(logvar)))
        return z