Python autograd.numpy.matmul() Examples

The following are 13 code examples of autograd.numpy.matmul(), drawn from open-source projects. The originating project, source file, and license are noted above each example. You may also want to check out the other available functions and classes of the autograd.numpy module.
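As a quick orientation before the examples, here is a minimal sketch (not taken from any of the projects below) of differentiating through autograd.numpy.matmul with autograd.grad; the function quadratic_form and its inputs are illustrative assumptions:

import autograd.numpy as np
from autograd import grad

def quadratic_form(w, A, x):
    # scalar output built from matmul, so grad() can differentiate it
    return np.matmul(w, np.matmul(A, x))

A = np.array([[1.0, 2.0], [3.0, 4.0]])
x = np.array([0.5, -1.0])
w = np.array([1.0, 1.0])

# gradient with respect to the first argument w; for this function it equals np.matmul(A, x)
print(grad(quadratic_form, 0)(w, A, x))  # [-1.5 -2.5]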
Example #1
Source File: RecurrentNeuralNetworks.py    From DeepLearningTutorial with MIT License
def forward_pass(self, X, hyp):     
        Q = self.hidden_dim
        H = np.zeros((X.shape[1],Q))
        
        # Unpack the input-to-hidden (U), bias (b), and hidden-to-hidden (W) weights from the flat vector hyp
        idx_1 = 0
        idx_2 = idx_1 + self.X_dim*Q
        idx_3 = idx_2 + Q
        idx_4 = idx_3 + Q*Q
        U = np.reshape(hyp[idx_1:idx_2], (self.X_dim,Q))
        b = np.reshape(hyp[idx_2:idx_3], (1,Q))
        W = np.reshape(hyp[idx_3:idx_4], (Q,Q))
        
        # Unroll the recurrence over the lagged inputs
        for i in range(0, self.lags):
            H = activation(np.matmul(H,W) + np.matmul(X[i,:,:],U) + b)
                
        # Unpack the hidden-to-output weights (V) and bias (c)
        idx_1 = idx_4
        idx_2 = idx_1 + Q*self.Y_dim
        idx_3 = idx_2 + self.Y_dim
        V = np.reshape(hyp[idx_1:idx_2], (Q,self.Y_dim))
        c = np.reshape(hyp[idx_2:idx_3], (1,self.Y_dim))
        Y = np.matmul(H,V) + c
        
        return Y 
Example #2
Source File: NeuralNetworks.py    From DeepLearningTutorial with MIT License
def forward_pass(self, X, Q, hyp):
        H = X
        idx_3 = 0
        layers = Q.shape[0]   
        # Hidden layers: unpack each weight matrix A and bias b from the flat vector hyp
        for layer in range(0,layers-2):
            idx_1 = idx_3
            idx_2 = idx_1 + Q[layer]*Q[layer+1]
            idx_3 = idx_2 + Q[layer+1]
            A = np.reshape(hyp[idx_1:idx_2], (Q[layer],Q[layer+1]))
            b = np.reshape(hyp[idx_2:idx_3], (1,Q[layer+1]))
            H = activation(np.matmul(H,A) + b)
            
        # Linear output layer
        idx_1 = idx_3
        idx_2 = idx_1 + Q[-2]*Q[-1]
        idx_3 = idx_2 + Q[-1]
        A = np.reshape(hyp[idx_1:idx_2], (Q[-2],Q[-1]))
        b = np.reshape(hyp[idx_2:idx_3], (1,Q[-1]))
        mu = np.matmul(H,A) + b
                
        return mu 
Example #3
Source File: ddp_gym.py    From ddp-gym with BSD 3-Clause "New" or "Revised" License
def backward(self, x_seq, u_seq):
        self.v[-1] = self.lf(x_seq[-1])
        self.v_x[-1] = self.lf_x(x_seq[-1])
        self.v_xx[-1] = self.lf_xx(x_seq[-1])
        k_seq = []
        kk_seq = []
        # Backward pass: recursively build the Q-function expansion and the feedback gains, starting from the final time step
        for t in range(self.pred_time - 1, -1, -1):
            f_x_t = self.f_x(x_seq[t], u_seq[t])
            f_u_t = self.f_u(x_seq[t], u_seq[t])
            q_x = self.l_x(x_seq[t], u_seq[t]) + np.matmul(f_x_t.T, self.v_x[t + 1])
            q_u = self.l_u(x_seq[t], u_seq[t]) + np.matmul(f_u_t.T, self.v_x[t + 1])
            q_xx = self.l_xx(x_seq[t], u_seq[t]) + \
              np.matmul(np.matmul(f_x_t.T, self.v_xx[t + 1]), f_x_t) + \
              np.dot(self.v_x[t + 1], np.squeeze(self.f_xx(x_seq[t], u_seq[t])))
            tmp = np.matmul(f_u_t.T, self.v_xx[t + 1])
            q_uu = self.l_uu(x_seq[t], u_seq[t]) + np.matmul(tmp, f_u_t) + \
              np.dot(self.v_x[t + 1], np.squeeze(self.f_uu(x_seq[t], u_seq[t])))
            q_ux = self.l_ux(x_seq[t], u_seq[t]) + np.matmul(tmp, f_x_t) + \
              np.dot(self.v_x[t + 1], np.squeeze(self.f_ux(x_seq[t], u_seq[t])))
            inv_q_uu = np.linalg.inv(q_uu)
            k = -np.matmul(inv_q_uu, q_u)
            kk = -np.matmul(inv_q_uu, q_ux)
            dv = 0.5 * np.matmul(q_u, k)
            self.v[t] += dv
            self.v_x[t] = q_x - np.matmul(np.matmul(q_u, inv_q_uu), q_ux)
            self.v_xx[t] = q_xx + np.matmul(q_ux.T, kk)
            k_seq.append(k)
            kk_seq.append(kk)
        k_seq.reverse()
        kk_seq.reverse()
        return k_seq, kk_seq 
Example #4
Source File: ddp_gym.py    From ddp-gym with BSD 3-Clause "New" or "Revised" License
def forward(self, x_seq, u_seq, k_seq, kk_seq):
        x_seq_hat = np.array(x_seq)
        u_seq_hat = np.array(u_seq)
        for t in range(len(u_seq)):
            control = k_seq[t] + np.matmul(kk_seq[t], (x_seq_hat[t] - x_seq[t]))
            u_seq_hat[t] = np.clip(u_seq[t] + control, -self.umax, self.umax)
            x_seq_hat[t + 1] = self.f(x_seq_hat[t], u_seq_hat[t])
        return x_seq_hat, u_seq_hat 
Example #5
Source File: methods.py    From tf-quant-finance with Apache License 2.0
def taylor_approx(target, stencil, values):
  """Use taylor series to approximate up to second order derivatives.

  Args:
    target: An array of shape (..., n), a batch of n-dimensional points
      where one wants to approximate function value and derivatives.
    stencil: An array of shape broadcastable to (..., k, n), for each target
      point a set of k = triangular(n + 1) points to use in its approximation.
    values: An array of shape broadcastable to (..., k), the function value at
      each of the stencil points.

  Returns:
    An array of shape (..., k), for each target point the approximated
    function value, gradient and hessian evaluated at that point (flattened
    and in the same order as returned by derivative_names).
  """
  # Broadcast arrays to their required shape.
  batch_shape, ndim = target.shape[:-1], target.shape[-1]
  stencil = np.broadcast_to(stencil, batch_shape + (triangular(ndim + 1), ndim))
  values = np.broadcast_to(values, stencil.shape[:-1])

  # Subtract target from each stencil point.
  delta_x = stencil - np.expand_dims(target, axis=-2)
  delta_xy = np.matmul(
      np.expand_dims(delta_x, axis=-1), np.expand_dims(delta_x, axis=-2))
  i = np.arange(ndim)
  j, k = np.triu_indices(ndim, k=1)

  # Build coefficients for the Taylor series equations, namely:
  #   f(stencil) = coeffs @ [f(target), df/d0(target), ...]
  coeffs = np.concatenate([
      np.ones(delta_x.shape[:-1] + (1,)),  # f(target)
      delta_x,  # df/di(target)
      delta_xy[..., i, i] / 2,  # d^2f/di^2(target)
      delta_xy[..., j, k],  # d^2f/{dj dk}(target)
  ], axis=-1)

  # Then: [f(target), df/d0(target), ...] = coeffs^{-1} @ f(stencil)
  return np.squeeze(
      np.matmul(np.linalg.inv(coeffs), values[..., np.newaxis]), axis=-1) 
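The excerpt above relies on a helper triangular that is not shown; a minimal one-dimensional usage sketch, assuming triangular(n) = n*(n+1)//2 and a three-point stencil, would be:

import autograd.numpy as np

def triangular(n):
    # assumed helper: number of Taylor terms up to second order
    return n * (n + 1) // 2

target = np.array([1.0])                       # approximate around x = 1
stencil = np.array([[0.0], [1.0], [2.0]])      # k = triangular(2) = 3 stencil points
values = stencil[:, 0] ** 2                    # f(x) = x**2 evaluated at the stencil
print(taylor_approx(target, stencil, values))  # ~ [f(1), f'(1), f''(1)] = [1., 2., 2.]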
Example #6
Source File: multivariate_normal.py    From autograd with MIT License
def generalized_outer_product(x):
    if np.ndim(x) == 1:
        return np.outer(x, x)
    return np.matmul(x, np.swapaxes(x, -1, -2)) 
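In other words, for a stack of column vectors this forms x xᵀ along the last two axes. A small shape check (illustrative inputs, not from the source file):

import autograd.numpy as np

v = np.array([1.0, 2.0])
print(generalized_outer_product(v).shape)        # (2, 2), via the np.outer branch
batch = np.ones((5, 3, 1))                       # a batch of column vectors, as in covgrad below
print(generalized_outer_product(batch).shape)    # (5, 3, 3)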
Example #7
Source File: multivariate_normal.py    From autograd with MIT License
def covgrad(x, mean, cov, allow_singular=False):
    if allow_singular:
        raise NotImplementedError("The multivariate normal pdf is not "
                "differentiable w.r.t. a singular covariance matrix")
    J = np.linalg.inv(cov)
    solved = np.matmul(J, np.expand_dims(x - mean, -1))
    return 1./2 * (generalized_outer_product(solved) - J) 
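With J = inv(cov) and delta = x - mean, the value returned above is 0.5 * (J delta deltaᵀ J - J), i.e. the standard expression for the derivative of the multivariate normal log-density with respect to the covariance matrix.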
Example #8
Source File: test_systematic.py    From autograd with MIT License
def test_matmul(): combo_check(np.matmul, [0, 1])(
                               [R(3), R(2, 3), R(2, 2, 3), C(3), C(2, 3)],
                               [R(3), R(3, 4), R(2, 3, 4), C(3), C(3, 4)]) 
Example #9
Source File: test_systematic.py    From autograd with MIT License
def test_matmul_broadcast(): combo_check(np.matmul, [0, 1])([R(1, 2, 2)], [R(3, 2, 1)]) 
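Here np.matmul broadcasts the leading batch dimensions, so a (1, 2, 2) stack multiplied by a (3, 2, 1) stack yields a (3, 2, 1) result:

import autograd.numpy as np

a = np.ones((1, 2, 2))
b = np.ones((3, 2, 1))
print(np.matmul(a, b).shape)  # (3, 2, 1)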
Example #10
Source File: VariationalAutoencoders.py    From DeepLearningTutorial with MIT License
def neural_net(self, X, Q, hyp):
        H = X
        idx_3 = 0
        layers = Q.shape[0]   
        for layer in range(0,layers-2):        
            idx_1 = idx_3
            idx_2 = idx_1 + Q[layer]*Q[layer+1]
            idx_3 = idx_2 + Q[layer+1]
            A = np.reshape(hyp[idx_1:idx_2], (Q[layer],Q[layer+1]))
            b = np.reshape(hyp[idx_2:idx_3], (1,Q[layer+1]))
            H = activation(np.matmul(H,A) + b)
            
        idx_1 = idx_3
        idx_2 = idx_1 + Q[-2]*Q[-1]
        idx_3 = idx_2 + Q[-1]
        A = np.reshape(hyp[idx_1:idx_2], (Q[-2],Q[-1]))
        b = np.reshape(hyp[idx_2:idx_3], (1,Q[-1]))
        mu = np.matmul(H,A) + b

        # Second output head: Sigma is the exponential of a linear layer (log-variance parameterization)
        idx_1 = idx_3
        idx_2 = idx_1 + Q[-2]*Q[-1]
        idx_3 = idx_2 + Q[-1]
        A = np.reshape(hyp[idx_1:idx_2], (Q[-2],Q[-1]))
        b = np.reshape(hyp[idx_2:idx_3], (1,Q[-1]))
        Sigma = np.exp(np.matmul(H,A) + b)
        
        return mu, Sigma 
Example #11
Source File: ConditionalVariationalAutoencoders.py    From DeepLearningTutorial with MIT License
def neural_net(self, X, Q, hyp):
        H = X
        idx_3 = 0
        layers = Q.shape[0]   
        for layer in range(0,layers-2):        
            idx_1 = idx_3
            idx_2 = idx_1 + Q[layer]*Q[layer+1]
            idx_3 = idx_2 + Q[layer+1]
            A = np.reshape(hyp[idx_1:idx_2], (Q[layer],Q[layer+1]))
            b = np.reshape(hyp[idx_2:idx_3], (1,Q[layer+1]))
            H = activation(np.matmul(H,A) + b)
            
        idx_1 = idx_3
        idx_2 = idx_1 + Q[-2]*Q[-1]
        idx_3 = idx_2 + Q[-1]
        A = np.reshape(hyp[idx_1:idx_2], (Q[-2],Q[-1]))
        b = np.reshape(hyp[idx_2:idx_3], (1,Q[-1]))
        mu = np.matmul(H,A) + b

        # Second output head: Sigma is the exponential of a linear layer (log-variance parameterization)
        idx_1 = idx_3
        idx_2 = idx_1 + Q[-2]*Q[-1]
        idx_3 = idx_2 + Q[-1]
        A = np.reshape(hyp[idx_1:idx_2], (Q[-2],Q[-1]))
        b = np.reshape(hyp[idx_2:idx_3], (1,Q[-1]))
        Sigma = np.exp(np.matmul(H,A) + b)
        
        return mu, Sigma 
Example #12
Source File: parametric_GP.py    From ParametricGP with MIT License
def likelihood(self, hyp): 
        M = self.M 
        Z = self.Z
        m = self.m
        S = self.S
        X_batch = self.X_batch
        y_batch = self.y_batch
        jitter = self.jitter 
        jitter_cov = self.jitter_cov
        N = X_batch.shape[0]
        
        
        logsigma_n = hyp[-1]
        sigma_n = np.exp(logsigma_n)
        
        # Compute K_u_inv
        K_u = kernel(Z, Z, hyp[:-1])    
        # K_u_inv = np.linalg.solve(K_u + np.eye(M)*jitter_cov, np.eye(M))
        L = np.linalg.cholesky(K_u + np.eye(M)*jitter_cov)    
        K_u_inv = np.linalg.solve(L.T, np.linalg.solve(L,np.eye(M)))
        
        self.K_u_inv = K_u_inv
          
        # Compute mu
        psi = kernel(Z, X_batch, hyp[:-1])    
        K_u_inv_m = np.matmul(K_u_inv,m)   
        MU = np.matmul(psi.T,K_u_inv_m)
        
        # Compute cov
        Alpha = np.matmul(K_u_inv,psi)
        COV = kernel(X_batch, X_batch, hyp[:-1]) - np.matmul(psi.T, np.matmul(K_u_inv,psi)) + \
                np.matmul(Alpha.T, np.matmul(S,Alpha))
        
        COV_inv = np.linalg.solve(COV  + np.eye(N)*sigma_n + np.eye(N)*jitter, np.eye(N))
        # L = np.linalg.cholesky(COV  + np.eye(N)*sigma_n + np.eye(N)*jitter) 
        # COV_inv = np.linalg.solve(np.transpose(L), np.linalg.solve(L,np.eye(N)))
        
        # Compute cov(Z, X)
        cov_ZX = np.matmul(S,Alpha)
        
        # Update m and S
        alpha = np.matmul(COV_inv, cov_ZX.T)
        m = m + np.matmul(cov_ZX, np.matmul(COV_inv, y_batch-MU))    
        S = S - np.matmul(cov_ZX, alpha)
        
        self.m = m
        self.S = S
        
        # Compute NLML                
        K_u_inv_m = np.matmul(K_u_inv,m)
        NLML = 0.5*np.matmul(m.T,K_u_inv_m) + np.sum(np.log(np.diag(L))) + 0.5*np.log(2.*np.pi)*M
        
        return NLML[0,0] 
Example #13
Source File: parametric_GP.py    From ParametricGP with MIT License
def predict(self, X_star):
        Z = self.Z
        m = self.m.value
        S = self.S.value
        hyp = self.hyp
        K_u_inv = self.K_u_inv
        
        N_star = X_star.shape[0]
        partitions_size = 10000
        (number_of_partitions, remainder_partition) = divmod(N_star, partitions_size)
        
        mean_star = np.zeros((N_star,1))
        var_star = np.zeros((N_star,1))
        
        for partition in range(0,number_of_partitions):
            print("Predicting partition: %d" % (partition))
            idx_1 = partition*partitions_size
            idx_2 = (partition+1)*partitions_size
            
            # Compute mu
            psi = kernel(Z, X_star[idx_1:idx_2,:], hyp[:-1])    
            K_u_inv_m = np.matmul(K_u_inv,m)   
            mu = np.matmul(psi.T,K_u_inv_m)
            
            mean_star[idx_1:idx_2,0:1] = mu
        
            # Compute cov  
            Alpha = np.matmul(K_u_inv,psi)
            cov = kernel(X_star[idx_1:idx_2,:], X_star[idx_1:idx_2,:], hyp[:-1]) - \
                    np.matmul(psi.T, np.matmul(K_u_inv,psi)) + np.matmul(Alpha.T, np.matmul(S,Alpha))
            var = np.abs(np.diag(cov))  # + np.exp(hyp[-1])
            
            var_star[idx_1:idx_2,0] = var
    
        print("Predicting the last partition")
        idx_1 = number_of_partitions*partitions_size
        idx_2 = number_of_partitions*partitions_size + remainder_partition
        
        # Compute mu
        psi = kernel(Z, X_star[idx_1:idx_2,:], hyp[:-1])    
        K_u_inv_m = np.matmul(K_u_inv,m)   
        mu = np.matmul(psi.T,K_u_inv_m)
        
        mean_star[idx_1:idx_2,0:1] = mu
    
        # Compute cov  
        Alpha = np.matmul(K_u_inv,psi)
        cov = kernel(X_star[idx_1:idx_2,:], X_star[idx_1:idx_2,:], hyp[:-1]) - \
                np.matmul(psi.T, np.matmul(K_u_inv,psi)) + np.matmul(Alpha.T, np.matmul(S,Alpha))
        var = np.abs(np.diag(cov))  # + np.exp(hyp[-1])
        
        var_star[idx_1:idx_2,0] = var
        
        
        return mean_star, var_star