Python tensorflow.python.ops.linalg_ops.cholesky_solve() Examples
The following are 17 code examples of tensorflow.python.ops.linalg_ops.cholesky_solve(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.python.ops.linalg_ops, or try the search function.
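Before diving into the project examples, here is a minimal standalone sketch of what the function does: given a lower-triangular Cholesky factor chol of a symmetric positive-definite matrix A, cholesky_solve(chol, rhs) solves A x = rhs. The matrix values below are illustrative, and the sketch uses the public tf.linalg aliases, which are exported from the same linalg_ops module:

import tensorflow as tf

# A small symmetric positive-definite matrix (illustrative values).
a = tf.constant([[4.0, 2.0],
                 [2.0, 3.0]])
rhs = tf.constant([[1.0],
                   [2.0]])

# Factor A = L L^T, then solve A x = rhs using the factor.
chol = tf.linalg.cholesky(a)             # lower-triangular L
x = tf.linalg.cholesky_solve(chol, rhs)  # two triangular solves, no inverse

# Sanity check: A @ x should reproduce rhs.
print(tf.matmul(a, x))

For well-conditioned SPD systems this is the standard alternative to forming an explicit inverse, which is both slower and less numerically stable.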
Example #1
Source File: linear_operator_matrix.py From auto-alt-text-lambda-api with MIT License
def _solve(self, rhs, adjoint=False):
  if self._is_spd:
    return linalg_ops.cholesky_solve(self._chol, rhs)
  return linalg_ops.matrix_solve(self._matrix, rhs, adjoint=adjoint)
Example #2
Source File: linear_operator_matrix.py From keras-lambda with MIT License
def _solve(self, rhs, adjoint=False):
  if self._is_spd:
    return linalg_ops.cholesky_solve(self._chol, rhs)
  return linalg_ops.matrix_solve(self._matrix, rhs, adjoint=adjoint)
Example #3
Source File: operator_pd_vdvt_update.py From keras-lambda with MIT License
def _batch_sqrt_solve(self, rhs):
  # Recall the square root of this operator is M + VDV^T.
  # The Woodbury formula gives:
  # (M + VDV^T)^{-1}
  # = M^{-1} - M^{-1} V (D^{-1} + V^T M^{-1} V)^{-1} V^T M^{-1}
  # = M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
  # where C is the capacitance matrix.
  m = self._operator
  v = self._v
  cchol = self._chol_capacitance(batch_mode=True)
  # The operators will use batch/singleton mode automatically.  We don't
  # override.
  # M^{-1} rhs
  minv_rhs = m.solve(rhs)
  # V^T M^{-1} rhs
  vt_minv_rhs = math_ops.matmul(v, minv_rhs, adjoint_a=True)
  # C^{-1} V^T M^{-1} rhs
  cinv_vt_minv_rhs = linalg_ops.cholesky_solve(cchol, vt_minv_rhs)
  # V C^{-1} V^T M^{-1} rhs
  v_cinv_vt_minv_rhs = math_ops.matmul(v, cinv_vt_minv_rhs)
  # M^{-1} V C^{-1} V^T M^{-1} rhs
  minv_v_cinv_vt_minv_rhs = m.solve(v_cinv_vt_minv_rhs)
  # M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
  return minv_rhs - minv_v_cinv_vt_minv_rhs
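The Woodbury identity quoted in the comments (and reused by the other operator_pd_vdvt_update examples on this page) is easy to sanity-check numerically. Here is a minimal NumPy sketch with made-up matrices, independent of the project's operator classes:

import numpy as np

rng = np.random.default_rng(0)
n, k = 5, 2

# M symmetric positive definite, V a tall n x k matrix, D positive diagonal.
a = rng.standard_normal((n, n))
m = a @ a.T + n * np.eye(n)
v = rng.standard_normal((n, k))
d = np.diag(rng.uniform(1.0, 2.0, size=k))

# Capacitance matrix C = D^{-1} + V^T M^{-1} V.
minv = np.linalg.inv(m)
c = np.linalg.inv(d) + v.T @ minv @ v

# Woodbury: (M + V D V^T)^{-1} = M^{-1} - M^{-1} V C^{-1} V^T M^{-1}.
lhs = np.linalg.inv(m + v @ d @ v.T)
rhs = minv - minv @ v @ np.linalg.inv(c) @ v.T @ minv
print(np.allclose(lhs, rhs))  # True

The payoff is that C is only k x k, so when k << n the update costs a small Cholesky solve instead of refactoring the full n x n matrix.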
Example #4
Source File: operator_pd_vdvt_update.py From keras-lambda with MIT License
def _sqrt_solve(self, rhs):
  # Recall the square root of this operator is M + VDV^T.
  # The Woodbury formula gives:
  # (M + VDV^T)^{-1}
  # = M^{-1} - M^{-1} V (D^{-1} + V^T M^{-1} V)^{-1} V^T M^{-1}
  # = M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
  # where C is the capacitance matrix.
  # TODO(jvdillon) Determine if recursively applying rank-1 updates is more
  # efficient.  May not be possible because a general n x n matrix can be
  # represented as n rank-1 updates, and solving with this matrix is always
  # done in O(n^3) time.
  m = self._operator
  v = self._v
  cchol = self._chol_capacitance(batch_mode=False)
  # The operators will use batch/singleton mode automatically.  We don't
  # override.
  # M^{-1} rhs
  minv_rhs = m.solve(rhs)
  # V^T M^{-1} rhs
  vt_minv_rhs = math_ops.matmul(v, minv_rhs, transpose_a=True)
  # C^{-1} V^T M^{-1} rhs
  cinv_vt_minv_rhs = linalg_ops.cholesky_solve(cchol, vt_minv_rhs)
  # V C^{-1} V^T M^{-1} rhs
  v_cinv_vt_minv_rhs = math_ops.matmul(v, cinv_vt_minv_rhs)
  # M^{-1} V C^{-1} V^T M^{-1} rhs
  minv_v_cinv_vt_minv_rhs = m.solve(v_cinv_vt_minv_rhs)
  # M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
  return minv_rhs - minv_v_cinv_vt_minv_rhs
Example #5
Source File: operator_pd_cholesky.py From keras-lambda with MIT License
def _batch_solve(self, rhs):
  return linalg_ops.cholesky_solve(self._chol, rhs)
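cholesky_solve also accepts leading batch dimensions (chol of shape [..., M, M], rhs of shape [..., M, K]), which is what lets _batch_solve be a one-liner. A short sketch with illustrative shapes:

import tensorflow as tf

# A batch of two SPD matrices, shape [2, 3, 3].
a = tf.random.normal([2, 3, 3])
spd = tf.matmul(a, a, transpose_b=True) + 3.0 * tf.eye(3, batch_shape=[2])

chol = tf.linalg.cholesky(spd)           # shape [2, 3, 3]
rhs = tf.random.normal([2, 3, 1])
x = tf.linalg.cholesky_solve(chol, rhs)  # solved per batch member, [2, 3, 1]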
Example #6
Source File: utils.py From noisy-K-FAC with Apache License 2.0
def posdef_inv_cholesky(tensor, identity, damping):
  """Computes inverse(tensor + damping * identity) with Cholesky."""
  chol = linalg_ops.cholesky(tensor + damping * identity)
  return linalg_ops.cholesky_solve(chol, identity)
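A usage sketch for the helper above, with illustrative inputs: passing the identity as the right-hand side makes cholesky_solve return the entire damped inverse in one call, which is how this noisy-K-FAC utility computes inverse(tensor + damping * identity):

import tensorflow as tf

# Illustrative inputs: a PSD Gram matrix plus Tikhonov damping.
x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
gram = tf.matmul(x, x, transpose_a=True)  # X^T X is positive semi-definite
eye = tf.eye(2)

# Same computation as posdef_inv_cholesky(gram, eye, damping=0.1):
chol = tf.linalg.cholesky(gram + 0.1 * eye)
inv = tf.linalg.cholesky_solve(chol, eye)

print(tf.matmul(gram + 0.1 * eye, inv))  # approximately the 2x2 identity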
Example #7
Source File: operator_pd_vdvt_update.py From deep_image_model with Apache License 2.0
def _batch_sqrt_solve(self, rhs):
  # Recall the square root of this operator is M + VDV^T.
  # The Woodbury formula gives:
  # (M + VDV^T)^{-1}
  # = M^{-1} - M^{-1} V (D^{-1} + V^T M^{-1} V)^{-1} V^T M^{-1}
  # = M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
  # where C is the capacitance matrix.
  m = self._operator
  v = self._v
  cchol = self._chol_capacitance(batch_mode=True)
  # The operators will use batch/singleton mode automatically.  We don't
  # override.
  # M^{-1} rhs
  minv_rhs = m.solve(rhs)
  # V^T M^{-1} rhs
  vt_minv_rhs = math_ops.batch_matmul(v, minv_rhs, adj_x=True)
  # C^{-1} V^T M^{-1} rhs
  cinv_vt_minv_rhs = linalg_ops.cholesky_solve(cchol, vt_minv_rhs)
  # V C^{-1} V^T M^{-1} rhs
  v_cinv_vt_minv_rhs = math_ops.batch_matmul(v, cinv_vt_minv_rhs)
  # M^{-1} V C^{-1} V^T M^{-1} rhs
  minv_v_cinv_vt_minv_rhs = m.solve(v_cinv_vt_minv_rhs)
  # M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
  return minv_rhs - minv_v_cinv_vt_minv_rhs
Example #8
Source File: operator_pd_vdvt_update.py From deep_image_model with Apache License 2.0
def _sqrt_solve(self, rhs):
  # Recall the square root of this operator is M + VDV^T.
  # The Woodbury formula gives:
  # (M + VDV^T)^{-1}
  # = M^{-1} - M^{-1} V (D^{-1} + V^T M^{-1} V)^{-1} V^T M^{-1}
  # = M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
  # where C is the capacitance matrix.
  # TODO(jvdillon) Determine if recursively applying rank-1 updates is more
  # efficient.  May not be possible because a general n x n matrix can be
  # represented as n rank-1 updates, and solving with this matrix is always
  # done in O(n^3) time.
  m = self._operator
  v = self._v
  cchol = self._chol_capacitance(batch_mode=False)
  # The operators will use batch/singleton mode automatically.  We don't
  # override.
  # M^{-1} rhs
  minv_rhs = m.solve(rhs)
  # V^T M^{-1} rhs
  vt_minv_rhs = math_ops.matmul(v, minv_rhs, transpose_a=True)
  # C^{-1} V^T M^{-1} rhs
  cinv_vt_minv_rhs = linalg_ops.cholesky_solve(cchol, vt_minv_rhs)
  # V C^{-1} V^T M^{-1} rhs
  v_cinv_vt_minv_rhs = math_ops.matmul(v, cinv_vt_minv_rhs)
  # M^{-1} V C^{-1} V^T M^{-1} rhs
  minv_v_cinv_vt_minv_rhs = m.solve(v_cinv_vt_minv_rhs)
  # M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
  return minv_rhs - minv_v_cinv_vt_minv_rhs
Example #9
Source File: operator_pd_cholesky.py From deep_image_model with Apache License 2.0
def _batch_solve(self, rhs):
  return linalg_ops.cholesky_solve(self._chol, rhs)
Example #10
Source File: operator_pd_cholesky.py From lambda-packs with MIT License
def _batch_solve(self, rhs):
  return linalg_ops.cholesky_solve(self._chol, rhs)
Example #11
Source File: operator_pd_vdvt_update.py From auto-alt-text-lambda-api with MIT License
def _batch_sqrt_solve(self, rhs):
  # Recall the square root of this operator is M + VDV^T.
  # The Woodbury formula gives:
  # (M + VDV^T)^{-1}
  # = M^{-1} - M^{-1} V (D^{-1} + V^T M^{-1} V)^{-1} V^T M^{-1}
  # = M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
  # where C is the capacitance matrix.
  m = self._operator
  v = self._v
  cchol = self._chol_capacitance(batch_mode=True)
  # The operators will use batch/singleton mode automatically.  We don't
  # override.
  # M^{-1} rhs
  minv_rhs = m.solve(rhs)
  # V^T M^{-1} rhs
  vt_minv_rhs = math_ops.matmul(v, minv_rhs, adjoint_a=True)
  # C^{-1} V^T M^{-1} rhs
  cinv_vt_minv_rhs = linalg_ops.cholesky_solve(cchol, vt_minv_rhs)
  # V C^{-1} V^T M^{-1} rhs
  v_cinv_vt_minv_rhs = math_ops.matmul(v, cinv_vt_minv_rhs)
  # M^{-1} V C^{-1} V^T M^{-1} rhs
  minv_v_cinv_vt_minv_rhs = m.solve(v_cinv_vt_minv_rhs)
  # M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
  return minv_rhs - minv_v_cinv_vt_minv_rhs
Example #12
Source File: operator_pd_vdvt_update.py From auto-alt-text-lambda-api with MIT License
def _sqrt_solve(self, rhs):
  # Recall the square root of this operator is M + VDV^T.
  # The Woodbury formula gives:
  # (M + VDV^T)^{-1}
  # = M^{-1} - M^{-1} V (D^{-1} + V^T M^{-1} V)^{-1} V^T M^{-1}
  # = M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
  # where C is the capacitance matrix.
  # TODO(jvdillon) Determine if recursively applying rank-1 updates is more
  # efficient.  May not be possible because a general n x n matrix can be
  # represented as n rank-1 updates, and solving with this matrix is always
  # done in O(n^3) time.
  m = self._operator
  v = self._v
  cchol = self._chol_capacitance(batch_mode=False)
  # The operators will use batch/singleton mode automatically.  We don't
  # override.
  # M^{-1} rhs
  minv_rhs = m.solve(rhs)
  # V^T M^{-1} rhs
  vt_minv_rhs = math_ops.matmul(v, minv_rhs, transpose_a=True)
  # C^{-1} V^T M^{-1} rhs
  cinv_vt_minv_rhs = linalg_ops.cholesky_solve(cchol, vt_minv_rhs)
  # V C^{-1} V^T M^{-1} rhs
  v_cinv_vt_minv_rhs = math_ops.matmul(v, cinv_vt_minv_rhs)
  # M^{-1} V C^{-1} V^T M^{-1} rhs
  minv_v_cinv_vt_minv_rhs = m.solve(v_cinv_vt_minv_rhs)
  # M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
  return minv_rhs - minv_v_cinv_vt_minv_rhs
Example #13
Source File: operator_pd_cholesky.py From auto-alt-text-lambda-api with MIT License
def _batch_solve(self, rhs):
  return linalg_ops.cholesky_solve(self._chol, rhs)
Example #14
Source File: linear_operator_udvh_update.py From lambda-packs with MIT License
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
  if self.base_operator.is_non_singular is False:
    raise ValueError(
        "Solve not implemented unless this is a perturbation of a "
        "non-singular LinearOperator.")
  # The Woodbury formula gives:
  # https://en.wikipedia.org/wiki/Woodbury_matrix_identity
  #   (L + UDV^H)^{-1}
  #   = L^{-1} - L^{-1} U (D^{-1} + V^H L^{-1} U)^{-1} V^H L^{-1}
  #   = L^{-1} - L^{-1} U C^{-1} V^H L^{-1}
  # where C is the capacitance matrix, C := D^{-1} + V^H L^{-1} U
  # Note also that, with ^{-H} being the inverse of the adjoint,
  #   (L + UDV^H)^{-H}
  #   = L^{-H} - L^{-H} V C^{-H} U^H L^{-H}
  l = self.base_operator
  if adjoint:
    v = self.u
    u = self.v
  else:
    v = self.v
    u = self.u
  # L^{-1} rhs
  linv_rhs = l.solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
  # V^H L^{-1} rhs
  vh_linv_rhs = math_ops.matmul(v, linv_rhs, adjoint_a=True)
  # C^{-1} V^H L^{-1} rhs
  if self._use_cholesky:
    capinv_vh_linv_rhs = linalg_ops.cholesky_solve(
        self._chol_capacitance, vh_linv_rhs)
  else:
    capinv_vh_linv_rhs = linalg_ops.matrix_solve(
        self._capacitance, vh_linv_rhs, adjoint=adjoint)
  # U C^{-1} V^H L^{-1} rhs
  u_capinv_vh_linv_rhs = math_ops.matmul(u, capinv_vh_linv_rhs)
  # L^{-1} U C^{-1} V^H L^{-1} rhs
  linv_u_capinv_vh_linv_rhs = l.solve(u_capinv_vh_linv_rhs, adjoint=adjoint)
  # L^{-1} - L^{-1} U C^{-1} V^H L^{-1}
  return linv_rhs - linv_u_capinv_vh_linv_rhs
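In current TensorFlow this operator class surfaces publicly as tf.linalg.LinearOperatorLowRankUpdate. A hedged usage sketch, with illustrative shapes and values: solve() on the composed operator follows the Woodbury path above rather than densifying L + U U^H:

import tensorflow as tf

# Base operator L (diagonal, hence cheap to solve) plus a rank-1 update.
base = tf.linalg.LinearOperatorDiag(
    [2.0, 3.0, 4.0], is_non_singular=True, is_positive_definite=True)
u = tf.constant([[1.0], [0.5], [0.0]])

# Represents L + U U^H without ever forming the dense sum.
operator = tf.linalg.LinearOperatorLowRankUpdate(base, u)

rhs = tf.constant([[1.0], [2.0], [3.0]])
x = operator.solve(rhs)
print(operator.matmul(x))  # should reproduce rhs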
Example #15
Source File: linear_operator.py From lambda-packs with MIT License
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
  """Default implementation of _solve."""
  if self.is_square is False:
    raise NotImplementedError(
        "Solve is not yet implemented for non-square operators.")
  logging.warn(
      "Using (possibly slow) default implementation of solve."
      "  Requires conversion to a dense matrix and O(N^3) operations.")
  rhs = linear_operator_util.matrix_adjoint(rhs) if adjoint_arg else rhs
  if self._can_use_cholesky():
    return linalg_ops.cholesky_solve(self._get_cached_chol(), rhs)
  return linalg_ops.matrix_solve(
      self._get_cached_dense_matrix(), rhs, adjoint=adjoint)
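What steers this default into the cholesky_solve branch is _can_use_cholesky(), which consults the operator's self-adjoint and positive-definite hints. A minimal sketch against the public API, with illustrative values:

import tensorflow as tf

a = tf.constant([[4.0, 2.0],
                 [2.0, 3.0]])

# Declaring the operator self-adjoint and positive definite lets solve()
# take the Cholesky path; without the hints it falls back to the dense
# matrix_solve branch.
operator = tf.linalg.LinearOperatorFullMatrix(
    a, is_self_adjoint=True, is_positive_definite=True)

rhs = tf.constant([[1.0], [2.0]])
print(operator.solve(rhs))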
Example #16
Source File: operator_pd_vdvt_update.py From lambda-packs with MIT License
def _batch_sqrt_solve(self, rhs):
  # Recall the square root of this operator is M + VDV^T.
  # The Woodbury formula gives:
  # (M + VDV^T)^{-1}
  # = M^{-1} - M^{-1} V (D^{-1} + V^T M^{-1} V)^{-1} V^T M^{-1}
  # = M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
  # where C is the capacitance matrix.
  m = self._operator
  v = self._v
  cchol = self._chol_capacitance(batch_mode=True)
  # The operators will use batch/singleton mode automatically.  We don't
  # override.
  # M^{-1} rhs
  minv_rhs = m.solve(rhs)
  # V^T M^{-1} rhs
  vt_minv_rhs = math_ops.matmul(v, minv_rhs, adjoint_a=True)
  # C^{-1} V^T M^{-1} rhs
  cinv_vt_minv_rhs = linalg_ops.cholesky_solve(cchol, vt_minv_rhs)
  # V C^{-1} V^T M^{-1} rhs
  v_cinv_vt_minv_rhs = math_ops.matmul(v, cinv_vt_minv_rhs)
  # M^{-1} V C^{-1} V^T M^{-1} rhs
  minv_v_cinv_vt_minv_rhs = m.solve(v_cinv_vt_minv_rhs)
  # M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
  return minv_rhs - minv_v_cinv_vt_minv_rhs
Example #17
Source File: operator_pd_vdvt_update.py From lambda-packs with MIT License
def _sqrt_solve(self, rhs):
  # Recall the square root of this operator is M + VDV^T.
  # The Woodbury formula gives:
  # (M + VDV^T)^{-1}
  # = M^{-1} - M^{-1} V (D^{-1} + V^T M^{-1} V)^{-1} V^T M^{-1}
  # = M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
  # where C is the capacitance matrix.
  # TODO(jvdillon) Determine if recursively applying rank-1 updates is more
  # efficient.  May not be possible because a general n x n matrix can be
  # represented as n rank-1 updates, and solving with this matrix is always
  # done in O(n^3) time.
  m = self._operator
  v = self._v
  cchol = self._chol_capacitance(batch_mode=False)
  # The operators will use batch/singleton mode automatically.  We don't
  # override.
  # M^{-1} rhs
  minv_rhs = m.solve(rhs)
  # V^T M^{-1} rhs
  vt_minv_rhs = math_ops.matmul(v, minv_rhs, transpose_a=True)
  # C^{-1} V^T M^{-1} rhs
  cinv_vt_minv_rhs = linalg_ops.cholesky_solve(cchol, vt_minv_rhs)
  # V C^{-1} V^T M^{-1} rhs
  v_cinv_vt_minv_rhs = math_ops.matmul(v, cinv_vt_minv_rhs)
  # M^{-1} V C^{-1} V^T M^{-1} rhs
  minv_v_cinv_vt_minv_rhs = m.solve(v_cinv_vt_minv_rhs)
  # M^{-1} - M^{-1} V C^{-1} V^T M^{-1}
  return minv_rhs - minv_v_cinv_vt_minv_rhs