Python theano.tensor.add() Examples
The following are 30 code examples of theano.tensor.add(), collected from open-source projects. The source file, project, and license are noted above each example. You may also want to check out the other available functions and classes of the theano.tensor module.
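Before the project examples, here is a minimal, self-contained sketch of the function itself (the variable names and shapes are illustrative, not taken from any project below). T.add builds an elementwise Add node; unlike the binary + operator it accepts any number of arguments, and the inputs are broadcast against each other as usual:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
b = T.vector('b')
c = T.vector('c')

# One Add node with three inputs; equivalent to x + b + c.
# The vectors are broadcast across the rows of the matrix.
y = T.add(x, b, c)

f = theano.function([x, b, c], y)
print(f(np.ones((2, 3), dtype=theano.config.floatX),
        np.arange(3, dtype=theano.config.floatX),
        np.full(3, 0.5, dtype=theano.config.floatX)))
# [[ 1.5  2.5  3.5]
#  [ 1.5  2.5  3.5]]

Several examples below also rely on the variadic form via unpacking, e.g. T.add(*list_of_terms) to sum an arbitrary list of tensors in a single node.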
Example #1
Source File: core.py From modular_rl with MIT License
def __init__(self, net, mixfrac=1.0, maxiter=25):
    EzPickle.__init__(self, net, mixfrac, maxiter)
    self.net = net
    self.mixfrac = mixfrac
    x_nx = net.input
    self.predict = theano.function([x_nx], net.output, **FNOPTS)
    ypred_ny = net.output
    ytarg_ny = T.matrix("ytarg")
    var_list = net.trainable_weights
    l2 = 1e-3 * T.add(*[T.square(v).sum() for v in var_list])
    N = x_nx.shape[0]
    mse = T.sum(T.square(ytarg_ny - ypred_ny)) / N
    symb_args = [x_nx, ytarg_ny]
    loss = mse + l2
    self.opt = LbfgsOptimizer(loss, var_list, symb_args, maxiter=maxiter,
                              extra_losses={"mse": mse, "l2": l2})
Example #2
Source File: rbm.py From TextDetector with GNU General Public License v3.0
def input_to_h_from_v(self, v):
    """
    .. todo::

        WRITEME
    """
    D = self.Lambda
    alpha = self.alpha

    def sum_s(x):
        return x.reshape((-1, self.nhid, self.n_s_per_h)).sum(axis=2)

    return tensor.add(
        self.b,
        -0.5 * tensor.dot(v * v, D),
        sum_s(self.mu * tensor.dot(v, self.W)),
        sum_s(0.5 * tensor.sqr(tensor.dot(v, self.W)) / alpha))

# def mean_h_given_v(self, v):
#     inherited version is OK:
#     return nnet.sigmoid(self.input_to_h_from_v(v))
Example #3
Source File: rbm.py From TextDetector with GNU General Public License v3.0
def free_energy_given_v(self, v):
    """
    .. todo::

        WRITEME
    """
    sigmoid_arg = self.input_to_h_from_v(v)
    return tensor.add(
        0.5 * (self.B * (v ** 2)).sum(axis=1),
        -tensor.nnet.softplus(sigmoid_arg).sum(axis=1))

# def __call__(self, v): inherited version is OK
# def reconstruction_error: inherited version should be OK
# def params(self): inherited version is OK
Example #4
Source File: rbm_adv.py From SteinGAN with MIT License
def rbf_kernel(X):
    XY = T.dot(X, X.T)
    x2 = T.sum(X ** 2, axis=1).dimshuffle(0, 'x')
    X2e = T.repeat(x2, X.shape[0], axis=1)
    H = X2e + X2e.T - 2. * XY

    V = H.flatten()
    # median distance
    h = T.switch(T.eq((V.shape[0] % 2), 0),
                 # if even vector
                 T.mean(T.sort(V)[((V.shape[0] // 2) - 1):((V.shape[0] // 2) + 1)]),
                 # if odd vector
                 T.sort(V)[V.shape[0] // 2])
    h = T.sqrt(.5 * h / T.log(H.shape[0].astype('float32') + 1.))

    # compute the rbf kernel
    kxy = T.exp(-H / (h ** 2) / 2.0)

    dxkxy = -T.dot(kxy, X)
    sumkxy = T.sum(kxy, axis=1).dimshuffle(0, 'x')
    dxkxy = T.add(dxkxy, T.mul(X, sumkxy)) / (h ** 2)

    return kxy, dxkxy
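In conventional notation, rbf_kernel computes the kernel matrix kxy with entries k(x_i, x_j) = exp(-||x_i - x_j||^2 / (2 h^2)), with the bandwidth chosen by the median heuristic, h^2 = median(||x_i - x_j||^2) / (2 log(n + 1)). The second return value, dxkxy, collects the summed kernel gradients sum_j grad_{x_j} k(x_j, x_i) = sum_j k(x_j, x_i) (x_i - x_j) / h^2 for each row x_i; this is the form used in Stein variational gradient descent, which appears to be what the SteinGAN project uses it for.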
Example #5
Source File: output_layer.py From recnet with MIT License
def sequence_iteration(self, output, mask, use_dropout=0, dropout_value=0.5):
    dot_product = T.dot(output, self.t_w_out)
    net_o = T.add(dot_product, self.t_b_out)

    ex_net = T.exp(net_o)
    sum_net = T.sum(ex_net, axis=2, keepdims=True)
    softmax_o = ex_net / sum_net

    mask = T.addbroadcast(mask, 2)  # TODO: necessary?
    output = T.mul(mask, softmax_o) + T.mul((1. - mask), 1e-6)

    return output  # result


###### Linear Layer ########################################
Example #6
Source File: additionlayer.py From theanolm with Apache License 2.0
def create_structure(self):
    """Creates the symbolic graph of this layer.

    Sets self.output to a symbolic matrix that describes the output of
    this layer. If the inputs are the same size as the output, the output
    will be the elementwise sum of the inputs. If needed, the inputs will
    be projected to the same size.
    """
    for input_index, input_layer in enumerate(self._input_layers):
        input_size = input_layer.output_size
        if input_size == self.output_size:
            input_matrix = input_layer.output
        else:
            input_matrix = self._tensor_preact(input_layer.output,
                                               'input{}'.format(input_index),
                                               use_bias=False)
        if self.output is None:
            self.output = input_matrix
        else:
            self.output = tensor.add(self.output, input_matrix)
Example #7
Source File: net_theano.py From visual_dynamics with MIT License
def build_bilinear_net(input_shapes, X_var=None, U_var=None, X_diff_var=None, axis=1):
    x_shape, u_shape = input_shapes
    X_var = X_var or T.tensor4('X')
    U_var = U_var or T.matrix('U')
    X_diff_var = X_diff_var or T.tensor4('X_diff')
    X_next_var = X_var + X_diff_var

    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var)
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var)

    l_x_diff_pred = LT.BilinearLayer([l_x, l_u], axis=axis)
    l_x_next_pred = L.ElemwiseMergeLayer([l_x, l_x_diff_pred], T.add)
    l_y = L.flatten(l_x)
    l_y_diff_pred = L.flatten(l_x_diff_pred)

    X_next_pred_var = lasagne.layers.get_output(l_x_next_pred)
    loss = ((X_next_var - X_next_pred_var) ** 2).mean(axis=0).sum() / 2.

    net_name = 'BilinearNet'
    input_vars = OrderedDict([(var.name, var) for var in [X_var, U_var, X_diff_var]])
    pred_layers = OrderedDict([('y_diff_pred', l_y_diff_pred),
                               ('y', l_y),
                               ('x0_next_pred', l_x_next_pred)])
    return net_name, input_vars, pred_layers, loss
Example #8
Source File: recurrent_layer.py From recnet with MIT License
def sequence_iteration(self, in_seq, mask, use_dropout, dropout_value=1):
    in_seq_d = T.switch(use_dropout,
                        (in_seq *
                         self.trng.binomial(in_seq.shape,
                                            p=dropout_value, n=1,
                                            dtype=in_seq.dtype)),
                        in_seq)

    rz_in_seq = T.add(T.dot(in_seq_d, self.weights[0]), self.weights[1])

    out_seq, updates = theano.scan(
        fn=self.t_forward_step,
        sequences=[mask, rz_in_seq],  # in_seq_d],
        outputs_info=[self.t_ol_t00],
        non_sequences=[i for i in self.weights][2:] + [self.t_n_out],
        go_backwards=self.go_backwards,
        truncate_gradient=-1,
        # n_steps=50,
        strict=True,
        allow_gc=False,
    )
    return out_seq
Example #9
Source File: update_function.py From recnet with MIT License
def fit(self, weights, o_error, tpo):
    gradients = T.grad(o_error, weights)
    updates = []
    for c, v, w, g in zip(self.t_cache, self.t_velocity, weights, gradients):
        new_velocity = T.sub(T.mul(tpo["momentum_rate"], v),
                             T.mul(tpo["learn_rate"], g))
        new_cache = T.add(T.mul(tpo["decay_rate"], c),
                          T.mul(T.sub(1, tpo["decay_rate"]), T.sqr(g)))
        new_weights = T.sub(T.add(w, new_velocity),
                            T.true_div(T.mul(g, tpo["learn_rate"]),
                                       T.sqrt(T.add(new_cache, 0.1 ** 8))))
        updates.append((w, new_weights))
        updates.append((v, new_velocity))
        updates.append((c, new_cache))
    return updates


###### Nesterov momentum ########################################
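In conventional notation, each pass of the loop above applies v_new = mu * v - eta * g, c_new = rho * c + (1 - rho) * g^2, and w_new = w + v_new - eta * g / sqrt(c_new + 1e-8), where mu = tpo["momentum_rate"], eta = tpo["learn_rate"], and rho = tpo["decay_rate"]; that is, classical momentum combined with an RMSprop-style squared-gradient cache.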
Example #10
Source File: recurrent_layer.py From recnet with MIT License
def sequence_iteration(self, in_seq, mask, use_dropout, dropout_value=1):
    in_seq_d = T.switch(use_dropout,
                        (in_seq *
                         self.trng.binomial(in_seq.shape,
                                            p=dropout_value, n=1,
                                            dtype=in_seq.dtype)),
                        in_seq)

    w_in_seq = T.add(T.dot(in_seq_d, self.weights[5]), self.weights[6])

    [out_seq, cell_seq], updates = theano.scan(
        fn=self.t_forward_step,
        sequences=[mask, w_in_seq],
        outputs_info=[self.t_ol_t00, self.t_cs_t00],
        non_sequences=self.weights[:5] + [self.t_n_out],
        go_backwards=self.go_backwards,
        truncate_gradient=-1,
        # n_steps=50,
        strict=True,
        allow_gc=False,
    )
    return out_seq


###### LSTM without peepholes Layer ########################################
Example #11
Source File: recurrent_layer.py From recnet with MIT License
def t_forward_step(self, mask, cur_w_in_sig, pre_out_sig, pre_cell_sig,
                   w_ig_c, w_fg_c, w_og_c, w_ifco, b_ifco, t_n_out):

    ifco = T.add(T.dot(pre_out_sig, w_ifco), b_ifco)

    inner_act = self.activation
    gate_act = self.sigmoid()

    # Input Gate
    ig_t1 = gate_act(T.add(ifco[:, 0:t_n_out],
                           T.mul(pre_cell_sig, w_ig_c),
                           cur_w_in_sig[:, 0:t_n_out]))
    # Forget Gate
    fg_t1 = gate_act(T.add(ifco[:, 1 * t_n_out:2 * t_n_out],
                           T.mul(pre_cell_sig, w_fg_c),
                           cur_w_in_sig[:, 1 * t_n_out:2 * t_n_out]))
    # Cell State
    cs_t1 = T.add(T.mul(fg_t1, pre_cell_sig),
                  T.mul(ig_t1, inner_act(T.add(ifco[:, 2 * t_n_out:3 * t_n_out],
                                               cur_w_in_sig[:, 2 * t_n_out:3 * t_n_out]))))

    mask = T.addbroadcast(mask, 1)
    cs_t1 = mask * cs_t1 + (1. - mask) * pre_cell_sig
    # same functionality: cs_t1 = T.switch(mask, cs_t1, pre_cell_sig)

    # Output Gate
    og_t1 = gate_act(T.add(ifco[:, 3 * t_n_out:4 * t_n_out],
                           T.mul(cs_t1, w_og_c),
                           cur_w_in_sig[:, 3 * t_n_out:4 * t_n_out]))
    # Output LSTM
    out_sig = T.mul(og_t1, inner_act(cs_t1))
    out_sig = mask * out_sig + (1. - mask) * pre_out_sig

    return [out_sig, cs_t1]
Example #12
Source File: recurrent_layer.py From recnet with MIT License
def t_forward_step(self, mask, cur_w_in_sig, pre_out_sig, w_hidden_hidden, b_act):
    pre_w_sig = T.dot(pre_out_sig, w_hidden_hidden)
    inner_act = self.activation
    out_sig = inner_act(T.add(cur_w_in_sig, pre_w_sig, b_act))

    mask = T.addbroadcast(mask, 1)
    out_sig_m = mask * out_sig + (1. - mask) * pre_out_sig
    return [out_sig_m]
Example #13
Source File: ln_reccurent_layer.py From recnet with MIT License
def t_forward_step(self, mask, cur_w_in_sig, pre_out_sig, pre_cell_sig,
                   w_ifco, b_ifco, ln_b1, ln_s1, ln_b2, ln_s2, ln_b3, ln_s3,
                   t_n_out):

    cur_w_in_sig_ln = self.ln(cur_w_in_sig, ln_b1, ln_s1)
    pre_w_out_sig = T.dot(pre_out_sig, w_ifco)
    pre_w_out_sig_ln = self.ln(pre_w_out_sig, ln_b2, ln_s2)
    preact = T.add(cur_w_in_sig_ln, pre_w_out_sig_ln, b_ifco)

    inner_act = self.activation  # e.g. T.tanh or T.nnet.hard_sigmoid
    gate_act = self.sigmoid()  # e.g. T.nnet.sigmoid or T.nnet.hard_sigmoid

    # Input Gate
    ig_t1 = gate_act(preact[:, 0:t_n_out])
    # Forget Gate
    fg_t1 = gate_act(preact[:, 1 * t_n_out:2 * t_n_out])
    # Cell State
    cs_t1 = T.add(T.mul(fg_t1, pre_cell_sig),
                  T.mul(ig_t1, inner_act(preact[:, 2 * t_n_out:3 * t_n_out])))

    mask = T.addbroadcast(mask, 1)
    cs_t1 = mask * cs_t1 + (1. - mask) * pre_cell_sig
    cs_t1_ln = self.ln(cs_t1, ln_b3, ln_s3)

    # Output Gate
    og_t1 = gate_act(preact[:, 3 * t_n_out:4 * t_n_out])
    # Output LSTM
    out_sig = T.mul(og_t1, inner_act(cs_t1_ln))
    out_sig = mask * out_sig + (1. - mask) * pre_out_sig

    return [out_sig, cs_t1]
Example #14
Source File: ln_reccurent_layer.py From recnet with MIT License
def t_forward_step(self, mask, cur_w_in_sig, pre_out_sig, pre_cell_sig,
                   w_ig_c, w_fg_c, w_og_c, w_ifco, b_ifco,
                   ln_b1, ln_s1, ln_b2, ln_s2, ln_b3, ln_s3, t_n_out):

    cur_w_in_sig_ln = self.ln(cur_w_in_sig, ln_b1, ln_s1)
    pre_w_out_sig = T.dot(pre_out_sig, w_ifco)
    pre_w_out_sig_ln = self.ln(pre_w_out_sig, ln_b2, ln_s2)
    preact = T.add(cur_w_in_sig_ln, pre_w_out_sig_ln, b_ifco)

    inner_act = self.activation  # e.g. T.tanh or T.nnet.hard_sigmoid
    gate_act = self.sigmoid()  # e.g. T.nnet.hard_sigmoid

    # Input Gate
    ig_t1 = gate_act(T.add(preact[:, 0:t_n_out],
                           T.mul(pre_cell_sig, w_ig_c)))
    # Forget Gate
    fg_t1 = gate_act(T.add(preact[:, 1 * t_n_out:2 * t_n_out],
                           T.mul(pre_cell_sig, w_fg_c)))
    # Cell State
    cs_t1 = T.add(T.mul(fg_t1, pre_cell_sig),
                  T.mul(ig_t1, inner_act(preact[:, 2 * t_n_out:3 * t_n_out])))

    mask = T.addbroadcast(mask, 1)
    cs_t1 = mask * cs_t1 + (1. - mask) * pre_cell_sig
    # same functionality: cs_t1 = T.switch(mask, cs_t1, pre_cell_sig)
    cs_t1_ln = self.ln(cs_t1, ln_b3, ln_s3)

    # Output Gate
    og_t1 = gate_act(T.add(preact[:, 3 * t_n_out:4 * t_n_out],
                           T.mul(cs_t1_ln, w_og_c)))
    # Output LSTM
    out_sig = T.mul(og_t1, inner_act(cs_t1_ln))
    out_sig = mask * out_sig + (1. - mask) * pre_out_sig

    return [out_sig, cs_t1]
Example #15
Source File: ln_reccurent_layer.py From recnet with MIT License
def t_forward_step(self, mask, cur_w_in_sig, pre_out_sig, w_hidden_hidden,
                   b_act, ln_s1, ln_b1, ln_s2, ln_b2):
    pre_w_sig = T.dot(pre_out_sig, w_hidden_hidden)
    inner_act = self.activation

    pre_w_sig_ln = self.ln(pre_w_sig, ln_b1, ln_s1)
    cur_w_in_sig_ln = self.ln(cur_w_in_sig, ln_b2, ln_s2)

    out_sig = inner_act(T.add(cur_w_in_sig_ln, pre_w_sig_ln, b_act))

    mask = T.addbroadcast(mask, 1)
    out_sig_m = mask * out_sig + (1. - mask) * pre_out_sig
    return [out_sig_m]
Example #16
Source File: recurrent_layer.py From recnet with MIT License
def t_forward_step(self, mask, cur_w_in_sig, pre_out_sig, pre_cell_sig,
                   w_ifco, b_ifco, t_n_out):

    ifco = T.add(T.dot(pre_out_sig, w_ifco), b_ifco)

    inner_act = self.activation
    gate_act = self.sigmoid()

    # Input Gate
    ig_t1 = gate_act(T.add(ifco[:, 0:t_n_out],
                           cur_w_in_sig[:, 0:t_n_out]))
    # Forget Gate
    fg_t1 = gate_act(T.add(ifco[:, 1 * t_n_out:2 * t_n_out],
                           cur_w_in_sig[:, 1 * t_n_out:2 * t_n_out]))
    # Cell State
    cs_t1 = T.add(T.mul(fg_t1, pre_cell_sig),
                  T.mul(ig_t1, inner_act(T.add(ifco[:, 2 * t_n_out:3 * t_n_out],
                                               cur_w_in_sig[:, 2 * t_n_out:3 * t_n_out]))))

    mask = T.addbroadcast(mask, 1)
    cs_t1 = mask * cs_t1 + (1. - mask) * pre_cell_sig
    # same functionality: cs_t1 = T.switch(mask, cs_t1, pre_cell_sig)

    # Output Gate
    og_t1 = gate_act(T.add(ifco[:, 3 * t_n_out:4 * t_n_out],
                           cur_w_in_sig[:, 3 * t_n_out:4 * t_n_out]))
    # Output LSTM
    out_sig = T.mul(og_t1, inner_act(cs_t1))
    out_sig = mask * out_sig + (1. - mask) * pre_out_sig

    return [out_sig, cs_t1]
Example #17
Source File: output_layer.py From recnet with MIT License
def sequence_iteration(self, output, mask, use_dropout=0, dropout_value=0.5):
    dot_product = T.dot(output, self.t_w_out)
    linear_o = T.add(dot_product, self.t_b_out)

    mask = T.addbroadcast(mask, 2)  # TODO: necessary?
    output = T.mul(mask, linear_o) + T.mul((1. - mask), 1e-6)

    return output  # result


### TEST FUNCTIONS
# TODO: make a new file with test functions
Example #18
Source File: recurrent_layer.py From recnet with MIT License
def sequence_iteration(self, in_seq, mask, use_dropout, dropout_value=1):
    in_seq_d = T.switch(use_dropout,
                        (in_seq *
                         self.trng.binomial(in_seq.shape,
                                            p=dropout_value, n=1,
                                            dtype=in_seq.dtype)),
                        in_seq)

    w_in_seq = T.add(T.dot(in_seq_d, self.weights[2]), self.weights[3])

    [out_seq, cell_seq], updates = theano.scan(
        fn=self.t_forward_step,
        sequences=[mask, w_in_seq],
        outputs_info=[self.t_ol_t00, self.t_cs_t00],
        non_sequences=self.weights[:2] + [self.t_n_out],
        go_backwards=self.go_backwards,
        truncate_gradient=-1,
        # n_steps=50,
        strict=True,
        allow_gc=False,
    )
    return out_seq


###### GRU Layer ########################################
Example #19
Source File: layers.py From Neural-Photo-Editor with MIT License
def get_output_for(self, inputs, deterministic=False, **kwargs):
    alpha, beta = inputs
    # return 2*T.true_div(alpha, T.add(alpha, beta) + 1e-8) - 1
    return 2 * (alpha / (alpha + beta + 1e-8)) - 1


# Convenience function to produce a residual pre-activation MDCL block
Example #20
Source File: linear.py From TextDetector with GNU General Public License v3.0
def _lmul(self, x, T):
    raise NotImplementedError()
    # results = [t._lmul(x, T)]
    # return tensor.add(*results)
Example #21
Source File: basic.py From attention-lvcsr with MIT License
def __radd__(right, left):
    return add(left, right)
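For context, __radd__ is Python's reflected-addition hook: here the tensor is the right operand of + ("right" is self), so number-plus-tensor expressions still build an Add node with the operands in written order. A minimal sketch (x being any TensorVariable):

y1 = x + 5  # x.__add__(5)  -> add(x, 5)
y2 = 5 + x  # x.__radd__(5) -> add(5, x)

Both build the same elementwise Add apply node; only the operand order differs.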
Example #22
Source File: basic.py From D-VAE with MIT License
def __radd__(right, left):
    return add(left, right)
Example #23
Source File: test_nnet.py From D-VAE with MIT License
def test_softmax_optimizations_w_bias2(self):
    x = tensor.matrix('x')
    b = tensor.vector('b')
    c = tensor.vector('c')
    one_of_n = tensor.lvector('one_of_n')
    op = crossentropy_categorical_1hot

    fgraph = gof.FunctionGraph(
        [x, b, c, one_of_n],
        [op(softmax_op(T.add(x, b, c)), one_of_n)])
    assert fgraph.outputs[0].owner.op == op

    # print 'BEFORE'
    # for node in fgraph.toposort():
    #     print node.op
    # print '----'

    theano.compile.mode.optdb.query(
        theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)

    # print 'AFTER'
    # for node in fgraph.toposort():
    #     print node.op
    # print '===='

    assert len(fgraph.toposort()) == 3
    assert str(fgraph.outputs[0].owner.op) == 'OutputGuard'
    assert (fgraph.outputs[0].owner.inputs[0].owner.op ==
            crossentropy_softmax_argmax_1hot_with_bias)
Example #24
Source File: layers_theano.py From visual_dynamics with MIT License
def set_layer_param_tags(layer, params=None, **tags):
    """
    If params is None, update tags of all parameters, else only update tags
    of parameters in params.
    """
    for param, param_tags in layer.params.items():
        if params is None or param in params:
            for tag, value in tags.items():
                if value:
                    param_tags.add(tag)
                else:
                    param_tags.discard(tag)
Example #25
Source File: layers_theano.py From visual_dynamics with MIT License
def __init__(self, incomings, **kwargs):
    super(BatchwiseSumLayer, self).__init__(incomings, T.add, **kwargs)
Example #26
Source File: basic.py From attention-lvcsr with MIT License
def __add__(left, right):
    return add(left, right)
Example #27
Source File: basic.py From D-VAE with MIT License
def __add__(left, right):
    return add(left, right)
Example #28
Source File: test_nnet.py From attention-lvcsr with MIT License
def test_softmax_optimizations_w_bias2(self):
    x = tensor.matrix('x')
    b = tensor.vector('b')
    c = tensor.vector('c')
    one_of_n = tensor.lvector('one_of_n')
    op = crossentropy_categorical_1hot

    fgraph = gof.FunctionGraph(
        [x, b, c, one_of_n],
        [op(softmax_op(T.add(x, b, c)), one_of_n)])
    assert fgraph.outputs[0].owner.op == op

    # print 'BEFORE'
    # for node in fgraph.toposort():
    #     print node.op
    # print '----'

    theano.compile.mode.optdb.query(
        theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)

    # print 'AFTER'
    # for node in fgraph.toposort():
    #     print node.op
    # print '===='

    assert len(fgraph.toposort()) == 3
    assert str(fgraph.outputs[0].owner.op) == 'OutputGuard'
    assert (fgraph.outputs[0].owner.inputs[0].owner.op ==
            crossentropy_softmax_argmax_1hot_with_bias)
Example #29
Source File: models.py From drmad with MIT License
def __init__(self, x, y, args):
    self.params_theta = []
    self.params_lambda = []
    self.params_weight = []
    if args.dataset == 'mnist':
        input_size = (None, 1, 28, 28)
    elif args.dataset == 'cifar10':
        input_size = (None, 3, 32, 32)
    else:
        raise AssertionError

    layers = [ll.InputLayer(input_size)]
    self.penalty = theano.shared(np.array(0.))

    # conv1
    layers.append(Conv2DLayerWithReg(args, layers[-1], 20, 5))
    self.add_params_to_self(args, layers[-1])
    layers.append(ll.MaxPool2DLayer(layers[-1], pool_size=2, stride=2))

    # conv2
    layers.append(Conv2DLayerWithReg(args, layers[-1], 50, 5))
    self.add_params_to_self(args, layers[-1])
    layers.append(ll.MaxPool2DLayer(layers[-1], pool_size=2, stride=2))

    # fc1
    layers.append(DenseLayerWithReg(args, layers[-1], num_units=500))
    self.add_params_to_self(args, layers[-1])

    # softmax
    layers.append(DenseLayerWithReg(args, layers[-1], num_units=10,
                                    nonlinearity=nonlinearities.softmax))
    self.add_params_to_self(args, layers[-1])

    self.layers = layers
    self.y = ll.get_output(layers[-1], x, deterministic=False)
    self.prediction = T.argmax(self.y, axis=1)
    # self.penalty = penalty if penalty != 0. else T.constant(0.)
    print(self.params_lambda)
    # time.sleep(20)

    # cost function
    self.loss = T.mean(categorical_crossentropy(self.y, y))
    self.lossWithPenalty = T.add(self.loss, self.penalty)
    print "loss and losswithpenalty", type(self.loss), type(self.lossWithPenalty)
Example #30
Source File: linear.py From TextDetector with GNU General Public License v3.0
def _lmul(self, x, T):
    if T:
        if len(self.col_shape()) > 1:
            x2 = x.flatten(2)
        else:
            x2 = x
        n_rows = x2.shape[0]
        offset = 0
        xWlist = []
        assert len(self._col_sizes) == len(self._Wlist)
        for size, W in zip(self._col_sizes, self._Wlist):
            # split the output rows into pieces
            x_s = x2[:, offset:offset + size]
            # multiply each piece by one transform
            xWlist.append(W.lmul(x_s.reshape((n_rows,) + W.col_shape()), T))
            offset += size
        # sum the results
        rval = tensor.add(*xWlist)
    else:
        # multiply the input by each transform
        xWlist = [W.lmul(x, T).flatten(2) for W in self._Wlist]
        # join the results
        rval = tensor.join(1, *xWlist)
    return rval