Python mxnet.ndarray.max() Examples
The following are 12 code examples of mxnet.ndarray.max().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module mxnet.ndarray, or try the search function.
![](https://www.programcreek.com/common/static/images/search.png)
Example #1
Source File: tensor.py From dgl with Apache License 2.0 | 6 votes |
def pad_packed_tensor(input, lengths, value, l_min=None):
    """Unpack a packed tensor into a padded batch tensor.

    Rows of ``input`` belonging to the i-th segment (segment sizes given by
    ``lengths``) are scattered into row i of the output; unused slots are
    filled with ``value``.

    :param input: packed tensor of shape (sum(lengths), *rest).
    :param lengths: per-segment lengths, either an nd.NDArray or a Python sequence.
    :param value: fill value for padding slots.
    :param l_min: optional lower bound on the padded length.
    :return: tensor of shape (len(lengths), max_len, *rest).
    """
    old_shape = input.shape
    if isinstance(lengths, nd.NDArray):
        # BUG FIX: the original computed as_scalar(input.max()), i.e. the max
        # of the tensor *values*; the padded length must come from `lengths`,
        # mirroring the builtins.max(lengths) branch below.
        max_len = as_scalar(lengths.max())
    else:
        max_len = builtins.max(lengths)
    if l_min is not None:
        max_len = builtins.max(max_len, l_min)
    batch_size = len(lengths)
    ctx = input.context
    dtype = input.dtype
    x = nd.full((batch_size * max_len, *old_shape[1:]), value, ctx=ctx, dtype=dtype)
    # Flat destination indices: segment i occupies rows [i*max_len, i*max_len + l).
    index = []
    for i, l in enumerate(lengths):
        index.extend(range(i * max_len, i * max_len + l))
    index = nd.array(index, ctx=ctx)
    return scatter_row(x, index, input).reshape(batch_size, max_len, *old_shape[1:])
Example #2
Source File: lstm_crf.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 5 votes |
def log_sum_exp(vec):
    """Numerically stable log(sum(exp(vec))): shift by the max before exponentiating."""
    shift = nd.max(vec).asscalar()
    stabilized = nd.exp(vec - shift)
    return nd.log(nd.sum(stabilized)) + shift


# Model
Example #3
Source File: lstm_crf.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 5 votes |
def _viterbi_decode(self, feats): backpointers = [] # Initialize the viterbi variables in log space vvars = nd.full((1, self.tagset_size), -10000.) vvars[0, self.tag2idx[START_TAG]] = 0 for feat in feats: bptrs_t = [] # holds the backpointers for this step viterbivars_t = [] # holds the viterbi variables for this step for next_tag in range(self.tagset_size): # next_tag_var[i] holds the viterbi variable for tag i at the # previous step, plus the score of transitioning # from tag i to next_tag. # We don't include the emission scores here because the max # does not depend on them (we add them in below) next_tag_var = vvars + self.transitions.data()[next_tag] best_tag_id = argmax(next_tag_var) bptrs_t.append(best_tag_id) viterbivars_t.append(next_tag_var[0, best_tag_id]) # Now add in the emission scores, and assign vvars to the set # of viterbi variables we just computed vvars = (nd.concat(*viterbivars_t, dim=0) + feat).reshape((1, -1)) backpointers.append(bptrs_t) # Transition to STOP_TAG terminal_var = vvars + self.transitions.data()[self.tag2idx[STOP_TAG]] best_tag_id = argmax(terminal_var) path_score = terminal_var[0, best_tag_id] # Follow the back pointers to decode the best path. best_path = [best_tag_id] for bptrs_t in reversed(backpointers): best_tag_id = bptrs_t[best_tag_id] best_path.append(best_tag_id) # Pop off the start tag (we dont want to return that to the caller) start = best_path.pop() assert start == self.tag2idx[START_TAG] # Sanity check best_path.reverse() return path_score, best_path
Example #4
Source File: tensor.py From dgl with Apache License 2.0 | 5 votes |
def max(input, dim):
    """Reduce ``input`` by taking the maximum along dimension ``dim``."""
    reduced = nd.max(input, axis=dim)
    return reduced
Example #5
Source File: tensor.py From dgl with Apache License 2.0 | 5 votes |
def reduce_max(input):
    """Return the maximum over all elements of ``input``."""
    result = input.max()
    return result
Example #6
Source File: custom_layers.py From d-SNE with Apache License 2.0 | 5 votes |
def hybrid_forward(self, F, preds, label):
    """Contrastive loss: pull positive pairs together, push negatives beyond margin.

    :param F: mxnet symbol/ndarray API handle.
    :param preds: pairwise difference features; distance is the L2 norm over axis 1.
    :param label: 1 for a positive pair, 0 for a negative pair.
    :return: per-sample loss label*d^2 + (1-label)*max(m - d, 0)^2.
    """
    label = label.astype('float32')
    dist = F.sqrt(F.sum(F.square(preds), axis=1))
    # BUG FIX: the original used F.max(self._m - dist, 0), which in MXNet is a
    # reduction along axis 0, not an element-wise clamp. The hinge term needs
    # max(m - d, 0) per element, which is exactly F.relu.
    return label * F.square(dist) + (1 - label) * F.square(F.relu(self._m - dist))
Example #7
Source File: custom_layers.py From d-SNE with Apache License 2.0 | 5 votes |
def hybrid_forward(self, F, fts, ys, ftt, yt):
    """
    Semantic Alignment Loss
    :param F: Function
    :param yt: label for the target domain [N]
    :param ftt: features for the target domain [N, K]
    :param ys: label for the source domain [M]
    :param fts: features for the source domain [M, K]
    :return: per-target-sample hinge loss
             relu(max intra-class dist - min inter-class dist + margin)
    """
    if self._fn:
        # Normalize ft
        fts = F.L2Normalization(fts, mode='instance')
        ftt = F.L2Normalization(ftt, mode='instance')

    # Tile both sets so dists[i, j] is the squared distance between
    # target sample i and source sample j.
    fts_rpt = F.broadcast_to(fts.expand_dims(axis=0), shape=(self._bs_tgt, self._bs_src, self._embed_size))
    ftt_rpt = F.broadcast_to(ftt.expand_dims(axis=1), shape=(self._bs_tgt, self._bs_src, self._embed_size))

    dists = F.sum(F.square(ftt_rpt - fts_rpt), axis=2)

    yt_rpt = F.broadcast_to(yt.expand_dims(axis=1), shape=(self._bs_tgt, self._bs_src)).astype('int32')
    ys_rpt = F.broadcast_to(ys.expand_dims(axis=0), shape=(self._bs_tgt, self._bs_src)).astype('int32')

    # Masks over (target, source) pairs: same class vs. different class.
    y_same = F.equal(yt_rpt, ys_rpt).astype('float32')
    y_diff = F.not_equal(yt_rpt, ys_rpt).astype('float32')

    intra_cls_dists = dists * y_same
    inter_cls_dists = dists * y_diff

    # Overwrite same-class entries with the row maximum so they can never
    # win the row-wise min over inter-class distances below.
    max_dists = F.max(dists, axis=1, keepdims=True)
    max_dists = F.broadcast_to(max_dists, shape=(self._bs_tgt, self._bs_src))
    revised_inter_cls_dists = F.where(y_same, max_dists, inter_cls_dists)

    max_intra_cls_dist = F.max(intra_cls_dists, axis=1)
    min_inter_cls_dist = F.min(revised_inter_cls_dists, axis=1)

    # Margin hinge: the nearest different-class source must be at least
    # `margin` farther than the farthest same-class source.
    loss = F.relu(max_intra_cls_dist - min_inter_cls_dist + self._margin)

    return loss
Example #8
Source File: lstm_crf.py From training_results_v0.6 with Apache License 2.0 | 5 votes |
def log_sum_exp(vec):
    """Overflow-safe log(sum(exp(vec))) using the max-shift trick."""
    offset = nd.max(vec).asscalar()
    total = nd.sum(nd.exp(vec - offset))
    return nd.log(total) + offset


# Model
Example #9
Source File: lstm_crf.py From training_results_v0.6 with Apache License 2.0 | 5 votes |
def _viterbi_decode(self, feats): backpointers = [] # Initialize the viterbi variables in log space vvars = nd.full((1, self.tagset_size), -10000.) vvars[0, self.tag2idx[START_TAG]] = 0 for feat in feats: bptrs_t = [] # holds the backpointers for this step viterbivars_t = [] # holds the viterbi variables for this step for next_tag in range(self.tagset_size): # next_tag_var[i] holds the viterbi variable for tag i at the # previous step, plus the score of transitioning # from tag i to next_tag. # We don't include the emission scores here because the max # does not depend on them (we add them in below) next_tag_var = vvars + self.transitions.data()[next_tag] best_tag_id = argmax(next_tag_var) bptrs_t.append(best_tag_id) viterbivars_t.append(next_tag_var[0, best_tag_id]) # Now add in the emission scores, and assign vvars to the set # of viterbi variables we just computed vvars = (nd.concat(*viterbivars_t, dim=0) + feat).reshape((1, -1)) backpointers.append(bptrs_t) # Transition to STOP_TAG terminal_var = vvars + self.transitions.data()[self.tag2idx[STOP_TAG]] best_tag_id = argmax(terminal_var) path_score = terminal_var[0, best_tag_id] # Follow the back pointers to decode the best path. best_path = [best_tag_id] for bptrs_t in reversed(backpointers): best_tag_id = bptrs_t[best_tag_id] best_path.append(best_tag_id) # Pop off the start tag (we dont want to return that to the caller) start = best_path.pop() assert start == self.tag2idx[START_TAG] # Sanity check best_path.reverse() return path_score, best_path
Example #10
Source File: model.py From NER_BiLSTM_CRF_Chinese with Apache License 2.0 | 5 votes |
def log_sum_exp(vec):
    """Compute log(sum(exp(vec))) without overflow by subtracting the max first."""
    peak = nd.max(vec).asscalar()
    exp_sum = nd.sum(nd.exp(vec - peak))
    return nd.log(exp_sum) + peak
Example #11
Source File: lstm_crf.py From SNIPER-mxnet with Apache License 2.0 | 5 votes |
def log_sum_exp(vec):
    """Stable log-sum-exp: log(sum(exp(vec - m))) + m, with m = max(vec)."""
    m = nd.max(vec).asscalar()
    summed = nd.sum(nd.exp(vec - m))
    return nd.log(summed) + m


# Model
Example #12
Source File: lstm_crf.py From SNIPER-mxnet with Apache License 2.0 | 5 votes |
def _viterbi_decode(self, feats): backpointers = [] # Initialize the viterbi variables in log space vvars = nd.full((1, self.tagset_size), -10000.) vvars[0, self.tag2idx[START_TAG]] = 0 for feat in feats: bptrs_t = [] # holds the backpointers for this step viterbivars_t = [] # holds the viterbi variables for this step for next_tag in range(self.tagset_size): # next_tag_var[i] holds the viterbi variable for tag i at the # previous step, plus the score of transitioning # from tag i to next_tag. # We don't include the emission scores here because the max # does not depend on them (we add them in below) next_tag_var = vvars + self.transitions[next_tag] best_tag_id = argmax(next_tag_var) bptrs_t.append(best_tag_id) viterbivars_t.append(next_tag_var[0, best_tag_id]) # Now add in the emission scores, and assign vvars to the set # of viterbi variables we just computed vvars = (nd.concat(*viterbivars_t, dim=0) + feat).reshape((1, -1)) backpointers.append(bptrs_t) # Transition to STOP_TAG terminal_var = vvars + self.transitions[self.tag2idx[STOP_TAG]] best_tag_id = argmax(terminal_var) path_score = terminal_var[0, best_tag_id] # Follow the back pointers to decode the best path. best_path = [best_tag_id] for bptrs_t in reversed(backpointers): best_tag_id = bptrs_t[best_tag_id] best_path.append(best_tag_id) # Pop off the start tag (we dont want to return that to the caller) start = best_path.pop() assert start == self.tag2idx[START_TAG] # Sanity check best_path.reverse() return path_score, best_path