Python numpy.int32() Examples
The following are 30 code examples showing how to use numpy.int32(). They are extracted from open source projects; you can go to the original project or source file by following the link above each example.
You may also want to check out all available functions and classes of the numpy module, or try the search function.
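Before diving into the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of the numpy.int32 patterns that recur in them: constructing a typed scalar, allocating an int32 array, and casting an existing array.

    import numpy as np

    # Construct a 32-bit integer scalar; it behaves like a Python int in arithmetic.
    x = np.int32(7)
    print(type(x), x + 1)

    # Allocate an array with an explicit int32 dtype.
    counts = np.zeros((4,), dtype=np.int32)
    print(counts.dtype)

    # Cast a float array to int32; fractional parts are truncated toward zero.
    scaled = (np.array([0.25, 0.75, 1.5]) * 255).astype(np.int32)
    print(scaled)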
Example 1
Project: cat-bbs Author: aleju File: common.py License: MIT License | 6 votes |
def draw_heatmap(img, heatmap, alpha=0.5):
    """Draw a heatmap overlay over an image."""
    assert len(heatmap.shape) == 2 or \
        (len(heatmap.shape) == 3 and heatmap.shape[2] == 1)
    assert img.dtype in [np.uint8, np.int32, np.int64]
    assert heatmap.dtype in [np.float32, np.float64]

    if img.shape[0:2] != heatmap.shape[0:2]:
        heatmap_rs = np.clip(heatmap * 255, 0, 255).astype(np.uint8)
        heatmap_rs = ia.imresize_single_image(
            heatmap_rs[..., np.newaxis],
            img.shape[0:2],
            interpolation="nearest"
        )
        heatmap = np.squeeze(heatmap_rs) / 255.0

    cmap = plt.get_cmap('jet')
    heatmap_cmapped = cmap(heatmap)
    heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
    heatmap_cmapped = heatmap_cmapped * 255
    mix = (1-alpha) * img + alpha * heatmap_cmapped
    mix = np.clip(mix, 0, 255).astype(np.uint8)
    return mix
Example 2
Project: cat-bbs Author: aleju File: create_dataset.py License: MIT License | 6 votes |
def load_keypoints(image_filepath, image_height, image_width):
    """Load facial keypoints of one image."""
    fp_keypoints = "%s.cat" % (image_filepath,)
    if not os.path.isfile(fp_keypoints):
        raise Exception(
            "Could not find keypoint coordinates for image '%s'." % (image_filepath,))
    else:
        coords_raw = open(fp_keypoints, "r").readlines()[0].strip().split(" ")
        coords_raw = [abs(int(coord)) for coord in coords_raw]
        keypoints = []
        #keypoints_arr = np.zeros((9*2,), dtype=np.int32)
        for i in range(1, len(coords_raw), 2):  # first element is the number of coords
            x = np.clip(coords_raw[i], 0, image_width-1)
            y = np.clip(coords_raw[i+1], 0, image_height-1)
            keypoints.append((x, y))

        return keypoints
Example 3
Project: Att-ChemdNER Author: lingluodlut File: theano_backend.py License: Apache License 2.0 | 6 votes |
def in_top_k(predictions, targets, k):
    '''Returns whether the `targets` are in the top `k` `predictions`

    # Arguments
        predictions: A tensor of shape batch_size x classes and type float32.
        targets: A tensor of shape batch_size and type int32 or int64.
        k: An int, number of top elements to consider.

    # Returns
        A tensor of shape batch_size and type int. output_i is 1 if
        targets_i is within top-k values of predictions_i
    '''
    predictions_top_k = T.argsort(predictions)[:, -k:]
    result, _ = theano.map(lambda prediction, target: any(equal(prediction, target)),
                           sequences=[predictions_top_k, targets])
    return result


# CONVOLUTIONS
Example 4
Project: Att-ChemdNER Author: lingluodlut File: theano_backend.py License: Apache License 2.0 | 6 votes |
def ctc_path_probs(predict, Y, alpha=1e-4):
    smoothed_predict = (1 - alpha) * predict[:, Y] + alpha * np.float32(1.) / Y.shape[0]
    L = T.log(smoothed_predict)
    zeros = T.zeros_like(L[0])
    log_first = zeros

    f_skip_idxs = ctc_create_skip_idxs(Y)
    b_skip_idxs = ctc_create_skip_idxs(Y[::-1])  # there should be a shortcut to calculating this

    def step(log_f_curr, log_b_curr, f_active, log_f_prev, b_active, log_b_prev):
        f_active_next, log_f_next = ctc_update_log_p(f_skip_idxs, zeros, f_active, log_f_curr, log_f_prev)
        b_active_next, log_b_next = ctc_update_log_p(b_skip_idxs, zeros, b_active, log_b_curr, log_b_prev)
        return f_active_next, log_f_next, b_active_next, log_b_next

    [f_active, log_f_probs, b_active, log_b_probs], _ = theano.scan(
        step, sequences=[L, L[::-1, ::-1]],
        outputs_info=[np.int32(1), log_first, np.int32(1), log_first])

    idxs = T.arange(L.shape[1]).dimshuffle('x', 0)
    mask = (idxs < f_active.dimshuffle(0, 'x')) & (idxs < b_active.dimshuffle(0, 'x'))[::-1, ::-1]
    log_probs = log_f_probs + log_b_probs[::-1, ::-1] - L
    return log_probs, mask
Example 5
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection Author: Sunarker File: imdb.py License: MIT License | 6 votes |
def create_roidb_from_box_list(self, box_list, gt_roidb):
    assert len(box_list) == self.num_images, \
        'Number of boxes must match number of ground-truth images'

    roidb = []
    if gt_roidb is not None:
        for i in range(self.num_images):
            boxes = box_list[i]
            real_label = gt_roidb[i]['labels']
            roidb.append({'boxes': boxes,
                          'labels': np.array([real_label], dtype=np.int32),
                          'flipped': False})
    else:
        for i in range(self.num_images):
            boxes = box_list[i]
            roidb.append({'boxes': boxes,
                          'labels': np.zeros((1, 0), dtype=np.int32),
                          'flipped': False})
    return roidb
Example 6
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection Author: Sunarker File: snippets.py License: MIT License | 6 votes |
def generate_anchors_pre(height, width, feat_stride, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):
    """A wrapper function to generate anchors given different scales.
    Also return the number of anchors in variable 'length'.
    """
    anchors = generate_anchors(ratios=np.array(anchor_ratios), scales=np.array(anchor_scales))
    A = anchors.shape[0]
    shift_x = np.arange(0, width) * feat_stride
    shift_y = np.arange(0, height) * feat_stride
    shift_x, shift_y = np.meshgrid(shift_x, shift_y)
    shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
    K = shifts.shape[0]
    # width changes faster, so here it is H, W, C
    anchors = anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
    anchors = anchors.reshape((K * A, 4)).astype(np.float32, copy=False)
    length = np.int32(anchors.shape[0])

    return anchors, length
Example 7
Project: gated-graph-transformer-network Author: hexahedria File: ggtnn_train.py License: MIT License | 6 votes |
def assemble_batch(story_fns, num_answer_words, format_spec):
    stories = []
    for sfn in story_fns:
        with gzip.open(sfn, 'rb') as f:
            cvtd_story, _, _, _ = pickle.load(f)
        stories.append(cvtd_story)
    sents, graphs, queries, answers = zip(*stories)
    cvtd_sents = np.array(sents, np.int32)
    cvtd_queries = np.array(queries, np.int32)
    max_ans_len = max(len(a) for a in answers)
    cvtd_answers = np.stack([convert_answer(answer, num_answer_words, format_spec, max_ans_len)
                             for answer in answers])
    num_new_nodes, new_node_strengths, new_node_ids, next_edges = zip(*graphs)
    num_new_nodes = np.stack(num_new_nodes)
    new_node_strengths = np.stack(new_node_strengths)
    new_node_ids = np.stack(new_node_ids)
    next_edges = np.stack(next_edges)
    return cvtd_sents, cvtd_queries, cvtd_answers, num_new_nodes, new_node_strengths, new_node_ids, next_edges
Example 8
Project: disentangling_conditional_gans Author: zalandoresearch File: dataset_tool.py License: MIT License | 6 votes |
def create_cifar100(tfrecord_dir, cifar100_dir):
    print('Loading CIFAR-100 from "%s"' % cifar100_dir)
    import pickle
    with open(os.path.join(cifar100_dir, 'train'), 'rb') as file:
        data = pickle.load(file, encoding='latin1')
    images = data['data'].reshape(-1, 3, 32, 32)
    labels = np.array(data['fine_labels'])

    assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (50000,) and labels.dtype == np.int32
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 99
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0

    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#----------------------------------------------------------------------------
Example 9
Project: disentangling_conditional_gans Author: zalandoresearch File: dataset.py License: MIT License | 6 votes |
def __init__(self, resolution=1024, num_channels=3, dtype='uint8', dynamic_range=[0, 255],
             label_size=0, label_dtype='float32'):
    self.resolution = resolution
    self.resolution_log2 = int(np.log2(resolution))
    self.shape = [num_channels, resolution, resolution]
    self.dtype = dtype
    self.dynamic_range = dynamic_range
    self.label_size = label_size
    self.label_dtype = label_dtype
    self._tf_minibatch_var = None
    self._tf_lod_var = None
    self._tf_minibatch_np = None
    self._tf_labels_np = None

    assert self.resolution == 2 ** self.resolution_log2
    with tf.name_scope('Dataset'):
        self._tf_minibatch_var = tf.Variable(np.int32(0), name='minibatch_var')
        self._tf_lod_var = tf.Variable(np.int32(0), name='lod_var')
Example 10
Project: mmdetection Author: open-mmlab File: maskiou_head.py License: Apache License 2.0 | 6 votes |
def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):
    """Compute area ratio of the gt mask inside the proposal and the gt
    mask of the corresponding instance."""
    num_pos = pos_proposals.size(0)
    if num_pos > 0:
        area_ratios = []
        proposals_np = pos_proposals.cpu().numpy()
        pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
        # compute mask areas of gt instances (batch processing for speedup)
        gt_instance_mask_area = gt_masks.areas
        for i in range(num_pos):
            gt_mask = gt_masks[pos_assigned_gt_inds[i]]

            # crop the gt mask inside the proposal
            bbox = proposals_np[i, :].astype(np.int32)
            gt_mask_in_proposal = gt_mask.crop(bbox)

            ratio = gt_mask_in_proposal.areas[0] / (
                gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7)
            area_ratios.append(ratio)
        area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to(
            pos_proposals.device)
    else:
        area_ratios = pos_proposals.new_zeros((0, ))
    return area_ratios
Example 11
Project: neural-fingerprinting Author: StephanZheng File: run_attacks_and_defenses.py License: BSD 3-Clause "New" or "Revised" License | 6 votes |
def _load_dataset_clipping(self, dataset_dir, epsilon):
    """Helper method which loads dataset and determines clipping range.

    Args:
      dataset_dir: location of the dataset.
      epsilon: maximum allowed size of adversarial perturbation.
    """
    self.dataset_max_clip = {}
    self.dataset_min_clip = {}
    self._dataset_image_count = 0
    for fname in os.listdir(dataset_dir):
        if not fname.endswith('.png'):
            continue
        image_id = fname[:-4]
        image = np.array(
            Image.open(os.path.join(dataset_dir, fname)).convert('RGB'))
        image = image.astype('int32')
        self._dataset_image_count += 1
        self.dataset_max_clip[image_id] = np.clip(image + epsilon, 0, 255).astype('uint8')
        self.dataset_min_clip[image_id] = np.clip(image - epsilon, 0, 255).astype('uint8')
Example 12
Project: neural-fingerprinting Author: StephanZheng File: test_imagenet_attacks.py License: BSD 3-Clause "New" or "Revised" License | 6 votes |
def load_images(input_dir, metadata_file_path, batch_shape):
    """Retrieve numpy arrays of images and labels, read from a directory."""
    num_images = batch_shape[0]
    with open(metadata_file_path) as input_file:
        reader = csv.reader(input_file)
        header_row = next(reader)
        rows = list(reader)

    row_idx_image_id = header_row.index('ImageId')
    row_idx_true_label = header_row.index('TrueLabel')
    images = np.zeros(batch_shape)
    labels = np.zeros(num_images, dtype=np.int32)
    for idx in xrange(num_images):
        row = rows[idx]
        filepath = os.path.join(input_dir, row[row_idx_image_id] + '.png')

        with tf.gfile.Open(filepath, 'rb') as f:
            image = np.array(
                Image.open(f).convert('RGB')).astype(np.float) / 255.0
        images[idx, :, :, :] = image
        labels[idx] = int(row[row_idx_true_label])
    return images, labels
Example 13
Project: deep-learning-note Author: wdxtub File: 18_basic_tfrecord.py License: MIT License | 6 votes |
def read_from_tfrecord(filenames):
    tfrecord_file_queue = tf.train.string_input_producer(filenames, name='queue')
    reader = tf.TFRecordReader()
    _, tfrecord_serialized = reader.read(tfrecord_file_queue)

    tfrecord_features = tf.parse_single_example(tfrecord_serialized,
                                                features={
                                                    'label': tf.FixedLenFeature([], tf.int64),
                                                    'shape': tf.FixedLenFeature([], tf.string),
                                                    'image': tf.FixedLenFeature([], tf.string),
                                                }, name='features')

    image = tf.decode_raw(tfrecord_features['image'], tf.uint8)
    shape = tf.decode_raw(tfrecord_features['shape'], tf.int32)
    image = tf.reshape(image, shape)
    label = tfrecord_features['label']
    return label, shape, image
Example 14
Project: deep-learning-note Author: wdxtub File: w2v_utils.py License: MIT License | 6 votes |
def batch_gen(download_url, expected_byte, vocab_size, batch_size,
              skip_window, visual_fld):
    local_dest = 'data/w2v/text8.zip'
    utils.download_one_file(download_url, local_dest, expected_byte)
    words = read_data(local_dest)
    dictionary, _ = build_vocab(words, vocab_size, visual_fld)
    index_words = convert_words_to_index(words, dictionary)
    del words  # to save memory
    single_gen = generate_sample(index_words, skip_window)

    while True:
        center_batch = np.zeros(batch_size, dtype=np.int32)
        target_batch = np.zeros([batch_size, 1])
        for index in range(batch_size):
            center_batch[index], target_batch[index] = next(single_gen)
        yield center_batch, target_batch
Example 15
Project: fullrmc Author: bachiraoun File: run.py License: GNU Affero General Public License v3.0 | 6 votes |
def bonds_CH(ENGINE, rang=10, recur=10, refine=False, explore=True):
    groups = []
    for idx in range(0, ENGINE.pdb.numberOfAtoms, 13):
        groups.append( np.array([idx+1 , idx+2 ], dtype=np.int32) )  # C1-H11
        groups.append( np.array([idx+1 , idx+3 ], dtype=np.int32) )  # C1-H12
        groups.append( np.array([idx+4 , idx+5 ], dtype=np.int32) )  # C2-H21
        groups.append( np.array([idx+4 , idx+6 ], dtype=np.int32) )  # C2-H22
        groups.append( np.array([idx+7 , idx+8 ], dtype=np.int32) )  # C3-H31
        groups.append( np.array([idx+7 , idx+9 ], dtype=np.int32) )  # C3-H32
        groups.append( np.array([idx+10, idx+11], dtype=np.int32) )  # C4-H41
        groups.append( np.array([idx+10, idx+12], dtype=np.int32) )  # C4-H42
    ENGINE.set_groups(groups)
    [g.set_move_generator(DistanceAgitationGenerator(amplitude=0.2, agitate=(True, True))) for g in ENGINE.groups]
    # set selector
    if refine or explore:
        gs = RecursiveGroupSelector(RandomSelector(ENGINE), recur=recur, refine=refine, explore=explore)
        ENGINE.set_group_selector(gs)
    # number of steps
    nsteps = recur * len(ENGINE.groups)
    for stepIdx in range(rang):
        LOGGER.info("Running 'bonds_CH' mode step %i" % (stepIdx))
        ENGINE.run(numberOfSteps=nsteps, saveFrequency=nsteps)

# ############ RUN H-C-H ANGLES ############ #
Example 16
Project: fullrmc Author: bachiraoun File: run.py License: GNU Affero General Public License v3.0 | 6 votes |
def angles_HCH(ENGINE, rang=5, recur=10, refine=False, explore=True):
    groups = []
    for idx in range(0, ENGINE.pdb.numberOfAtoms, 13):
        groups.append( np.array([idx+1 , idx+2,  idx+3 ], dtype=np.int32) )  # H11-C1-H12
        groups.append( np.array([idx+4 , idx+5,  idx+6 ], dtype=np.int32) )  # H21-C2-H22
        groups.append( np.array([idx+7 , idx+8,  idx+9 ], dtype=np.int32) )  # H31-C3-H32
        groups.append( np.array([idx+10, idx+11, idx+12], dtype=np.int32) )  # H41-C4-H42
    ENGINE.set_groups(groups)
    [g.set_move_generator(AngleAgitationGenerator(amplitude=5)) for g in ENGINE.groups]
    # set selector
    if refine or explore:
        gs = RecursiveGroupSelector(RandomSelector(ENGINE), recur=recur, refine=refine, explore=explore)
        ENGINE.set_group_selector(gs)
    # number of steps
    nsteps = recur * len(ENGINE.groups)
    for stepIdx in range(rang):
        LOGGER.info("Running 'angles_HCH' mode step %i" % (stepIdx))
        ENGINE.run(numberOfSteps=nsteps, saveFrequency=nsteps)

# ############ RUN ATOMS ############ #
Example 17
Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: data.py License: Apache License 2.0 | 6 votes |
def tokenize(self, path):
    """Tokenizes a text file."""
    assert os.path.exists(path)
    # Add words to the dictionary
    with open(path, 'r') as f:
        tokens = 0
        for line in f:
            words = line.split() + ['<eos>']
            tokens += len(words)
            for word in words:
                self.dictionary.add_word(word)

    # Tokenize file content
    with open(path, 'r') as f:
        ids = np.zeros((tokens,), dtype='int32')
        token = 0
        for line in f:
            words = line.split() + ['<eos>']
            for word in words:
                ids[token] = self.dictionary.word2idx[word]
                token += 1

    return mx.nd.array(ids, dtype='int32')
Example 18
Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: utils.py License: Apache License 2.0 | 6 votes |
def sample_mog(prob, mean, var, rng):
    """Sample from independent mixture of gaussian (MoG) distributions

    Each batch is an independent MoG distribution.

    Parameters
    ----------
    prob : numpy.ndarray
        mixture probability of each gaussian. Shape --> (batch_num, center_num)
    mean : numpy.ndarray
        mean of each gaussian. Shape --> (batch_num, center_num, sample_dim)
    var : numpy.ndarray
        variance of each gaussian. Shape --> (batch_num, center_num, sample_dim)
    rng : numpy.random.RandomState

    Returns
    -------
    ret : numpy.ndarray
        sampling result. Shape --> (batch_num, sample_dim)
    """
    gaussian_inds = sample_categorical(prob, rng).astype(numpy.int32)
    mean = mean[numpy.arange(mean.shape[0]), gaussian_inds, :]
    var = var[numpy.arange(mean.shape[0]), gaussian_inds, :]
    ret = sample_normal(mean=mean, var=var, rng=rng)
    return ret
Example 19
Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: ndarray.py License: Apache License 2.0 | 6 votes |
def dtype(self):
    """Data-type of the array's elements.

    Returns
    -------
    numpy.dtype
        This NDArray's data type.

    Examples
    --------
    >>> x = mx.nd.zeros((2,3))
    >>> x.dtype
    <type 'numpy.float32'>
    >>> y = mx.nd.zeros((2,3), dtype='int32')
    >>> y.dtype
    <type 'numpy.int32'>
    """
    mx_dtype = ctypes.c_int()
    check_call(_LIB.MXNDArrayGetDType(
        self.handle, ctypes.byref(mx_dtype)))
    return _DTYPE_MX_TO_NP[mx_dtype.value]
Example 20
Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: ndarray.py License: Apache License 2.0 | 6 votes |
def asnumpy(self):
    """Returns a ``numpy.ndarray`` object with value copied from this array.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = x.asnumpy()
    >>> type(y)
    <type 'numpy.ndarray'>
    >>> y
    array([[ 1.,  1.,  1.],
           [ 1.,  1.,  1.]], dtype=float32)
    >>> z = mx.nd.ones((2,3), dtype='int32')
    >>> z.asnumpy()
    array([[1, 1, 1],
           [1, 1, 1]], dtype=int32)
    """
    data = np.empty(self.shape, dtype=self.dtype)
    check_call(_LIB.MXNDArraySyncCopyToCPU(
        self.handle,
        data.ctypes.data_as(ctypes.c_void_p),
        ctypes.c_size_t(data.size)))
    return data
Example 21
Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: ndarray.py License: Apache License 2.0 | 6 votes |
def asscalar(self):
    """Returns a scalar whose value is copied from this array.

    This function is equivalent to ``self.asnumpy()[0]``. This NDArray must have shape (1,).

    Examples
    --------
    >>> x = mx.nd.ones((1,), dtype='int32')
    >>> x.asscalar()
    1
    >>> type(x.asscalar())
    <type 'numpy.int32'>
    """
    if self.shape != (1,):
        raise ValueError("The current array is not a scalar")
    return self.asnumpy()[0]
Example 22
Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: test_symbol.py License: Apache License 2.0 | 6 votes |
def test_zero_prop2():
    x = mx.sym.Variable('x')
    idx = mx.sym.Variable('idx')
    y = mx.sym.batch_take(x, idx)
    z = mx.sym.stop_gradient(y)
    exe = z.simple_bind(ctx=mx.cpu(), x=(10, 10), idx=(10,),
                        type_dict={'x': np.float32, 'idx': np.int32})
    exe.forward()
    exe.backward()

    # The following bind() should throw an exception. We discard the expected stderr
    # output for this operation only in order to keep the test logs clean.
    with discard_stderr():
        try:
            y.simple_bind(ctx=mx.cpu(), x=(10, 10), idx=(10,),
                          type_dict={'x': np.float32, 'idx': np.int32})
        except:
            return

    assert False
Example 23
Project: Black-Box-Audio Author: rtaori File: run_audio_attack.py License: MIT License | 5 votes |
def setup_graph(self, input_audio_batch, target_phrase):
    batch_size = input_audio_batch.shape[0]
    weird = (input_audio_batch.shape[1] - 1) // 320
    logits_arg2 = np.tile(weird, batch_size)
    dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
    dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)

    pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
    seq_len = np.tile(weird, batch_size).astype(np.int32)

    with tf.variable_scope('', reuse=tf.AUTO_REUSE):
        inputs = tf.placeholder(tf.float32, shape=pass_in.shape, name='a')
        len_batch = tf.placeholder(tf.float32, name='b')
        arg2_logits = tf.placeholder(tf.int32, shape=logits_arg2.shape, name='c')
        arg1_dense = tf.placeholder(tf.float32, shape=dense_arg1.shape, name='d')
        arg2_dense = tf.placeholder(tf.int32, shape=dense_arg2.shape, name='e')
        len_seq = tf.placeholder(tf.int32, shape=seq_len.shape, name='f')

        logits = get_logits(inputs, arg2_logits)
        target = ctc_label_dense_to_sparse(arg1_dense, arg2_dense, len_batch)
        ctcloss = tf.nn.ctc_loss(labels=tf.cast(target, tf.int32),
                                 inputs=logits, sequence_length=len_seq)
        decoded, _ = tf.nn.ctc_greedy_decoder(logits, arg2_logits, merge_repeated=True)

        sess = tf.Session()
        saver = tf.train.Saver(tf.global_variables())
        saver.restore(sess, "models/session_dump")

    func1 = lambda a, b, c, d, e, f: sess.run(ctcloss,
        feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
    func2 = lambda a, b, c, d, e, f: sess.run([ctcloss, decoded],
        feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
    return (func1, func2)
Example 24
Project: Black-Box-Audio Author: rtaori File: run_audio_attack.py License: MIT License | 5 votes |
def getctcloss(self, input_audio_batch, target_phrase, decode=False):
    batch_size = input_audio_batch.shape[0]
    weird = (input_audio_batch.shape[1] - 1) // 320
    logits_arg2 = np.tile(weird, batch_size)
    dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
    dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)

    pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
    seq_len = np.tile(weird, batch_size).astype(np.int32)

    if decode:
        return self.funcs[1](pass_in, batch_size, logits_arg2, dense_arg1, dense_arg2, seq_len)
    else:
        return self.funcs[0](pass_in, batch_size, logits_arg2, dense_arg1, dense_arg2, seq_len)
Example 25
Project: vergeml Author: mme File: env.py License: MIT License | 5 votes |
def _convert(self, vals):
    res = {}
    for k, v in vals.items():
        if isinstance(v, (np.int, np.int8, np.int16, np.int32, np.int64)):
            v = int(v)
        elif isinstance(v, (np.float, np.float16, np.float32, np.float64)):
            v = float(v)
        elif isinstance(v, Labels):
            v = list(v)
        elif isinstance(v, np.ndarray):
            v = v.tolist()
        elif isinstance(v, dict):
            v = self._convert(v)
        res[k] = v
    return res
Example 26
Project: vergeml Author: mme File: env.py License: MIT License | 5 votes |
def _toscalar(v):
    if isinstance(v, (np.float16, np.float32, np.float64,
                      np.uint8, np.uint16, np.uint32, np.uint64,
                      np.int8, np.int16, np.int32, np.int64)):
        return np.asscalar(v)
    else:
        return v
Example 27
Project: cat-bbs Author: aleju File: bbs.py License: MIT License | 5 votes |
def draw_on_image(self, img, color=[0, 255, 0], alpha=1.0, thickness=1, copy=copy):
    assert img.dtype in [np.uint8, np.float32, np.int32, np.int64]

    result = np.copy(img) if copy else img

    for i in range(thickness):
        y = [self.y1-i, self.y1-i, self.y2+i, self.y2+i]
        x = [self.x1-i, self.x2+i, self.x2+i, self.x1-i]
        rr, cc = draw.polygon_perimeter(y, x, shape=img.shape)
        if alpha >= 0.99:
            result[rr, cc, 0] = color[0]
            result[rr, cc, 1] = color[1]
            result[rr, cc, 2] = color[2]
        else:
            if result.dtype == np.float32:
                result[rr, cc, 0] = (1 - alpha) * result[rr, cc, 0] + alpha * color[0]
                result[rr, cc, 1] = (1 - alpha) * result[rr, cc, 1] + alpha * color[1]
                result[rr, cc, 2] = (1 - alpha) * result[rr, cc, 2] + alpha * color[2]
                result = np.clip(result, 0, 255)
            else:
                result = result.astype(np.float32)
                result[rr, cc, 0] = (1 - alpha) * result[rr, cc, 0] + alpha * color[0]
                result[rr, cc, 1] = (1 - alpha) * result[rr, cc, 1] + alpha * color[1]
                result[rr, cc, 2] = (1 - alpha) * result[rr, cc, 2] + alpha * color[2]
                result = np.clip(result, 0, 255).astype(np.uint8)

    return result
Example 28
Project: Att-ChemdNER Author: lingluodlut File: utils.py License: Apache License 2.0 | 5 votes |
def evaluate(parameters, f_eval, raw_sentences, parsed_sentences,
             id_to_tag, dictionary_tags, filename,
             useAttend=True):
#{{{
    """
    Evaluate current model using CoNLL script.
    """
    n_tags = len(id_to_tag)
    predictions = []
    count = np.zeros((n_tags, n_tags), dtype=np.int32)

    for raw_sentence, data in zip(raw_sentences, parsed_sentences):
        input = create_input(data, parameters, False, useAttend=useAttend)
        if parameters['crf']:
            y_preds = np.array(f_eval(*input))
        else:
            y_preds = f_eval(*input).argmax(axis=1)
        y_reals = np.array(data['tags']).astype(np.int32)
        assert len(y_preds) == len(y_reals)
        p_tags = [id_to_tag[y_pred] for y_pred in y_preds]
        r_tags = [id_to_tag[y_real] for y_real in y_reals]
        if parameters['tag_scheme'] == 'iobes':
            p_tags = iobes_iob(p_tags)
            r_tags = iobes_iob(r_tags)
        for i, (y_pred, y_real) in enumerate(zip(y_preds, y_reals)):
            new_line = " ".join(raw_sentence[i][:-1] + [r_tags[i], p_tags[i]])
            predictions.append(new_line)
            count[y_real, y_pred] += 1
        predictions.append("")

    # write to file
    with codecs.open(filename, 'w', 'utf8') as f:
        f.write("\n".join(predictions))

    return get_perf(filename)
#}}}
Example 29
Project: Att-ChemdNER Author: lingluodlut File: model.py License: Apache License 2.0 | 5 votes |
def modelScore(self, tag_ids, scores, s_len):
#{{{
    """
    ATTENTION: THIS FUNCTION IS SYMBOLIC PROGRAMMING.
    This function returns the score of our model for a fixed sentence label.
    @param:
        scores: the scores matrix, the output of our model
        tag: a numpy array, which represents one sentence label
        sent_lens: a scalar number, the length of the sentence.
            Because the sentence label is expanded to the max sentence length,
            we use this to get the original sentence label.
    @return:
        a scalar number, the score
    """
#{{{
    n_tags = self.output_dim
    transitions = self.transitions
    # score from tags_scores
    real_path_score = scores[T.arange(s_len), tag_ids].sum()

    # Score from transitions
    b_id = theano.shared(value=np.array([n_tags], dtype=np.int32))
    e_id = theano.shared(value=np.array([n_tags + 1], dtype=np.int32))
    padded_tags_ids = T.concatenate([b_id, tag_ids, e_id], axis=0)
    real_path_score += transitions[
        padded_tags_ids[T.arange(s_len + 1)],
        padded_tags_ids[T.arange(s_len + 1) + 1]
    ].sum()
    # to prevent T.exp(real_path_score) from being inf
    # return real_path_score
    return real_path_score / s_len
#}}}
#}}}
Example 30
Project: Att-ChemdNER Author: lingluodlut File: theano_backend.py License: Apache License 2.0 | 5 votes |
def arange(start, stop=None, step=1, dtype='int32'):
    '''Creates a 1-D tensor containing a sequence of integers.

    The function arguments use the same convention as
    Theano's arange: if only one argument is provided,
    it is in fact the "stop" argument.

    The default type of the returned tensor is 'int32' to
    match TensorFlow's default.
    '''
    return T.arange(start, stop=stop, step=step, dtype=dtype)