Python caffe.TEST Examples

The following are 30 code examples of caffe.TEST. They are drawn from open-source projects; you can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module caffe, or try the search function.
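In pycaffe, caffe.TEST is the phase constant (the counterpart of caffe.TRAIN) passed to caffe.Net to instantiate a network in inference mode. A minimal, self-contained sketch of the typical pattern; the file names are placeholders:

import caffe
import numpy as np

caffe.set_mode_cpu()
net = caffe.Net('deploy.prototxt', 'weights.caffemodel', caffe.TEST)  # placeholder paths
net.blobs['data'].data[...] = np.zeros(net.blobs['data'].data.shape, dtype=np.float32)
outputs = net.forward()  # dict mapping output blob names to ndarrays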
Example #1
Source File: caffe2pytorch.py    From BDCN with MIT License
def main():
    args = parse_args()
    sys.path.append(args.caffe_root)
    import caffe
    net = caffe.Net(args.caffe_proto, args.caffe_model, caffe.TEST)
    print(dir(net.layers[1].blobs[0]))
    # for i, x in enumerate(net._layer_names):
    #     print(x, net.layers[i].type, end=' ')
    #     if x in net.params:
    #         print(net.params[x][0].shape)
    #     print('\n')
    model = bulid(net)
    torch.save(model.state_dict(), args.caffe_proto.split('.')[0] + '.pth')
    f = open(args.caffe_proto.split('.')[0] + '.py', 'w')
    stdout = sys.stdout
    sys.stdout = f
    print('model = ', model)
    sys.stdout = stdout
    f.close()
Example #2
Source File: CaffeUNet_2D.py    From peters-stuff with GNU General Public License v3.0
def add_batchnormscale(self, input, name):

        if True:  # necessary?
            batch_norm_param = {'moving_average_fraction': 0.95, 'use_global_stats': True}
            param = [dict(lr_mult=0), dict(lr_mult=0), dict(lr_mult=0)]
            l = L.BatchNorm(input, name=name + '_bn', batch_norm_param=batch_norm_param, param=param, include={'phase': caffe.TEST}, ntop=1)
            setattr(self.net_spec, name + '_bn', l)

            batch_norm_param = {'moving_average_fraction': 0.95, 'use_global_stats': False}
            l = L.BatchNorm(input, name=name + '_bn', top=name + '_bn', batch_norm_param=batch_norm_param, param=param, include={'phase': caffe.TRAIN}, ntop=0)
            setattr(self.net_spec, name + '_bn' + '_train', l)

            l = L.Scale(getattr(self.net_spec, name + '_bn'), scale_param={'bias_term': True})
            setattr(self.net_spec, name, l)
        else:  # here without split in use_global_stats True/False
            l = L.Scale(L.BatchNorm(input), scale_param={'bias_term': True})
            setattr(self.net_spec, name, l)

        return l 
Example #3
Source File: CaffeUNet_3D.py    From peters-stuff with GNU General Public License v3.0
def add_batchnormscale(self, input, name):

        if True: # necessary?
            batch_norm_param = {'moving_average_fraction': 0.95, 'use_global_stats': True}
            param = [dict(lr_mult=0), dict(lr_mult=0), dict(lr_mult=0)]
            l = L.BatchNorm(input, name=name + '_bn', batch_norm_param=batch_norm_param, param=param, include={'phase': caffe.TEST}, ntop=1)
            setattr(self.net_spec, name + '_bn', l)

            batch_norm_param = {'moving_average_fraction': 0.95, 'use_global_stats': False}
            l = L.BatchNorm(input, name=name + '_bn', top=name + '_bn', batch_norm_param=batch_norm_param, param=param, include={'phase': caffe.TRAIN}, ntop=0)
            setattr(self.net_spec, name + '_bn' + '_train', l)

            l = L.Scale(getattr(self.net_spec, name + '_bn'), scale_param={'bias_term': True})
            setattr(self.net_spec, name, l)
        else: # here without split in use_global_stats True/False
            l = L.Scale(L.BatchNorm(input), scale_param={'bias_term': True})
            setattr(self.net_spec, name, l)

        return l 
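Both UNet examples above emit a TEST-phase BatchNorm (use_global_stats=True) plus an in-place TRAIN-phase twin sharing the same top blob, followed by a Scale layer. A minimal NetSpec sketch of the TEST-phase half, with illustrative layer names and an assumed input shape:

import caffe
from caffe import layers as L

ns = caffe.NetSpec()
ns.data = L.Input(shape=dict(dim=[1, 3, 128, 128]))  # assumed input shape
ns.conv1 = L.Convolution(ns.data, num_output=16, kernel_size=3)
ns.conv1_bn = L.BatchNorm(ns.conv1,
                          batch_norm_param={'moving_average_fraction': 0.95,
                                            'use_global_stats': True},
                          include={'phase': caffe.TEST})
ns.conv1_scale = L.Scale(ns.conv1_bn, scale_param={'bias_term': True})
print(ns.to_proto())  # inspect the generated prototxt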
Example #4
Source File: phocnet_evaluator.py    From phocnet with BSD 3-Clause "New" or "Revised" License
def _load_pretrained_phocnet(self, phocnet_bin_path, gpu_id, debug_mode, deploy_proto_path, phoc_size):
        # create a deploy proto file
        self.logger.info('Saving PHOCNet deploy proto file to %s...', deploy_proto_path)
        mpg = ModelProtoGenerator(initialization='msra', use_cudnn_engine=gpu_id is not None)
        proto = mpg.get_phocnet(word_image_lmdb_path=None, phoc_lmdb_path=None, phoc_size=phoc_size, generate_deploy=True)
        with open(deploy_proto_path, 'w') as proto_file:
            proto_file.write(str(proto))
            
        # create the Caffe PHOCNet object
        self.logger.info('Creating PHOCNet...')
        if debug_mode:
            phocnet = caffe.Net(deploy_proto_path, phocnet_bin_path, caffe.TEST)
        else:
            with Suppressor():
                phocnet = caffe.Net(deploy_proto_path, phocnet_bin_path, caffe.TEST)
        return phocnet 
Example #5
Source File: test.py    From PytorchConverter with BSD 2-Clause "Simplified" License
def TestCaffe(proto_path, model_path, inputs, LayerCheck, ModelInd):
    net = caffe.Net(proto_path, model_path, caffe.TEST)
    net.blobs['data'].data[...] = inputs
    print('input blob:')
    print(net.blobs['data'].data[...])

    net.forward()

    if LayerCheck == 'Softmax_1':
        PrintLabel(net.blobs[LayerCheck].data[0].flatten())
    else:
        print(net.blobs[LayerCheck].data[0][...].flatten())
        if (ModelInd == 17):
            result_img = net.blobs[LayerCheck].data[0] * 255
            result_img = result_img.astype(int)
            result_img = np.transpose(result_img, (1, 2, 0))
            result_img = result_img[..., ::-1]
            cv2.imwrite("AnimeNet_result.png", result_img)
        if (ModelInd == 91):
            result_img = net.blobs[LayerCheck].data[0] * 255
            result_img = result_img.astype(int)
            result_img = np.transpose(result_img, (1, 2, 0))
            result_img = result_img[..., ::-1]
            cv2.imwrite("Upsample_result.png", result_img) 
Example #6
Source File: predict.py    From iLID with MIT License
def predict(sound_file, prototxt, model, output_path):

  image_files = wav_to_images(sound_file, output_path)

  caffe.set_mode_cpu()
  net = caffe.Classifier(prototxt, model,
                         #image_dims=(224, 224)
                         #channel_swap=(2,1,0),
                         raw_scale=255 # caffe.io.load_image returns values in [0, 1]; scale them to [0, 255]
                         #caffe.TEST
                        )

  input_images = np.array([caffe.io.load_image(image_file, color=False) for image_file in image_files["melfilter"]])
  #input_images = np.swapaxes(input_images, 1, 3)

  #prediction = net.forward_all(data=input_images)["prob"]

  prediction = net.predict(input_images, False)  # predict takes any number of images, and formats them for the Caffe net automatically

  print(prediction)
  print('prediction shape:', prediction[0].shape)
  print('predicted class:', prediction[0].argmax())
  print(image_files)

  return prediction 
Example #7
Source File: extractor.py    From MMdnn with MIT License
def inference(cls, architecture_name, architecture, path, image_path):
        if cls.sanity_check(architecture_name):
            import caffe
            import numpy as np
            net = caffe.Net(architecture[0], architecture[1], caffe.TEST)

            func = TestKit.preprocess_func['caffe'][architecture_name]
            img = func(image_path)
            img = np.transpose(img, (2, 0, 1))
            img = np.expand_dims(img, 0)
            net.blobs['data'].data[...] = img
            predict = np.squeeze(net.forward()[net._output_list[-1]][0])
            predict = np.squeeze(predict)
            return predict

        else:
            return None 
Example #8
Source File: model.py    From facade-segmentation with MIT License
def net(weights=WEIGHTS):
    """
    Get the caffe net that has been trained to segment facade features.

    This initializes or re-initializes the global network with weights. There are certainly side-effects!

    The weights default to a caffe model that is part of the same source code repository as this file.
    They can be changed by setting the I12_WEIGHTS environment variable, by passing a command line argument
    to some programs, or programmatically (of course).

    :param weights: The weights to use for the net.
    :return:
    """
    global WEIGHTS
    global _net
    if _net is None or weights != WEIGHTS:
        if weights is not None:
            WEIGHTS = weights
        _net = caffe.Net(LAYERS, WEIGHTS, caffe.TEST)
    return _net 
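A hedged usage sketch for the function above; the 'data' blob name and the zero-filled input are placeholders for a properly preprocessed facade image:

import numpy as np

segnet = net()  # loads LAYERS/WEIGHTS on first call
segnet.blobs['data'].data[...] = np.zeros(segnet.blobs['data'].data.shape, dtype=np.float32)
outputs = segnet.forward()  # dict: output blob name -> ndarray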
Example #9
Source File: ssd_net.py    From Hand-Keypoint-Detection with GNU General Public License v3.0
def __init__(self, model_weights, model_def, threshold=0.5, GPU_MODE=False):
        if GPU_MODE:
            caffe.set_device(0)
            caffe.set_mode_gpu()
        else:
            caffe.set_mode_cpu()
        self.net = caffe.Net(model_def,  # defines the structure of the model
                        model_weights,  # contains the trained weights
                        caffe.TEST)  # use test mode (e.g., don't perform dropout)
        self.threshold = threshold
        self.transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
        self.transformer.set_transpose('data', (2, 0, 1))
        self.transformer.set_mean('data', np.array([127.0, 127.0, 127.0]))  # mean pixel
        self.transformer.set_raw_scale('data',
                                  255)  # the reference model operates on images in [0,255] range instead of [0,1]
        self.transformer.set_channel_swap('data', (2, 1, 0))  # the reference model has channels in BGR order instead of RGB
        image_resize = 300
        self.net.blobs['data'].reshape(1, 3, image_resize, image_resize) 
Example #10
Source File: predict.py    From cloudless with Apache License 2.0
def _initialize_caffe(deploy_file, input_weight_file, training_mean_pickle, inference_width,
            inference_height):
    """
    Initializes Caffe to prepare to run some data through the model for inference.
    """
    caffe.set_mode_gpu()
    net = caffe.Net(deploy_file, input_weight_file, caffe.TEST)

    # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
    transformer = caffe.io.Transformer({"data": net.blobs["data"].data.shape})
    # PIL.Image loads the data with the channel last.
    transformer.set_transpose("data", (2, 0, 1))
    # Mean pixel.
    transformer.set_mean("data", np.load(training_mean_pickle).mean(1).mean(1))
    # The reference model operates on images in [0, 255] range instead of [0, 1].
    transformer.set_raw_scale("data", 255)
    # The reference model has channels in BGR order instead of RGB.
    transformer.set_channel_swap("data", (2, 1, 0))

    net.blobs["data"].reshape(1, 3, inference_height, inference_width)

    return (net, transformer) 
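An illustrative follow-up showing how the returned (net, transformer) pair is typically used; the file names are placeholders and the output blob is assumed to be named 'prob':

import caffe
import numpy as np

net, transformer = _initialize_caffe('deploy.prototxt', 'weights.caffemodel',
                                     'training_mean.npy', 256, 256)
img = caffe.io.load_image('example.jpg')  # HxWx3 float array in [0, 1]
net.blobs['data'].data[...] = transformer.preprocess('data', img)
probs = net.forward()['prob'][0]
print('predicted class:', probs.argmax())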
Example #11
Source File: colorize_image.py    From interactive-deep-colorization with MIT License
def prep_net(self, gpu_id, prototxt_path='', caffemodel_path=''):
        import caffe
        print('gpu_id = %d, net_path = %s, model_path = %s' % (gpu_id, prototxt_path, caffemodel_path))
        if gpu_id == -1:
            caffe.set_mode_cpu()
        else:
            caffe.set_device(gpu_id)
            caffe.set_mode_gpu()
        self.gpu_id = gpu_id
        self.net = caffe.Net(prototxt_path, caffemodel_path, caffe.TEST)
        self.net_set = True

        # automatically set cluster centers
        if len(self.net.params[self.pred_ab_layer][0].data[...].shape) == 4 and self.net.params[self.pred_ab_layer][0].data[...].shape[1] == 313:
            print('Setting ab cluster centers in layer: %s' % self.pred_ab_layer)
            self.net.params[self.pred_ab_layer][0].data[:, :, 0, 0] = self.pts_in_hull.T

        # automatically set upsampling kernel
        for layer in self.net._layer_names:
            if layer[-3:] == '_us':
                print('Setting upsampling layer kernel: %s' % layer)
                self.net.params[layer][0].data[:, 0, :, :] = np.array(((.25, .5, .25, 0), (.5, 1., .5, 0), (.25, .5, .25, 0), (0, 0, 0, 0)))[np.newaxis, :, :]

    # ***** Call forward ***** 
Example #12
Source File: testify_pytorch_to_caffe_example.py    From nn_tools with MIT License
def test(net_caffe,net_torch,data_np,data_torch,args):
    blobs_caffe, rsts_caffe = forward_caffe(net_caffe, data_np)
    blobs_torch, rsts_torchs = forward_torch(net_torch, data_torch)
    # test the output of every layer
    for layer, value in blobs_caffe.items():
        if layer in blobs_torch:
            value_torch = blobs_torch[layer]
            value = value[0]
            if value.size != value_torch.size: continue
            if 'relu' in layer: continue
            try:
                np.testing.assert_almost_equal(value, value_torch, decimal=args.decimal)
                print("TEST layer {}: PASS".format(layer))
            except:
                print("TEST layer {}: FAIL".format(layer))
                # np.testing.assert_almost_equal(np.clip(value, min=0), np.clip(value_torch, min=0))
    # test the output
    print("TEST output")
    for rst_caffe,rst_torch in zip(rsts_caffe,rsts_torchs):
        np.testing.assert_almost_equal(rst_caffe, rst_torch, decimal=args.decimal)
    print("TEST output: PASS") 
Example #13
Source File: action_caffe.py    From temporal-segment-networks with BSD 2-Clause "Simplified" License
def __init__(self, net_proto, net_weights, device_id, input_size=None):
        caffe.set_mode_gpu()
        caffe.set_device(device_id)
        self._net = caffe.Net(net_proto, net_weights, caffe.TEST)

        input_shape = self._net.blobs['data'].data.shape

        if input_size is not None:
            input_shape = input_shape[:2] + input_size

        transformer = caffe.io.Transformer({'data': input_shape})

        if self._net.blobs['data'].data.shape[1] == 3:
            transformer.set_transpose('data', (2, 0, 1))  # move image channels to outermost dimension
            transformer.set_mean('data', np.array([104, 117, 123]))  # subtract the dataset-mean value in each channel
        else:
            pass # non RGB data need not use transformer

        self._transformer = transformer

        self._sample_shape = self._net.blobs['data'].data.shape 
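A sketch of how this wrapper's transformer is typically applied before a forward pass; the class name CaffeNet, the file paths, and the random frame are assumptions for illustration:

import numpy as np

extractor = CaffeNet('tsn_deploy.prototxt', 'tsn.caffemodel', device_id=0)
frame = np.random.rand(224, 224, 3).astype(np.float32)  # stand-in for an RGB frame
extractor._net.blobs['data'].data[0] = extractor._transformer.preprocess('data', frame)
scores = extractor._net.forward()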
Example #14
Source File: loadcaffe.py    From dataflow with Apache License 2.0
def load_caffe(model_desc, model_file):
    """
    Load a caffe model. You must be able to ``import caffe`` to use this
    function.

    Args:
        model_desc (str): path to caffe model description file (.prototxt).
        model_file (str): path to caffe model parameter file (.caffemodel).
    Returns:
        dict: the parameters.
    """
    with change_env('GLOG_minloglevel', '2'):
        import caffe
        caffe.set_mode_cpu()
        net = caffe.Net(model_desc, model_file, caffe.TEST)
    param_dict = CaffeLayerProcessor(net).process()
    logger.info("Model loaded from caffe. Params: " +
                ", ".join(sorted(param_dict.keys())))
    return param_dict 
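A brief usage sketch; the prototxt/caffemodel paths are placeholders, and the returned dict maps parameter names to numpy arrays:

params = load_caffe('resnet50_deploy.prototxt', 'resnet50.caffemodel')
for name in sorted(params):
    print(name, getattr(params[name], 'shape', None))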
Example #15
Source File: rtpose_shufflenetV2.py    From EverybodyDanceNow_reproduce_pytorch with MIT License
def convert_to_caffe(self, name):
        caffe_net = caffe.NetSpec()
        layer = L.Input(shape=dict(dim=[1, 3, args.image_hw, args.image_hw]))
        caffe_net.tops['data'] = layer
        slim.generate_caffe_prototxt(self, caffe_net, layer)
        print(caffe_net.to_proto())
        with open(name + '.prototxt', 'wb') as f:
            f.write(str(caffe_net.to_proto()).encode())
        caffe_net = caffe.Net(name + '.prototxt', caffe.TEST)
        slim.convert_pytorch_to_caffe(self, caffe_net)
        caffe_net.save(name + '.caffemodel') 
Example #16
Source File: conversion_imagenet.py    From MMdnn with MIT License
def caffe_parse(architecture_name, test_input_path):
        from mmdnn.conversion.examples.caffe.extractor import caffe_extractor

        # download model
        architecture_file, weight_file = caffe_extractor.download(architecture_name, TestModels.cachedir)

        # get original model prediction result
        original_predict = caffe_extractor.inference(architecture_name, (architecture_file, weight_file), TestModels.cachedir, test_input_path)
        del caffe_extractor

        # original to IR
        from mmdnn.conversion.caffe.transformer import CaffeTransformer
        transformer = CaffeTransformer(architecture_file, weight_file, "tensorflow", None, phase = 'TEST')
        graph = transformer.transform_graph()
        data = transformer.transform_data()
        del CaffeTransformer

        from mmdnn.conversion.caffe.writer import ModelSaver, PyWriter

        prototxt = graph.as_graph_def().SerializeToString()
        IR_file = TestModels.tmpdir + 'caffe_' + architecture_name + "_converted"
        pb_path = IR_file + '.pb'
        with open(pb_path, 'wb') as of:
            of.write(prototxt)
        print ("IR network structure is saved as [{}].".format(pb_path))

        import numpy as np
        npy_path = IR_file + '.npy'
        with open(npy_path, 'wb') as of:
            np.save(of, data)
        print ("IR weights are saved as [{}].".format(npy_path))

        if original_predict.ndim == 3:
            original_predict = np.transpose(original_predict, (1, 2, 0))

        return original_predict 
Example #17
Source File: model_caffe.py    From intermediate-cnn-features with Apache License 2.0
def __init__(self, name, prototxt, caffemodel):
        """
          Class initializer.

          Args:
            name: name of the CNN network
            prototxt: path to prototxt file of the pre-trained CNN model
            caffemodel: path to caffemodel file of the pre-trained CNN model

          Raises:
            ValueError: if the provided network name is not supported
        """
        self.net_name = name

        # intermediate convolutional layers to extract features
        if name == 'googlenet':
            self.desired_size = 224
            self.layers = ['inception_3a/output', 'inception_3b/output',
                      'inception_4a/output', 'inception_4b/output',
                      'inception_4c/output', 'inception_4d/output',
                      'inception_4e/output', 'inception_5a/output',
                      'inception_5b/output']
        elif name == 'resnet':
            self.desired_size = 224
            self.layers = ['res2c', 'res3b7', 'res4b35', 'res5c']
        elif name == 'vgg':
            self.desired_size = 224
            self.layers = ['conv2_1', 'conv2_2', 'conv3_1',
                           'conv3_2', 'conv3_3', 'conv4_1',
                           'conv4_2', 'conv4_3', 'conv5_1',
                           'conv5_2', 'conv5_3']
        else:
            raise ValueError('Network not found. Supported networks for Caffe framework: googlenet, vgg, resnet')

        # load network
        self.net = caffe.Net(prototxt, caffemodel, caffe.TEST)
        self.final_sz = np.sum(
            [self.net.blobs[layer].data.shape[1] for layer in self.layers]) 
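An illustrative extraction pass over the layers selected above; the wrapper class name CNN, the file paths, and the channel-wise max-pooling aggregation are assumptions, not part of the project code:

import numpy as np

model = CNN('vgg', 'vgg16_deploy.prototxt', 'vgg16.caffemodel')
frame = np.random.rand(3, model.desired_size, model.desired_size).astype(np.float32)
model.net.blobs['data'].reshape(1, *frame.shape)
model.net.blobs['data'].data[0] = frame
model.net.forward()
features = np.concatenate([model.net.blobs[l].data[0].max(axis=(1, 2)) for l in model.layers])
assert features.size == model.final_sz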
Example #18
Source File: slim.py    From EverybodyDanceNow_reproduce_pytorch with MIT License
def convert_to_caffe(self, name):
            caffe_net = caffe.NetSpec()
            layer = L.Input(shape=dict(dim=[1, 3, 224, 224]))
            caffe_net.tops['data'] = layer
            generate_caffe_prototxt(self, caffe_net, layer)
            print(caffe_net.to_proto())
            with open(name + '.prototxt', 'wb') as f:
                f.write(str(caffe_net.to_proto()).encode())
            caffe_net = caffe.Net(name + '.prototxt', caffe.TEST)
            convert_pytorch_to_caffe(self, caffe_net)
            caffe_net.save(name + '.caffemodel') 
Example #19
Source File: __init__.py    From saliency-2016-cvpr with MIT License
def _create_net(specfile, modelfile):
    if PYCAFFE_DIR not in sys.path:
        sys.path.insert(0, PYCAFFE_DIR)
    import caffe
    return caffe.Net(specfile, modelfile, caffe.TEST) 
Example #20
Source File: vgg16d.py    From SSENet-pytorch with MIT License
def convert_caffe_to_torch(caffemodel_path, prototxt_path='network/vgg16_20M.prototxt'):
    import caffe

    caffe_model = caffe.Net(prototxt_path, caffemodel_path, caffe.TEST)

    state_dict = {}
    for caffe_name in list(caffe_model.params.keys()):
        state_dict[caffe_name + '.weight'] = torch.from_numpy(caffe_model.params[caffe_name][0].data)
        state_dict[caffe_name + '.bias'] = torch.from_numpy(caffe_model.params[caffe_name][1].data)

    return state_dict
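A hedged follow-up; the caffemodel path is a placeholder, and loading into a torch module only works if its parameter names mirror the caffe layer names:

weights = convert_caffe_to_torch('vgg16_20M.caffemodel')
for key, tensor in weights.items():
    print(key, tuple(tensor.shape))
# my_model.load_state_dict(weights)  # only if my_model's parameter names match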
Example #21
Source File: caffemodel2npy.py    From TF-deeplab with Apache License 2.0
def load_caffe(model_desc, model_file):
    """
    return a dict of params
    """
    import caffe
    caffe.set_mode_cpu()
    net = caffe.Net(model_desc, model_file, caffe.TEST)
    param_dict = CaffeLayerProcessor(net).process()
    return param_dict 
Example #22
Source File: test.py    From ccnn with GNU General Public License v3.0
def __init__(self, prototxt, caffemodel, n_scales):       
        # Load a precomputed caffe model
        self.net = caffe.Net(prototxt, caffemodel, caffe.TEST)
        
        # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
        self.transformer = caffe.io.Transformer({'data': self.net.blobs['data_s0'].data.shape})
        self.transformer.set_transpose('data', (2, 0, 1)) # It's already RGB
        # Reshape net for the single input
        b_shape = self.net.blobs['data_s0'].data.shape
        self._n_scales = n_scales
        for s in range(n_scales):
            scale_name = 'data_s{}'.format(s)
            self.net.blobs[scale_name].reshape(b_shape[0],b_shape[1],b_shape[2],b_shape[3])

    # Probably not the most efficient way to do it...
Example #23
Source File: sampling_caption.py    From ppgn with MIT License
def __init__(self, lstm_definition, lstm_weights):

        self.lstm = caffe.Net(lstm_definition, lstm_weights, caffe.TEST) 
Example #24
Source File: sampling_class.py    From ppgn with MIT License
def get_code(encoder, path, layer, mask=None):
    '''
    Push the given image through an encoder (here, AlexNet) to get a code.
    '''

    # set up the inputs for the net: 
    image_size = encoder.blobs['data'].shape[2:]    # data blob is (1, 3, 227, 227), so image_size is (227, 227)
    images = np.zeros_like(encoder.blobs["data"].data, dtype='float32')

    in_image = scipy.misc.imread(path)
    in_image = scipy.misc.imresize(in_image, (image_size[0], image_size[1]))
    images[0] = np.transpose(in_image, (2, 0, 1))   # convert to (3, 227, 227) format

    data = images[:,::-1]   # convert from RGB to BGR

    # subtract the ImageNet mean
    image_mean = scipy.io.loadmat('misc/ilsvrc_2012_mean.mat')['image_mean'] # (256, 256, 3)
    topleft = util.compute_topleft(image_size, image_mean.shape[:2])
    image_mean = image_mean[topleft[0]:topleft[0]+image_size[0], topleft[1]:topleft[1]+image_size[1]]   # crop the image mean
    data -= np.expand_dims(np.transpose(image_mean, (2,0,1)), 0)    # mean is already BGR

    if mask is not None:
        data *= mask

    # initialize the encoder
    encoder = caffe.Net(settings.encoder_definition, settings.encoder_weights, caffe.TEST)

    # extract the features
    encoder.forward(data=data)
    features = encoder.blobs[layer].data.copy()

    return features, data 
Example #25
Source File: test_caffe.py    From rl-attack-detection with MIT License
def __init__(self, mean, weight, K, num_act, num_step=1, data_path='test'):
        self.K = K
        self.num_act = num_act
        self.num_step = num_step

        caffe.set_mode_gpu()
        caffe.set_device(0)

        test_net_file, net_proto = N.create_netfile(1, data_path, mean, K, K,
            1, num_act, num_step=self.num_step, mode='test')

        self.test_net = caffe.Net(test_net_file, caffe.TEST)
        self.test_net.copy_from(weight) 
Example #26
Source File: feature_extractor.py    From fk-visual-search with Apache License 2.0
def __init__(self, path_to_deploy_file, path_to_model_file, input_layer_name="data_q", gpu_mode=True, device_id=1,
                 height=None, width=None):
        self.path_to_deploy_file = path_to_deploy_file
        self.path_to_model_file = path_to_model_file
        if gpu_mode:
            caffe.set_mode_gpu()
            caffe.set_device(device_id)
        else:
            caffe.set_mode_cpu()
        self.net = caffe.Net(path_to_deploy_file, path_to_model_file, caffe.TEST)
        self.input_layer_name = input_layer_name
        self.height = height or self.net.blobs[self.input_layer_name].data.shape[2]
        self.width = width or self.net.blobs[self.input_layer_name].data.shape[3] 
Example #27
Source File: train_faster_rcnn_alt_opt.py    From faster-rcnn-resnet with MIT License
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
                 rpn_test_prototxt=None):
    """Use a trained RPN to generate proposals.
    """

    cfg.TEST.RPN_PRE_NMS_TOP_N = -1     # no pre NMS filtering
    cfg.TEST.RPN_POST_NMS_TOP_N = 2000  # limit top boxes after NMS
    print('RPN model: {}'.format(rpn_model_path))
    print('Using config:')
    pprint.pprint(cfg)

    import caffe
    _init_caffe(cfg)

    # NOTE: the matlab implementation computes proposals on flipped images, too.
    # We compute them on the image once and then flip the already computed
    # proposals. This might cause a minor loss in mAP (less proposal jittering).
    imdb = get_imdb(imdb_name)
    print('Loaded dataset `{:s}` for proposal generation'.format(imdb.name))

    # Load RPN and configure output directory
    rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
    output_dir = get_output_dir(imdb)
    print('Output will be saved to `{:s}`'.format(output_dir))
    # Generate proposals on the imdb
    rpn_proposals = imdb_proposals(rpn_net, imdb)
    # Write proposals to disk and send the proposal file path through the
    # multiprocessing queue
    rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
    rpn_proposals_path = os.path.join(
        output_dir, rpn_net_name + '_proposals.pkl')
    with open(rpn_proposals_path, 'wb') as f:
        cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
    print('Wrote RPN proposals to {}'.format(rpn_proposals_path))
    queue.put({'proposal_path': rpn_proposals_path}) 
Example #28
Source File: eval.py    From PeleeNet with Apache License 2.0
def create_model(num_classes, engine='torch'):

    if engine == 'torch':
        if args.arch == 'peleenet':
            model = PeleeNet(num_classes=num_classes)
        else:
            print("=> unsupported model '{}'. creating PeleeNet by default.".format(args.arch))
            model = PeleeNet(num_classes=num_classes)

        # print(model)

        model = torch.nn.DataParallel(model).cuda()

        if args.weights:
            if os.path.isfile(args.weights):
                print("=> loading checkpoint '{}'".format(args.weights))
                checkpoint = torch.load(args.weights)
                model.load_state_dict(checkpoint['state_dict'])

            else:
                print("=> no checkpoint found at '{}'".format(args.weights))



        cudnn.benchmark = True

    else:
        # create caffe model
        import caffe 
        caffe.set_mode_gpu()
        caffe.set_device(0)

        model_def = args.deploy
        model_weights = args.weights 

        model = caffe.Net(model_def,      # defines the structure of the model
                        model_weights,  # contains the trained weights
                        caffe.TEST)     # use test mode (e.g., don't perform dropout)

    return model 
Example #29
Source File: caffe2pkl.py    From VNect with Apache License 2.0
def load_net(proto_path, weights_path):
    """Load caffe model"""
    caffe.set_mode_cpu()
    return caffe.Net(proto_path, weights_path, caffe.TEST) 
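Given the file name (caffe2pkl.py), a hedged sketch of how the loaded net's parameters might then be pickled; the helper name and file paths are illustrative:

import pickle

def save_params_pkl(net, pkl_path):
    params = {name: [blob.data.copy() for blob in blobs]
              for name, blobs in net.params.items()}
    with open(pkl_path, 'wb') as f:
        pickle.dump(params, f)

net = load_net('vnect.prototxt', 'vnect.caffemodel')
save_params_pkl(net, 'vnect_params.pkl')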
Example #30
Source File: model.py    From facade-segmentation with MIT License
def net():
    """Delay loading the net until the last possible moment.

    Loading the net is SLOW and produces a ton of terminal garbage.
    Also we want to wait to load it until we have called some other
    caffe initialization code (caffe.set_mode_gpu(), caffe.set_device(0), etc.)

    """
    global __net
    if __net is None:
        __net = caffe.Net(LAYERS, WEIGHTS, caffe.TEST)
    return __net