Python caffe.Net() Examples

The following are 30 code examples of caffe.Net(), collected from open-source projects; the source file, project, and license for each are listed above the example. You may also want to check out all available functions and classes of the caffe module.
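For orientation, here is a minimal sketch of the call these examples revolve around; the file names are placeholders:

import caffe

caffe.set_mode_cpu()
# caffe.Net(model_def, model_weights, phase): the .prototxt describes the
# architecture, the .caffemodel holds the trained weights, and caffe.TEST
# disables training-only behaviour such as dropout.
net = caffe.Net('deploy.prototxt', 'weights.caffemodel', caffe.TEST)
output = net.forward()  # forward pass over whatever is currently in the input blobs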
Example #1
Source File: ssd_net.py    From Hand-Keypoint-Detection with GNU General Public License v3.0
def __init__(self, model_weights, model_def, threshold=0.5, GPU_MODE=False):
        if GPU_MODE:
            caffe.set_device(0)
            caffe.set_mode_gpu()
        else:
            caffe.set_mode_cpu()
        self.net = caffe.Net(model_def,  # defines the structure of the model
                        model_weights,  # contains the trained weights
                        caffe.TEST)  # use test mode (e.g., don't perform dropout)
        self.threshold = threshold
        self.transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
        self.transformer.set_transpose('data', (2, 0, 1))
        self.transformer.set_mean('data', np.array([127.0, 127.0, 127.0]))  # mean pixel
        self.transformer.set_raw_scale('data',
                                  255)  # the reference model operates on images in [0,255] range instead of [0,1]
        self.transformer.set_channel_swap('data', (2, 1, 0))  # the reference model has channels in BGR order instead of RGB
        image_resize = 300
        self.net.blobs['data'].reshape(1, 3, image_resize, image_resize) 
Example #2
Source File: model.py    From facade-segmentation with MIT License
def net(weights=WEIGHTS):
    """
    Get the caffe net that has been trained to segment facade features.

    This initializes or re-initializes the global network with weights. There are certainly side-effects!

    The weights default to a caffe model that is part of the same source code repository as this file.
    They can be changed by setting the I12_WEIGHTS environment variable, by passing a command-line argument
    to some programs, or programmatically (of course).

    :param weights: The weights to use for the net.
    :return:
    """
    global WEIGHTS
    global _net
    if _net is None or weights != WEIGHTS:
        if weights is not None:
            WEIGHTS = weights
        _net = caffe.Net(LAYERS, WEIGHTS, caffe.TEST)
    return _net 
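A possible call site for the helper above (a sketch only; LAYERS, WEIGHTS and the I12_WEIGHTS override live in the original repository):

import os

custom_weights = os.environ.get('I12_WEIGHTS')          # optional override, as described above
segmentation_net = net(custom_weights) if custom_weights else net()
print(list(segmentation_net.blobs.keys()))              # inspect the loaded blobs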
Example #3
Source File: caffe2pytorch.py    From BDCN with MIT License
def main():
	args = parse_args()
	sys.path.append(args.caffe_root)
	import caffe
	net = caffe.Net(args.caffe_proto, args.caffe_model, caffe.TEST)
	print dir(net.layers[1].blobs[0])
	# for i, x in enumerate(net._layer_names):
	# 	print x, net.layers[i].type,
	# 	if x in net.params:
	# 		print net.params[x][0].shape
	# 	print '\n'
	model = bulid(net)
	torch.save(model.state_dict(), args.caffe_proto.split('.')[0]+'.pth')
	f = open(args.caffe_proto.split('.')[0]+'.py', 'w')
	stdout = sys.stdout
	sys.stdout = f
	print 'model = ', model
	sys.stdout = stdout
	f.close() 
Example #4
Source File: predict.py    From cloudless with Apache License 2.0
def _initialize_caffe(deploy_file, input_weight_file, training_mean_pickle, inference_width,
            inference_height):
    """
    Initializes Caffe to prepare to run some data through the model for inference.
    """
    caffe.set_mode_gpu()
    net = caffe.Net(deploy_file, input_weight_file, caffe.TEST)

    # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
    transformer = caffe.io.Transformer({"data": net.blobs["data"].data.shape})
    # PIL.Image loads the data with the channel last.
    transformer.set_transpose("data", (2, 0, 1))
    # Mean pixel.
    transformer.set_mean("data", np.load(training_mean_pickle).mean(1).mean(1))
    # The reference model operates on images in [0, 255] range instead of [0, 1].
    transformer.set_raw_scale("data", 255)
    # The reference model has channels in BGR order instead of RGB.
    transformer.set_channel_swap("data", (2, 1, 0))

    net.blobs["data"].reshape(1, 3, inference_height, inference_width)

    return (net, transformer) 
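A hedged sketch of how the (net, transformer) pair returned above might be used for one prediction; all file paths are placeholders:

import caffe

net, transformer = _initialize_caffe("deploy.prototxt", "weights.caffemodel",
                                     "mean.npy", 227, 227)
image = caffe.io.load_image("example.jpg")               # RGB floats in [0, 1]
net.blobs["data"].data[...] = transformer.preprocess("data", image)
output = net.forward()
scores = output[net.outputs[0]][0]                       # scores for the single image in the batch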
Example #5
Source File: test.py    From PytorchConverter with BSD 2-Clause "Simplified" License
def TestCaffe(proto_path, model_path, inputs, LayerCheck, ModelInd):
    net = caffe.Net(proto_path, model_path, caffe.TEST)
    net.blobs['data'].data[...] = inputs
    print('input blob:')
    print(net.blobs['data'].data[...])

    net.forward()

    if LayerCheck == 'Softmax_1':
        PrintLabel(net.blobs[LayerCheck].data[0].flatten())
    else:
        print(net.blobs[LayerCheck].data[0][...].flatten())
        if (ModelInd == 17):
            result_img = net.blobs[LayerCheck].data[0] * 255
            result_img = result_img.astype(int)
            result_img = np.transpose(result_img, (1, 2, 0))
            result_img = result_img[..., ::-1]
            cv2.imwrite("AnimeNet_result.png", result_img)
        if (ModelInd == 91):
            result_img = net.blobs[LayerCheck].data[0] * 255
            result_img = result_img.astype(int)
            result_img = np.transpose(result_img, (1, 2, 0))
            result_img = result_img[..., ::-1]
            cv2.imwrite("Upsample_result.png", result_img) 
Example #6
Source File: colorize_image.py    From interactive-deep-colorization with MIT License
def prep_net(self, gpu_id, prototxt_path='', caffemodel_path=''):
        import caffe
        print('gpu_id = %d, net_path = %s, model_path = %s' % (gpu_id, prototxt_path, caffemodel_path))
        if gpu_id == -1:
            caffe.set_mode_cpu()
        else:
            caffe.set_device(gpu_id)
            caffe.set_mode_gpu()
        self.gpu_id = gpu_id
        self.net = caffe.Net(prototxt_path, caffemodel_path, caffe.TEST)
        self.net_set = True

        # automatically set cluster centers
        if len(self.net.params[self.pred_ab_layer][0].data[...].shape) == 4 and self.net.params[self.pred_ab_layer][0].data[...].shape[1] == 313:
            print('Setting ab cluster centers in layer: %s' % self.pred_ab_layer)
            self.net.params[self.pred_ab_layer][0].data[:, :, 0, 0] = self.pts_in_hull.T

        # automatically set upsampling kernel
        for layer in self.net._layer_names:
            if layer[-3:] == '_us':
                print('Setting upsampling layer kernel: %s' % layer)
                self.net.params[layer][0].data[:, 0, :, :] = np.array(((.25, .5, .25, 0), (.5, 1., .5, 0), (.25, .5, .25, 0), (0, 0, 0, 0)))[np.newaxis, :, :]

    # ***** Call forward ***** 
Example #7
Source File: colorize_image.py    From interactive-deep-colorization with MIT License
def __init__(self, Xd=256):
        print('ColorizeImageCaffe instantiated')
        ColorizeImageBase.__init__(self, Xd)
        self.l_norm = 1.
        self.ab_norm = 1.
        self.l_mean = 50.
        self.ab_mean = 0.
        self.mask_mult = 110.

        self.pred_ab_layer = 'pred_ab'  # predicted ab layer

        # Load grid properties
        self.pts_in_hull_path = './data/color_bins/pts_in_hull.npy'
        self.pts_in_hull = np.load(self.pts_in_hull_path)  # 313x2, in-gamut

    # ***** Net preparation ***** 
Example #8
Source File: extractor.py    From MMdnn with MIT License
def inference(cls, architecture_name, architecture, path, image_path):
        if cls.sanity_check(architecture_name):
            import caffe
            import numpy as np
            net = caffe.Net(architecture[0], architecture[1], caffe.TEST)

            func = TestKit.preprocess_func['caffe'][architecture_name]
            img = func(image_path)
            img = np.transpose(img, (2, 0, 1))
            img = np.expand_dims(img, 0)
            net.blobs['data'].data[...] = img
            predict = np.squeeze(net.forward()[net._output_list[-1]][0])
            predict = np.squeeze(predict)
            return predict

        else:
            return None 
Example #9
Source File: phocnet_evaluator.py    From phocnet with BSD 3-Clause "New" or "Revised" License
def _load_pretrained_phocnet(self, phocnet_bin_path, gpu_id, debug_mode, deploy_proto_path, phoc_size):
        # create a deploy proto file
        self.logger.info('Saving PHOCNet deploy proto file to %s...', deploy_proto_path)
        mpg = ModelProtoGenerator(initialization='msra', use_cudnn_engine=gpu_id is not None)
        proto = mpg.get_phocnet(word_image_lmdb_path=None, phoc_lmdb_path=None, phoc_size=phoc_size, generate_deploy=True)
        with open(deploy_proto_path, 'w') as proto_file:
            proto_file.write(str(proto))
            
        # create the Caffe PHOCNet object
        self.logger.info('Creating PHOCNet...')
        if debug_mode:
            phocnet = caffe.Net(deploy_proto_path, phocnet_bin_path, caffe.TEST)
        else:
            with Suppressor():
                phocnet = caffe.Net(deploy_proto_path, phocnet_bin_path, caffe.TEST)
        return phocnet 
Example #10
Source File: loadcaffe.py    From dataflow with Apache License 2.0
def load_caffe(model_desc, model_file):
    """
    Load a caffe model. You must be able to ``import caffe`` to use this
    function.

    Args:
        model_desc (str): path to caffe model description file (.prototxt).
        model_file (str): path to caffe model parameter file (.caffemodel).
    Returns:
        dict: the parameters.
    """
    with change_env('GLOG_minloglevel', '2'):
        import caffe
        caffe.set_mode_cpu()
        net = caffe.Net(model_desc, model_file, caffe.TEST)
    param_dict = CaffeLayerProcessor(net).process()
    logger.info("Model loaded from caffe. Params: " +
                ", ".join(sorted(param_dict.keys())))
    return param_dict 
Example #11
Source File: test.py    From ccnn with GNU General Public License v3.0
def __init__(self, prototxt, caffemodel, n_scales):       
        # Load a pretrained caffe model
        self.net = caffe.Net(prototxt, caffemodel, caffe.TEST)
        
        # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
        self.transformer = caffe.io.Transformer({'data': self.net.blobs['data_s0'].data.shape})
        self.transformer.set_transpose('data', (2, 0, 1)) # It's already RGB
        # Reshape net for the single input
        b_shape = self.net.blobs['data_s0'].data.shape
        self._n_scales = n_scales
        for s in range(n_scales):
            scale_name = 'data_s{}'.format(s)
            self.net.blobs[scale_name].reshape(b_shape[0],b_shape[1],b_shape[2],b_shape[3])

    # Probably not the most efficient way to do it...
Example #12
Source File: caffe_to_chainermodel.py    From fcn with MIT License
def caffe_to_chainermodel(model, caffe_prototxt, caffemodel_path,
                          chainermodel_path):
    os.chdir(osp.dirname(caffe_prototxt))
    net = caffe.Net(caffe_prototxt, caffemodel_path, caffe.TEST)

    for name, param in net.params.iteritems():
        try:
            layer = getattr(model, name)
        except AttributeError:
            print('Skipping caffe layer: %s' % name)
            continue

        has_bias = True
        if len(param) == 1:
            has_bias = False

        print('{0}:'.format(name))
        # weight
        print('  - W: %s %s' % (param[0].data.shape, layer.W.data.shape))
        assert param[0].data.shape == layer.W.data.shape
        layer.W.data = param[0].data
        # bias
        if has_bias:
            print('  - b: %s %s' % (param[1].data.shape, layer.b.data.shape))
            assert param[1].data.shape == layer.b.data.shape
            layer.b.data = param[1].data
    S.save_npz(chainermodel_path, model) 
Example #13
Source File: vgg16d.py    From SSENet-pytorch with MIT License
def convert_caffe_to_torch(caffemodel_path, prototxt_path='network/vgg16_20M.prototxt'):
    import caffe

    caffe_model = caffe.Net(prototxt_path, caffemodel_path, caffe.TEST)

    dict = {}
    for caffe_name in list(caffe_model.params.keys()):
        dict[caffe_name + '.weight'] = torch.from_numpy(caffe_model.params[caffe_name][0].data)
        dict[caffe_name + '.bias'] = torch.from_numpy(caffe_model.params[caffe_name][1].data)

    return dict 
Example #14
Source File: prediction_dual_path.py    From FaceAttribute-FAN with Apache License 2.0
def load_model(gpu, model_path, prototxt_path, mean_file):
    if not os.path.isfile(model_path):
        raise IOError('%s model not found.\n' % model_path)
    caffe.set_mode_gpu()
    caffe.set_device(int(gpu))
    net = caffe.Net(prototxt_path, model_path, caffe.TEST)
    proto_data = open(mean_file, "rb").read()
    a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
    mean = caffe.io.blobproto_to_array(a)[0]
    print('Loaded network {:s}'.format(model_path))
    return net, mean 
Example #15
Source File: verify_deploy.py    From PytorchToCaffe with MIT License
def forward_caffe(protofile, weightfile, image):
    if args.cuda:
        caffe.set_device(0)
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()
    net = caffe.Net(protofile, weightfile, caffe.TEST)
    net.blobs['blob1'].reshape(1, 3, args.height, args.width)
    net.blobs['blob1'].data[...] = image
    t0 = time.time()
    output = net.forward()
    t1 = time.time()
    return t1-t0, net.blobs, net.params 
Example #16
Source File: rtpose_shufflenetV2.py    From EverybodyDanceNow_reproduce_pytorch with MIT License
def convert_to_caffe(self, name):
        caffe_net = caffe.NetSpec()
        layer = L.Input(shape=dict(dim=[1, 3, args.image_hw, args.image_hw]))
        caffe_net.tops['data'] = layer
        slim.generate_caffe_prototxt(self, caffe_net, layer)
        print(caffe_net.to_proto())
        with open(name + '.prototxt', 'wb') as f:
            f.write(str(caffe_net.to_proto()).encode())
        caffe_net = caffe.Net(name + '.prototxt', caffe.TEST)
        slim.convert_pytorch_to_caffe(self, caffe_net)
        caffe_net.save(name + '.caffemodel') 
Example #17
Source File: __init__.py    From saliency-2016-cvpr with MIT License
def _create_net(specfile, modelfile):
    if not PYCAFFE_DIR in sys.path:
        sys.path.insert(0, PYCAFFE_DIR)
    import caffe
    return caffe.Net(specfile, modelfile, caffe.TEST) 
Example #18
Source File: FLOPs_and_size.py    From ThiNet_Code with MIT License
def get_complexity(netspec=None, prototxt_file=None, mode=None):
    # At least one of the netspec or prototxt_file params must not be None
    assert (netspec is not None) or (prototxt_file is not None)

    if netspec is not None:
        prototxt_file = _create_file_from_netspec(netspec)

    net = caffe.Net(prototxt_file, caffe.TEST)

    total_params = 0
    total_flops = 0

    net_params = caffe_pb2.NetParameter()
    text_format.Merge(open(prototxt_file).read(), net_params)
    print '\n ########### output ###########'
    for layer in net_params.layer:
        if layer.name in net.params:

            params = net.params[layer.name][0].data.size
            # For a convolution layer, multiply the parameter count by the
            # output feature-map size, i.e. #params * data_width * data_height
            if layer.type == 'Convolution':  # 'conv' in layer:
                data_width = net.blobs[layer.name].data.shape[2]
                data_height = net.blobs[layer.name].data.shape[3]
                flops = net.params[layer.name][
                    0].data.size * data_width * data_height
                # print >> sys.stderr, layer.name, params, flops
            else:
                flops = net.params[layer.name][0].data.size
            flops *= 2 
            print('%s: #params: %s, #FLOPs: %s') % (
                layer.name,
                digit2string(params),
                digit2string(flops))
            total_params += params
            total_flops += flops

    if netspec is not None:
        os.remove(prototxt_file)

    return total_params, total_flops 
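As a sanity check on the counting rule above, here is a toy calculation for a single hypothetical 3x3 convolution layer (the numbers are illustrative only):

# Hypothetical conv layer: 64 output channels, 32 input channels, 3x3 kernel,
# producing a 56x56 output feature map.
params = 64 * 32 * 3 * 3              # weight count, biases ignored as above
flops = params * 56 * 56 * 2          # params * data_width * data_height, doubled as in the code
print(params, flops)                  # 18432 parameters, ~115.6 MFLOPs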
Example #19
Source File: caffe2pkl.py    From VNect with Apache License 2.0
def load_net(proto_path, weights_path):
    """Load caffe model"""
    caffe.set_mode_cpu()
    return caffe.Net(proto_path, weights_path, caffe.TEST) 
Example #20
Source File: faceLocation.py    From FaceRecognition-RestApi with MIT License
def initFaceDetector():
    minsize = 20
    caffe_model_path = "/home/duino/iactive/mtcnn/model"
    threshold = [0.6, 0.7, 0.7]
    factor = 0.709
    caffe.set_mode_cpu()
    PNet = caffe.Net(caffe_model_path + "/det1.prototxt", caffe_model_path + "/det1.caffemodel", caffe.TEST)
    RNet = caffe.Net(caffe_model_path + "/det2.prototxt", caffe_model_path + "/det2.caffemodel", caffe.TEST)
    ONet = caffe.Net(caffe_model_path + "/det3.prototxt", caffe_model_path + "/det3.caffemodel", caffe.TEST)
    return (minsize, PNet, RNet, ONet, threshold, factor) 
Example #21
Source File: model.py    From facade-segmentation with MIT License
def net():
    """Delay loading the net until the last possible moment.

    Loading the net is SLOW and produces a ton of terminal garbage.
    Also, we want to wait to load it until we have called the other
    caffe initialization code (caffe.set_mode_gpu(), caffe.set_device(0), etc.)

    """
    global __net
    if __net is None:
        __net = caffe.Net(LAYERS, WEIGHTS, caffe.TEST)
    return __net 
Example #22
Source File: train_faster_rcnn_alt_opt.py    From face-py-faster-rcnn with MIT License
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
                 rpn_test_prototxt=None):
    """Use a trained RPN to generate proposals.
    """

    cfg.TEST.RPN_PRE_NMS_TOP_N = -1     # no pre NMS filtering
    cfg.TEST.RPN_POST_NMS_TOP_N = 2000  # limit top boxes after NMS
    print 'RPN model: {}'.format(rpn_model_path)
    print('Using config:')
    pprint.pprint(cfg)

    import caffe
    _init_caffe(cfg)

    # NOTE: the matlab implementation computes proposals on flipped images, too.
    # We compute them on the image once and then flip the already computed
    # proposals. This might cause a minor loss in mAP (less proposal jittering).
    imdb = get_imdb(imdb_name)
    print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)

    # Load RPN and configure output directory
    rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)
    # Generate proposals on the imdb
    rpn_proposals = imdb_proposals(rpn_net, imdb)
    # Write proposals to disk and send the proposal file path through the
    # multiprocessing queue
    rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
    rpn_proposals_path = os.path.join(
        output_dir, rpn_net_name + '_proposals.pkl')
    with open(rpn_proposals_path, 'wb') as f:
        cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
    print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
    queue.put({'proposal_path': rpn_proposals_path}) 
Example #23
Source File: surgery.py    From fcn with MIT License
def transplant(new_net, net, suffix=''):
    """
    Transfer weights by copying matching parameters, coercing parameters of
    incompatible shape, and dropping unmatched parameters.

    The coercion is useful to convert fully connected layers to their
    equivalent convolutional layers, since the weights are the same and only
    the shapes are different.  In particular, equivalent fully connected and
    convolution layers have shapes O x I and O x I x H x W respectively for O
    output channels, I input channels, H kernel height, and W kernel width.

    Both the `net` and `new_net` arguments must be instantiated `caffe.Net`s.
    """
    for p in net.params:
        p_new = p + suffix
        if p_new not in new_net.params:
            print 'dropping', p
            continue
        for i in range(len(net.params[p])):
            if i > (len(new_net.params[p_new]) - 1):
                print 'dropping', p, i
                break
            if net.params[p][i].data.shape != new_net.params[p_new][i].data.shape:
                print 'coercing', p, i, 'from', net.params[p][i].data.shape, 'to', new_net.params[p_new][i].data.shape
            else:
                print 'copying', p, ' -> ', p_new, i
            new_net.params[p_new][i].data.flat = net.params[p][i].data.flat 
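The shape coercion the docstring describes is just a reshape of the same parameters; a small standalone illustration with made-up dimensions:

import numpy as np

O, I, H, W = 10, 512, 7, 7
fc_weights = np.random.randn(O, I * H * W)        # fully connected weights: O x (I*H*W)
conv_weights = fc_weights.reshape(O, I, H, W)     # equivalent convolution weights: O x I x H x W
assert conv_weights.size == fc_weights.size       # same values, only the shape differs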
Example #24
Source File: test_caffe.py    From rl-attack-detection with MIT License
def __init__(self, mean, weight, K, num_act, num_step=1, data_path='test'):
        self.K = K
        self.num_act = num_act
        self.num_step = num_step

        caffe.set_mode_gpu()
        caffe.set_device(0)

        test_net_file, net_proto = N.create_netfile(1, data_path, mean, K, K,
            1, num_act, num_step=self.num_step, mode='test')

        self.test_net = caffe.Net(test_net_file, caffe.TEST)
        self.test_net.copy_from(weight) 
Example #25
Source File: caffe_parser.py    From training_results_v0.6 with Apache License 2.0
def read_caffemodel(prototxt_fname, caffemodel_fname):
    """Return a caffe_pb2.NetParameter object that defined in a binary
    caffemodel file
    """
    if use_caffe:
        caffe.set_mode_cpu()
        net = caffe.Net(prototxt_fname, caffemodel_fname, caffe.TEST)
        layer_names = net._layer_names
        layers = net.layers
        return (layers, layer_names)
    else:
        proto = caffe_pb2.NetParameter()
        with open(caffemodel_fname, 'rb') as f:
            proto.ParseFromString(f.read())
        return (get_layers(proto), None) 
Example #26
Source File: makeNet.py    From calc with BSD 3-Clause "New" or "Revised" License
def view_output_size():
	"""
	View the dimension of the descriptor, both flattened into a vector and in tensor form
	"""
	net = caffe.Net('proto/train.prototxt', caffe.TEST)
	otpt_raw = net.blobs['conv3'].data
	otpt = otpt_raw.copy()
	print 'output matrix shape: ', otpt.shape
	otpt_raw = net.blobs['descriptor'].data
	otpt = otpt_raw.copy()
	print 'output vector shape: ', otpt.shape 
Example #27
Source File: testNet.py    From calc with BSD 3-Clause "New" or "Revised" License
def view_forward_pass(im1_fl, im2_fl, net_def_path='proto/deploy.prototxt', net_model_path='model/Ours.caffemodel'):

	"""
	View the forward pass of an image through the deployed net. 
	"""
	from matplotlib import rcParams
	caffe.set_mode_cpu()
	net = caffe.Net(net_def_path, 1, weights=net_model_path)  # phase 1 == caffe.TEST
	im1 = cv2.resize(cv2.cvtColor(cv2.imread(im1_fl), cv2.COLOR_BGR2GRAY), (160,120))
	im2 = cv2.resize(cv2.cvtColor(cv2.imread(im2_fl), cv2.COLOR_BGR2GRAY), (160,120))
	# Use caffe's transformer
	transformer = caffe.io.Transformer({'X1':(1,1,120,160)})	
	transformer.set_raw_scale('X1',1./255)
	net.blobs['X1'].data[...] = transformer.preprocess('X1', im1)
	net.forward()
	relu13 = np.copy(net.blobs['relu3'].data[0,0,:,:])

	net.blobs['X1'].data[...] = transformer.preprocess('X1', im2)
	net.forward()
	relu23 = net.blobs['relu3'].data[0,0,:,:]
		
	plt.axis('off')
	plt.imshow(relu13)
	plt.show()	

	plt.axis('off')
	plt.imshow(relu23)
	plt.show() 
Example #28
Source File: slim.py    From EverybodyDanceNow_reproduce_pytorch with MIT License
def convert_to_caffe(self, name):
            caffe_net = caffe.NetSpec()
            layer = L.Input(shape=dict(dim=[1, 3, 224, 224]))
            caffe_net.tops['data'] = layer
            generate_caffe_prototxt(self, caffe_net, layer)
            print(caffe_net.to_proto())
            with open(name + '.prototxt', 'wb') as f:
                f.write(str(caffe_net.to_proto()).encode())  # encode since the file is opened in binary mode
            caffe_net = caffe.Net(name + '.prototxt', caffe.TEST)
            convert_pytorch_to_caffe(self, caffe_net)
            caffe_net.save(name + '.caffemodel') 
Example #29
Source File: model_caffe.py    From intermediate-cnn-features with Apache License 2.0
def __init__(self, name, prototxt, caffemodel):
        """
          Class initializer.

          Args:
            name: name of the CNN network
            prototxt: path to prototxt file of the pre-trained CNN model
            caffemodel: path to caffemodel file of the pre-trained CNN model

          Raise:
            ValueError: if the provided network name is not supported
        """
        self.net_name = name

        # intermediate convolutional layers to extract features
        if name == 'googlenet':
            self.desired_size = 224
            self.layers = ['inception_3a/output', 'inception_3b/output',
                      'inception_4a/output', 'inception_4b/output',
                      'inception_4c/output', 'inception_4d/output',
                      'inception_4e/output', 'inception_5a/output',
                      'inception_5b/output']
        elif name == 'resnet':
            self.desired_size = 224
            self.layers = ['res2c', 'res3b7', 'res4b35', 'res5c']
        elif name == 'vgg':
            self.desired_size = 224
            self.layers = ['conv2_1', 'conv2_2', 'conv3_1',
                           'conv3_2', 'conv3_3', 'conv4_1',
                           'conv4_2', 'conv4_3', 'conv5_1',
                           'conv5_2', 'conv5_3']
        else:
            raise ValueError('Network not found. Supported networks for Caffe framework: googlenet, vgg, resnet')

        # load network
        self.net = caffe.Net(prototxt, caffemodel, caffe.TEST)
        self.final_sz = np.sum(
            [self.net.blobs[layer].data.shape[1] for layer in self.layers]) 
Example #30
Source File: VNet.py    From VNet with GNU General Public License v3.0
def test(self):
        self.dataManagerTest = DM.DataManager(self.params['ModelParams']['dirTest'], self.params['ModelParams']['dirResult'], self.params['DataManagerParams'])
        self.dataManagerTest.loadTestData()

        net = caffe.Net(self.params['ModelParams']['prototxtTest'],
                        os.path.join(self.params['ModelParams']['dirSnapshots'],"_iter_" + str(self.params['ModelParams']['snapshot']) + ".caffemodel"),
                        caffe.TEST)

        numpyImages = self.dataManagerTest.getNumpyImages()

        for key in numpyImages:
            mean = np.mean(numpyImages[key][numpyImages[key]>0])
            std = np.std(numpyImages[key][numpyImages[key]>0])

            numpyImages[key] -= mean
            numpyImages[key] /= std

        results = dict()

        for key in numpyImages:

            btch = np.reshape(numpyImages[key],[1,1,numpyImages[key].shape[0],numpyImages[key].shape[1],numpyImages[key].shape[2]])

            net.blobs['data'].data[...] = btch

            out = net.forward()
            l = out["labelmap"]
            labelmap = np.squeeze(l[0,1,:,:,:])

            results[key] = np.squeeze(labelmap)

            self.dataManagerTest.writeResultsFromNumpyLabel(np.squeeze(labelmap),key)