Python caffe2.python.workspace.RunNet() Examples
The following are 30 code examples of caffe2.python.workspace.RunNet().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module caffe2.python.workspace, or try the search function.
Example #1
Source File: test_batch_permutation_op.py From Detectron-Cascade-RCNN with Apache License 2.0 | 6 votes |
def _run_speed_test(self, iters=5, N=1024):
    """Benchmark the BatchPermutation op via Caffe2's 'prof_dag' executor.

    Note: 'prof_dag' only collects profiling data when Caffe2 was compiled
    with profiling support (`-DUSE_PROF=ON` passed to `cmake`).
    """
    # Build a tiny net: permute -> flatten -> averaged loss, plus gradients.
    net = core.Net('test')
    net.Proto().type = 'prof_dag'
    net.Proto().num_workers = 2
    Y = net.BatchPermutation(['X', 'I'], 'Y')
    Y_flat = net.FlattenToVec([Y], 'Y_flat')
    loss = net.AveragedLoss([Y_flat], 'loss')
    net.AddGradientOperators([loss])
    workspace.CreateNet(net)

    X = np.random.randn(N, 256, 14, 14)
    for _ in range(iters):
        perm = np.random.permutation(N)
        workspace.FeedBlob('X', X.astype(np.float32))
        workspace.FeedBlob('I', perm.astype(np.int32))
        workspace.RunNet(net.Proto().name)
        # Output must equal the input rows reordered by the permutation.
        np.testing.assert_allclose(
            workspace.FetchBlob('Y'), X[perm], rtol=1e-5, atol=1e-08
        )
Example #2
Source File: test_batch_permutation_op.py From seg_every_thing with Apache License 2.0 | 6 votes |
def _run_speed_test(self, iters=5, N=1024):
    """Benchmark the BatchPermutation op via Caffe2's 'prof_dag' executor.

    Note: 'prof_dag' only collects profiling data when Caffe2 was compiled
    with profiling support (`-DUSE_PROF=ON` passed to `cmake`).
    """
    # Build a tiny net: permute -> flatten -> averaged loss, plus gradients.
    net = core.Net('test')
    net.Proto().type = 'prof_dag'
    net.Proto().num_workers = 2
    Y = net.BatchPermutation(['X', 'I'], 'Y')
    Y_flat = net.FlattenToVec([Y], 'Y_flat')
    loss = net.AveragedLoss([Y_flat], 'loss')
    net.AddGradientOperators([loss])
    workspace.CreateNet(net)

    X = np.random.randn(N, 256, 14, 14)
    for _ in range(iters):
        perm = np.random.permutation(N)
        workspace.FeedBlob('X', X.astype(np.float32))
        workspace.FeedBlob('I', perm.astype(np.int32))
        workspace.RunNet(net.Proto().name)
        # Output must equal the input rows reordered by the permutation.
        np.testing.assert_allclose(
            workspace.FetchBlob('Y'), X[perm], rtol=1e-5, atol=1e-08
        )
Example #3
Source File: test_batch_permutation_op.py From Detectron-DA-Faster-RCNN with Apache License 2.0 | 6 votes |
def _run_speed_test(self, iters=5, N=1024):
    """Benchmark the BatchPermutation op via Caffe2's 'prof_dag' executor.

    Note: 'prof_dag' only collects profiling data when Caffe2 was compiled
    with profiling support (`-DUSE_PROF=ON` passed to `cmake`).
    """
    # Build a tiny net: permute -> flatten -> averaged loss, plus gradients.
    net = core.Net('test')
    net.Proto().type = 'prof_dag'
    net.Proto().num_workers = 2
    Y = net.BatchPermutation(['X', 'I'], 'Y')
    Y_flat = net.FlattenToVec([Y], 'Y_flat')
    loss = net.AveragedLoss([Y_flat], 'loss')
    net.AddGradientOperators([loss])
    workspace.CreateNet(net)

    X = np.random.randn(N, 256, 14, 14)
    for _ in range(iters):
        perm = np.random.permutation(N)
        workspace.FeedBlob('X', X.astype(np.float32))
        workspace.FeedBlob('I', perm.astype(np.int32))
        workspace.RunNet(net.Proto().name)
        # Output must equal the input rows reordered by the permutation.
        np.testing.assert_allclose(
            workspace.FetchBlob('Y'), X[perm], rtol=1e-5, atol=1e-08
        )
Example #4
Source File: test_batch_permutation_op.py From Clustered-Object-Detection-in-Aerial-Image with Apache License 2.0 | 6 votes |
def _run_speed_test(self, iters=5, N=1024):
    """Benchmark the BatchPermutation op via Caffe2's 'prof_dag' executor.

    Note: 'prof_dag' only collects profiling data when Caffe2 was compiled
    with profiling support (`-DUSE_PROF=ON` passed to `cmake`).
    """
    # Build a tiny net: permute -> flatten -> averaged loss, plus gradients.
    net = core.Net('test')
    net.Proto().type = 'prof_dag'
    net.Proto().num_workers = 2
    Y = net.BatchPermutation(['X', 'I'], 'Y')
    Y_flat = net.FlattenToVec([Y], 'Y_flat')
    loss = net.AveragedLoss([Y_flat], 'loss')
    net.AddGradientOperators([loss])
    workspace.CreateNet(net)

    X = np.random.randn(N, 256, 14, 14)
    for _ in range(iters):
        perm = np.random.permutation(N)
        workspace.FeedBlob('X', X.astype(np.float32))
        workspace.FeedBlob('I', perm.astype(np.int32))
        workspace.RunNet(net.Proto().name)
        # Output must equal the input rows reordered by the permutation.
        np.testing.assert_allclose(
            workspace.FetchBlob('Y'), X[perm], rtol=1e-5, atol=1e-08
        )
Example #5
Source File: test_batch_permutation_op.py From Detectron with Apache License 2.0 | 6 votes |
def _run_speed_test(self, iters=5, N=1024):
    """Benchmark the BatchPermutation op via Caffe2's 'prof_dag' executor.

    Note: 'prof_dag' only collects profiling data when Caffe2 was compiled
    with profiling support (`-DUSE_PROF=ON` passed to `cmake`).
    """
    # Build a tiny net: permute -> flatten -> averaged loss, plus gradients.
    net = core.Net('test')
    net.Proto().type = 'prof_dag'
    net.Proto().num_workers = 2
    Y = net.BatchPermutation(['X', 'I'], 'Y')
    Y_flat = net.FlattenToVec([Y], 'Y_flat')
    loss = net.AveragedLoss([Y_flat], 'loss')
    net.AddGradientOperators([loss])
    workspace.CreateNet(net)

    X = np.random.randn(N, 256, 14, 14)
    for _ in range(iters):
        perm = np.random.permutation(N)
        workspace.FeedBlob('X', X.astype(np.float32))
        workspace.FeedBlob('I', perm.astype(np.int32))
        workspace.RunNet(net.Proto().name)
        # Output must equal the input rows reordered by the permutation.
        np.testing.assert_allclose(
            workspace.FetchBlob('Y'), X[perm], rtol=1e-5, atol=1e-08
        )
Example #6
Source File: test_batch_permutation_op.py From CBNet with Apache License 2.0 | 6 votes |
def _run_speed_test(self, iters=5, N=1024):
    """Benchmark the BatchPermutation op via Caffe2's 'prof_dag' executor.

    Note: 'prof_dag' only collects profiling data when Caffe2 was compiled
    with profiling support (`-DUSE_PROF=ON` passed to `cmake`).
    """
    # Build a tiny net: permute -> flatten -> averaged loss, plus gradients.
    net = core.Net('test')
    net.Proto().type = 'prof_dag'
    net.Proto().num_workers = 2
    Y = net.BatchPermutation(['X', 'I'], 'Y')
    Y_flat = net.FlattenToVec([Y], 'Y_flat')
    loss = net.AveragedLoss([Y_flat], 'loss')
    net.AddGradientOperators([loss])
    workspace.CreateNet(net)

    X = np.random.randn(N, 256, 14, 14)
    for _ in range(iters):
        perm = np.random.permutation(N)
        workspace.FeedBlob('X', X.astype(np.float32))
        workspace.FeedBlob('I', perm.astype(np.int32))
        workspace.RunNet(net.Proto().name)
        # Output must equal the input rows reordered by the permutation.
        np.testing.assert_allclose(
            workspace.FetchBlob('Y'), X[perm], rtol=1e-5, atol=1e-08
        )
Example #7
Source File: test_batch_permutation_op.py From masktextspotter.caffe2 with Apache License 2.0 | 6 votes |
def _run_speed_test(self, iters=5, N=1024):
    """Benchmark the BatchPermutation op via Caffe2's 'prof_dag' executor.

    Note: 'prof_dag' only collects profiling data when Caffe2 was compiled
    with profiling support (`-DUSE_PROF=ON` passed to `cmake`).
    """
    # Build a tiny net: permute -> flatten -> averaged loss, plus gradients.
    net = core.Net('test')
    net.Proto().type = 'prof_dag'
    net.Proto().num_workers = 2
    Y = net.BatchPermutation(['X', 'I'], 'Y')
    Y_flat = net.FlattenToVec([Y], 'Y_flat')
    loss = net.AveragedLoss([Y_flat], 'loss')
    net.AddGradientOperators([loss])
    workspace.CreateNet(net)

    X = np.random.randn(N, 256, 14, 14)
    for _ in range(iters):
        perm = np.random.permutation(N)
        workspace.FeedBlob('X', X.astype(np.float32))
        workspace.FeedBlob('I', perm.astype(np.int32))
        workspace.RunNet(net.Proto().name)
        # Output must equal the input rows reordered by the permutation.
        np.testing.assert_allclose(
            workspace.FetchBlob('Y'), X[perm], rtol=1e-5, atol=1e-08
        )
Example #8
Source File: test_batch_permutation_op.py From NucleiDetectron with Apache License 2.0 | 6 votes |
def _run_speed_test(self, iters=5, N=1024):
    """Benchmark the BatchPermutation op via Caffe2's 'prof_dag' executor.

    Note: 'prof_dag' only collects profiling data when Caffe2 was compiled
    with profiling support (`-DUSE_PROF=ON` passed to `cmake`).
    """
    # Build a tiny net: permute -> flatten -> averaged loss, plus gradients.
    net = core.Net('test')
    net.Proto().type = 'prof_dag'
    net.Proto().num_workers = 2
    Y = net.BatchPermutation(['X', 'I'], 'Y')
    Y_flat = net.FlattenToVec([Y], 'Y_flat')
    loss = net.AveragedLoss([Y_flat], 'loss')
    net.AddGradientOperators([loss])
    workspace.CreateNet(net)

    X = np.random.randn(N, 256, 14, 14)
    for _ in range(iters):
        perm = np.random.permutation(N)
        workspace.FeedBlob('X', X.astype(np.float32))
        workspace.FeedBlob('I', perm.astype(np.int32))
        workspace.RunNet(net.Proto().name)
        # Output must equal the input rows reordered by the permutation.
        np.testing.assert_allclose(
            workspace.FetchBlob('Y'), X[perm], rtol=1e-5, atol=1e-08
        )
Example #9
Source File: test_batch_permutation_op.py From DetectAndTrack with Apache License 2.0 | 6 votes |
def _run_speed_test(self, iters=5, N=1024):
    """Benchmark the BatchPermutation op via Caffe2's 'prof_dag' executor.

    Note: 'prof_dag' only collects profiling data when Caffe2 was compiled
    with profiling support (`-DUSE_PROF=ON` passed to `cmake`).
    """
    # Build a tiny net: permute -> flatten -> averaged loss, plus gradients.
    net = core.Net('test')
    net.Proto().type = 'prof_dag'
    net.Proto().num_workers = 2
    Y = net.BatchPermutation(['X', 'I'], 'Y')
    Y_flat = net.FlattenToVec([Y], 'Y_flat')
    loss = net.AveragedLoss([Y_flat], 'loss')
    net.AddGradientOperators([loss])
    workspace.CreateNet(net)

    X = np.random.randn(N, 256, 14, 14)
    for _ in range(iters):
        perm = np.random.permutation(N)
        workspace.FeedBlob('X', X.astype(np.float32))
        workspace.FeedBlob('I', perm.astype(np.int32))
        workspace.RunNet(net.Proto().name)
        # Output must equal the input rows reordered by the permutation.
        np.testing.assert_allclose(
            workspace.FetchBlob('Y'), X[perm], rtol=1e-5, atol=1e-08
        )
Example #10
Source File: test_batch_permutation_op.py From KL-Loss with Apache License 2.0 | 6 votes |
def _run_speed_test(self, iters=5, N=1024):
    """Benchmark the BatchPermutation op via Caffe2's 'prof_dag' executor.

    Note: 'prof_dag' only collects profiling data when Caffe2 was compiled
    with profiling support (`-DUSE_PROF=ON` passed to `cmake`).
    """
    # Build a tiny net: permute -> flatten -> averaged loss, plus gradients.
    net = core.Net('test')
    net.Proto().type = 'prof_dag'
    net.Proto().num_workers = 2
    Y = net.BatchPermutation(['X', 'I'], 'Y')
    Y_flat = net.FlattenToVec([Y], 'Y_flat')
    loss = net.AveragedLoss([Y_flat], 'loss')
    net.AddGradientOperators([loss])
    workspace.CreateNet(net)

    X = np.random.randn(N, 256, 14, 14)
    for _ in range(iters):
        perm = np.random.permutation(N)
        workspace.FeedBlob('X', X.astype(np.float32))
        workspace.FeedBlob('I', perm.astype(np.int32))
        workspace.RunNet(net.Proto().name)
        # Output must equal the input rows reordered by the permutation.
        np.testing.assert_allclose(
            workspace.FetchBlob('Y'), X[perm], rtol=1e-5, atol=1e-08
        )
Example #11
Source File: test.py From seg_every_thing with Apache License 2.0 | 5 votes |
def im_conv_body_only(model, im, target_scale, target_max_size):
    """Runs `model.conv_body_net` on the given image `im`."""
    # Preprocess the image into a network-ready blob plus its resize scale.
    data_blob, scale, _im_info = blob_utils.get_image_blob(
        im, target_scale, target_max_size
    )
    workspace.FeedBlob(core.ScopedName('data'), data_blob)
    workspace.RunNet(model.conv_body_net.Proto().name)
    return scale
Example #12
Source File: test.py From Detectron-DA-Faster-RCNN with Apache License 2.0 | 5 votes |
def im_conv_body_only(model, im, target_scale, target_max_size):
    """Runs `model.conv_body_net` on the given image `im`."""
    # Preprocess the image into a network-ready blob plus its resize scale.
    data_blob, scale, _im_info = blob_utils.get_image_blob(
        im, target_scale, target_max_size
    )
    workspace.FeedBlob(core.ScopedName('data'), data_blob)
    workspace.RunNet(model.conv_body_net.Proto().name)
    return scale
Example #13
Source File: test.py From Detectron-Cascade-RCNN with Apache License 2.0 | 5 votes |
def im_conv_body_only(model, im, target_scale, target_max_size):
    """Runs `model.conv_body_net` on the given image `im`."""
    # Preprocess the image into a network-ready blob plus its resize scale.
    data_blob, scale, _im_info = blob_utils.get_image_blob(
        im, target_scale, target_max_size
    )
    workspace.FeedBlob(core.ScopedName('data'), data_blob)
    workspace.RunNet(model.conv_body_net.Proto().name)
    return scale
Example #14
Source File: test.py From Clustered-Object-Detection-in-Aerial-Image with Apache License 2.0 | 5 votes |
def im_conv_body_only(model, im, target_scale, target_max_size):
    """Runs `model.conv_body_net` on the given image `im`."""
    # Preprocess the image into a network-ready blob plus its resize scale.
    data_blob, scale, _im_info = blob_utils.get_image_blob(
        im, target_scale, target_max_size
    )
    workspace.FeedBlob(core.ScopedName('data'), data_blob)
    workspace.RunNet(model.conv_body_net.Proto().name)
    return scale
Example #15
Source File: test.py From Detectron-Cascade-RCNN with Apache License 2.0 | 5 votes |
def im_detect_keypoints(model, im_scale, boxes):
    """Infer instance keypoint poses.

    Must be called after im_detect_bbox, which populates the Caffe2
    workspace with the blobs this function reads.

    Arguments:
        model (DetectionModelHelper): the detection model to use
        im_scale (list): image blob scales as returned by im_detect_bbox
        boxes (ndarray): R x 4 array of bounding box detections (e.g., as
            returned by im_detect_bbox)

    Returns:
        pred_heatmaps (ndarray): R x J x M x M array of keypoint location
            logits (softmax inputs) for each of the J keypoint types output
            by the network (must be processed by keypoint_results to convert
            into point predictions in the original image coordinate space)
    """
    M = cfg.KRCNN.HEATMAP_SIZE
    # No detections: return an empty, correctly-shaped heatmap array.
    if boxes.shape[0] == 0:
        return np.zeros((0, cfg.KRCNN.NUM_KEYPOINTS, M, M), np.float32)

    inputs = {'keypoint_rois': _get_rois_blob(boxes, im_scale)}
    # Add multi-level rois for FPN
    if cfg.FPN.MULTILEVEL_ROIS:
        _add_multilevel_rois_for_test(inputs, 'keypoint_rois')

    for name, blob in inputs.items():
        workspace.FeedBlob(core.ScopedName(name), blob)
    workspace.RunNet(model.keypoint_net.Proto().name)

    pred_heatmaps = workspace.FetchBlob(core.ScopedName('kps_score')).squeeze()
    # With a single box, squeeze() drops the leading axis; restore it.
    if pred_heatmaps.ndim == 3:
        pred_heatmaps = np.expand_dims(pred_heatmaps, axis=0)
    return pred_heatmaps
Example #16
Source File: test.py From KL-Loss with Apache License 2.0 | 5 votes |
def im_detect_keypoints(model, im_scale, boxes):
    """Infer instance keypoint poses.

    Must be called after im_detect_bbox, which populates the Caffe2
    workspace with the blobs this function reads.

    Arguments:
        model (DetectionModelHelper): the detection model to use
        im_scale (list): image blob scales as returned by im_detect_bbox
        boxes (ndarray): R x 4 array of bounding box detections (e.g., as
            returned by im_detect_bbox)

    Returns:
        pred_heatmaps (ndarray): R x J x M x M array of keypoint location
            logits (softmax inputs) for each of the J keypoint types output
            by the network (must be processed by keypoint_results to convert
            into point predictions in the original image coordinate space)
    """
    M = cfg.KRCNN.HEATMAP_SIZE
    # No detections: return an empty, correctly-shaped heatmap array.
    if boxes.shape[0] == 0:
        return np.zeros((0, cfg.KRCNN.NUM_KEYPOINTS, M, M), np.float32)

    inputs = {'keypoint_rois': _get_rois_blob(boxes, im_scale)}
    # Add multi-level rois for FPN
    if cfg.FPN.MULTILEVEL_ROIS:
        _add_multilevel_rois_for_test(inputs, 'keypoint_rois')

    for name, blob in inputs.items():
        workspace.FeedBlob(core.ScopedName(name), blob)
    workspace.RunNet(model.keypoint_net.Proto().name)

    pred_heatmaps = workspace.FetchBlob(core.ScopedName('kps_score')).squeeze()
    # With a single box, squeeze() drops the leading axis; restore it.
    if pred_heatmaps.ndim == 3:
        pred_heatmaps = np.expand_dims(pred_heatmaps, axis=0)
    return pred_heatmaps
Example #17
Source File: test.py From Detectron with Apache License 2.0 | 5 votes |
def im_conv_body_only(model, im, target_scale, target_max_size):
    """Runs `model.conv_body_net` on the given image `im`."""
    # Preprocess the image into a network-ready blob plus its resize scale.
    data_blob, scale, _im_info = blob_utils.get_image_blob(
        im, target_scale, target_max_size
    )
    workspace.FeedBlob(core.ScopedName('data'), data_blob)
    workspace.RunNet(model.conv_body_net.Proto().name)
    return scale
Example #18
Source File: test.py From masktextspotter.caffe2 with Apache License 2.0 | 5 votes |
def im_conv_body_only(model, im):
    """Runs `model.conv_body_net` on the given image `im`."""
    # Preprocess the image into a network-ready blob plus its scale factors.
    data_blob, scale_factors = _get_image_blob(im)
    workspace.FeedBlob(core.ScopedName('data'), data_blob)
    workspace.RunNet(model.conv_body_net.Proto().name)
    return scale_factors
Example #19
Source File: test.py From Detectron with Apache License 2.0 | 5 votes |
def im_detect_mask(model, im_scale, boxes):
    """Infer instance segmentation masks.

    Must be called after im_detect_bbox, which populates the Caffe2
    workspace with the blobs this function reads.

    Arguments:
        model (DetectionModelHelper): the detection model to use
        im_scale (list): image blob scales as returned by im_detect_bbox
        boxes (ndarray): R x 4 array of bounding box detections (e.g., as
            returned by im_detect_bbox)

    Returns:
        pred_masks (ndarray): R x K x M x M array of class specific soft
            masks output by the network (must be processed by segm_results
            to convert into hard masks in the original image coordinate
            space)
    """
    M = cfg.MRCNN.RESOLUTION
    # No detections: return an empty, correctly-shaped mask array.
    if boxes.shape[0] == 0:
        return np.zeros((0, M, M), np.float32)

    inputs = {'mask_rois': _get_rois_blob(boxes, im_scale)}
    # Add multi-level rois for FPN
    if cfg.FPN.MULTILEVEL_ROIS:
        _add_multilevel_rois_for_test(inputs, 'mask_rois')

    for name, blob in inputs.items():
        workspace.FeedBlob(core.ScopedName(name), blob)
    workspace.RunNet(model.mask_net.Proto().name)

    # Fetch masks
    pred_masks = workspace.FetchBlob(
        core.ScopedName('mask_fcn_probs')
    ).squeeze()
    # One mask channel per class, or a single class-agnostic channel.
    target_shape = (
        [-1, cfg.MODEL.NUM_CLASSES, M, M]
        if cfg.MRCNN.CLS_SPECIFIC_MASK
        else [-1, 1, M, M]
    )
    return pred_masks.reshape(target_shape)
Example #20
Source File: test.py From Detectron with Apache License 2.0 | 5 votes |
def im_detect_keypoints(model, im_scale, boxes):
    """Infer instance keypoint poses.

    Must be called after im_detect_bbox, which populates the Caffe2
    workspace with the blobs this function reads.

    Arguments:
        model (DetectionModelHelper): the detection model to use
        im_scale (list): image blob scales as returned by im_detect_bbox
        boxes (ndarray): R x 4 array of bounding box detections (e.g., as
            returned by im_detect_bbox)

    Returns:
        pred_heatmaps (ndarray): R x J x M x M array of keypoint location
            logits (softmax inputs) for each of the J keypoint types output
            by the network (must be processed by keypoint_results to convert
            into point predictions in the original image coordinate space)
    """
    M = cfg.KRCNN.HEATMAP_SIZE
    # No detections: return an empty, correctly-shaped heatmap array.
    if boxes.shape[0] == 0:
        return np.zeros((0, cfg.KRCNN.NUM_KEYPOINTS, M, M), np.float32)

    inputs = {'keypoint_rois': _get_rois_blob(boxes, im_scale)}
    # Add multi-level rois for FPN
    if cfg.FPN.MULTILEVEL_ROIS:
        _add_multilevel_rois_for_test(inputs, 'keypoint_rois')

    for name, blob in inputs.items():
        workspace.FeedBlob(core.ScopedName(name), blob)
    workspace.RunNet(model.keypoint_net.Proto().name)

    pred_heatmaps = workspace.FetchBlob(core.ScopedName('kps_score')).squeeze()
    # With a single box, squeeze() drops the leading axis; restore it.
    if pred_heatmaps.ndim == 3:
        pred_heatmaps = np.expand_dims(pred_heatmaps, axis=0)
    return pred_heatmaps
Example #21
Source File: test.py From Detectron-Cascade-RCNN with Apache License 2.0 | 5 votes |
def im_detect_mask(model, im_scale, boxes):
    """Infer instance segmentation masks.

    Must be called after im_detect_bbox, which populates the Caffe2
    workspace with the blobs this function reads.

    Arguments:
        model (DetectionModelHelper): the detection model to use
        im_scale (list): image blob scales as returned by im_detect_bbox
        boxes (ndarray): R x 4 array of bounding box detections (e.g., as
            returned by im_detect_bbox)

    Returns:
        pred_masks (ndarray): R x K x M x M array of class specific soft
            masks output by the network (must be processed by segm_results
            to convert into hard masks in the original image coordinate
            space)
    """
    M = cfg.MRCNN.RESOLUTION
    # No detections: return an empty, correctly-shaped mask array.
    if boxes.shape[0] == 0:
        return np.zeros((0, M, M), np.float32)

    inputs = {'mask_rois': _get_rois_blob(boxes, im_scale)}
    # Add multi-level rois for FPN
    if cfg.FPN.MULTILEVEL_ROIS:
        _add_multilevel_rois_for_test(inputs, 'mask_rois')

    for name, blob in inputs.items():
        workspace.FeedBlob(core.ScopedName(name), blob)
    workspace.RunNet(model.mask_net.Proto().name)

    # Fetch masks
    pred_masks = workspace.FetchBlob(
        core.ScopedName('mask_fcn_probs')
    ).squeeze()
    # One mask channel per class, or a single class-agnostic channel.
    target_shape = (
        [-1, cfg.MODEL.NUM_CLASSES, M, M]
        if cfg.MRCNN.CLS_SPECIFIC_MASK
        else [-1, 1, M, M]
    )
    return pred_masks.reshape(target_shape)
Example #22
Source File: test.py From Detectron-DA-Faster-RCNN with Apache License 2.0 | 5 votes |
def im_detect_mask(model, im_scale, boxes):
    """Infer instance segmentation masks.

    Must be called after im_detect_bbox, which populates the Caffe2
    workspace with the blobs this function reads.

    Arguments:
        model (DetectionModelHelper): the detection model to use
        im_scale (list): image blob scales as returned by im_detect_bbox
        boxes (ndarray): R x 4 array of bounding box detections (e.g., as
            returned by im_detect_bbox)

    Returns:
        pred_masks (ndarray): R x K x M x M array of class specific soft
            masks output by the network (must be processed by segm_results
            to convert into hard masks in the original image coordinate
            space)
    """
    M = cfg.MRCNN.RESOLUTION
    # No detections: return an empty, correctly-shaped mask array.
    if boxes.shape[0] == 0:
        return np.zeros((0, M, M), np.float32)

    inputs = {'mask_rois': _get_rois_blob(boxes, im_scale)}
    # Add multi-level rois for FPN
    if cfg.FPN.MULTILEVEL_ROIS:
        _add_multilevel_rois_for_test(inputs, 'mask_rois')

    for name, blob in inputs.items():
        workspace.FeedBlob(core.ScopedName(name), blob)
    workspace.RunNet(model.mask_net.Proto().name)

    # Fetch masks
    pred_masks = workspace.FetchBlob(
        core.ScopedName('mask_fcn_probs')
    ).squeeze()
    # One mask channel per class, or a single class-agnostic channel.
    target_shape = (
        [-1, cfg.MODEL.NUM_CLASSES, M, M]
        if cfg.MRCNN.CLS_SPECIFIC_MASK
        else [-1, 1, M, M]
    )
    return pred_masks.reshape(target_shape)
Example #23
Source File: test.py From Detectron-DA-Faster-RCNN with Apache License 2.0 | 5 votes |
def im_detect_keypoints(model, im_scale, boxes):
    """Infer instance keypoint poses.

    Must be called after im_detect_bbox, which populates the Caffe2
    workspace with the blobs this function reads.

    Arguments:
        model (DetectionModelHelper): the detection model to use
        im_scale (list): image blob scales as returned by im_detect_bbox
        boxes (ndarray): R x 4 array of bounding box detections (e.g., as
            returned by im_detect_bbox)

    Returns:
        pred_heatmaps (ndarray): R x J x M x M array of keypoint location
            logits (softmax inputs) for each of the J keypoint types output
            by the network (must be processed by keypoint_results to convert
            into point predictions in the original image coordinate space)
    """
    M = cfg.KRCNN.HEATMAP_SIZE
    # No detections: return an empty, correctly-shaped heatmap array.
    if boxes.shape[0] == 0:
        return np.zeros((0, cfg.KRCNN.NUM_KEYPOINTS, M, M), np.float32)

    inputs = {'keypoint_rois': _get_rois_blob(boxes, im_scale)}
    # Add multi-level rois for FPN
    if cfg.FPN.MULTILEVEL_ROIS:
        _add_multilevel_rois_for_test(inputs, 'keypoint_rois')

    for name, blob in inputs.items():
        workspace.FeedBlob(core.ScopedName(name), blob)
    workspace.RunNet(model.keypoint_net.Proto().name)

    pred_heatmaps = workspace.FetchBlob(core.ScopedName('kps_score')).squeeze()
    # With a single box, squeeze() drops the leading axis; restore it.
    if pred_heatmaps.ndim == 3:
        pred_heatmaps = np.expand_dims(pred_heatmaps, axis=0)
    return pred_heatmaps
Example #24
Source File: test.py From CBNet with Apache License 2.0 | 5 votes |
def im_conv_body_only(model, im, target_scale, target_max_size):
    """Runs `model.conv_body_net` on the given image `im`."""
    # Preprocess the image into a network-ready blob plus its resize scale.
    data_blob, scale, _im_info = blob_utils.get_image_blob(
        im, target_scale, target_max_size
    )
    workspace.FeedBlob(core.ScopedName('data'), data_blob)
    workspace.RunNet(model.conv_body_net.Proto().name)
    return scale
Example #25
Source File: test.py From CBNet with Apache License 2.0 | 5 votes |
def im_detect_mask(model, im_scale, boxes):
    """Infer instance segmentation masks.

    Must be called after im_detect_bbox, which populates the Caffe2
    workspace with the blobs this function reads.

    Arguments:
        model (DetectionModelHelper): the detection model to use
        im_scale (list): image blob scales as returned by im_detect_bbox
        boxes (ndarray): R x 4 array of bounding box detections (e.g., as
            returned by im_detect_bbox)

    Returns:
        pred_masks (ndarray): R x K x M x M array of class specific soft
            masks output by the network (must be processed by segm_results
            to convert into hard masks in the original image coordinate
            space)
    """
    M = cfg.MRCNN.RESOLUTION
    # No detections: return an empty, correctly-shaped mask array.
    if boxes.shape[0] == 0:
        return np.zeros((0, M, M), np.float32)

    inputs = {'mask_rois': _get_rois_blob(boxes, im_scale)}
    # Add multi-level rois for FPN
    if cfg.FPN.MULTILEVEL_ROIS:
        _add_multilevel_rois_for_test(inputs, 'mask_rois')

    for name, blob in inputs.items():
        workspace.FeedBlob(core.ScopedName(name), blob)
    workspace.RunNet(model.mask_net.Proto().name)

    # Fetch masks
    pred_masks = workspace.FetchBlob(
        core.ScopedName('mask_fcn_probs')
    ).squeeze()
    # One mask channel per class, or a single class-agnostic channel.
    target_shape = (
        [-1, cfg.MODEL.NUM_CLASSES, M, M]
        if cfg.MRCNN.CLS_SPECIFIC_MASK
        else [-1, 1, M, M]
    )
    return pred_masks.reshape(target_shape)
Example #26
Source File: test.py From CBNet with Apache License 2.0 | 5 votes |
def im_detect_keypoints(model, im_scale, boxes):
    """Infer instance keypoint poses.

    Must be called after im_detect_bbox, which populates the Caffe2
    workspace with the blobs this function reads.

    Arguments:
        model (DetectionModelHelper): the detection model to use
        im_scale (list): image blob scales as returned by im_detect_bbox
        boxes (ndarray): R x 4 array of bounding box detections (e.g., as
            returned by im_detect_bbox)

    Returns:
        pred_heatmaps (ndarray): R x J x M x M array of keypoint location
            logits (softmax inputs) for each of the J keypoint types output
            by the network (must be processed by keypoint_results to convert
            into point predictions in the original image coordinate space)
    """
    M = cfg.KRCNN.HEATMAP_SIZE
    # No detections: return an empty, correctly-shaped heatmap array.
    if boxes.shape[0] == 0:
        return np.zeros((0, cfg.KRCNN.NUM_KEYPOINTS, M, M), np.float32)

    inputs = {'keypoint_rois': _get_rois_blob(boxes, im_scale)}
    # Add multi-level rois for FPN
    if cfg.FPN.MULTILEVEL_ROIS:
        _add_multilevel_rois_for_test(inputs, 'keypoint_rois')

    for name, blob in inputs.items():
        workspace.FeedBlob(core.ScopedName(name), blob)
    workspace.RunNet(model.keypoint_net.Proto().name)

    pred_heatmaps = workspace.FetchBlob(core.ScopedName('kps_score')).squeeze()
    # With a single box, squeeze() drops the leading axis; restore it.
    if pred_heatmaps.ndim == 3:
        pred_heatmaps = np.expand_dims(pred_heatmaps, axis=0)
    return pred_heatmaps
Example #27
Source File: benchmarks.py From dlcookbook-dlbs with Apache License 2.0 | 5 votes |
def run_n_times(model, num_warmup_batches, num_batches):
    """Runs **model** multiple times (**num_warmup_batches** + **num_batches**).

    :param model: Caffe2's model helper class instances.
    :type model: :py:class:`caffe2.python.model_helper.ModelHelper`
    :param int num_warmup_batches: Number of warmup batches to process
                                   (do not contribute to computing average
                                   batch time)
    :param int num_batches: Number of batches to process (contribute to
                            computing average batch time)
    :return: Batch times (excluding warmup batches) in seconds.
    :rtype: Numpy array of length = **num_batches**.
    """
    net_name = model.net.Proto().name

    # Warmup phase: run all warmup batches in one call and report the mean.
    warmup_start = timeit.default_timer()
    if num_warmup_batches > 0:
        workspace.RunNet(net_name, num_iter=num_warmup_batches)
        warmup_elapsed = timeit.default_timer() - warmup_start
        print("Average warmup batch time %f ms across %d batches" %\
              (1000.0 * warmup_elapsed / num_warmup_batches,\
               num_warmup_batches))
    else:
        print("Warning - no warmup iterations has been performed.")

    # Timed phase: run each batch individually so per-batch times are kept.
    batch_times = np.zeros(num_batches)
    for batch_idx in range(num_batches):
        t0 = timeit.default_timer()
        workspace.RunNet(net_name, 1)
        batch_times[batch_idx] = timeit.default_timer() - t0
    return batch_times
Example #28
Source File: test.py From NucleiDetectron with Apache License 2.0 | 5 votes |
def im_conv_body_only(model, im):
    """Runs `model.conv_body_net` on the given image `im`."""
    # Preprocess the image into a network-ready blob plus its scale factors.
    data_blob, scale_factors = _get_image_blob(im)
    workspace.FeedBlob(core.ScopedName('data'), data_blob)
    workspace.RunNet(model.conv_body_net.Proto().name)
    return scale_factors
Example #29
Source File: test.py From DetectAndTrack with Apache License 2.0 | 5 votes |
def im_conv_body_only(model, im):
    """Runs `model.conv_body_net` on the given image `im`."""
    # Preprocess the image into a network-ready blob plus its scale factors.
    data_blob, scale_factors = _get_image_blob(im)
    workspace.FeedBlob(core.ScopedName('data'), data_blob)
    workspace.RunNet(model.conv_body_net.Proto().name)
    return scale_factors
Example #30
Source File: rpn_generator.py From DetectAndTrack with Apache License 2.0 | 5 votes |
def im_proposals(model, im):
    """Generate RPN proposals on a single image."""
    # Feed the image data and its metadata, then run the RPN.
    inputs = {}
    inputs['data'], inputs['im_info'] = _get_image_blob(im)
    for blob_name, blob_val in inputs.items():
        workspace.FeedBlob(
            core.ScopedName(blob_name), blob_val.astype(np.float32, copy=False)
        )
    workspace.RunNet(model.net.Proto().name)

    scale = inputs['im_info'][0, 2]
    if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_RPN:
        k_max = cfg.FPN.RPN_MAX_LEVEL
        k_min = cfg.FPN.RPN_MIN_LEVEL
        levels = range(k_min, k_max + 1)
        rois_names = [
            core.ScopedName('rpn_rois_fpn' + str(l)) for l in levels
        ]
        score_names = [
            core.ScopedName('rpn_roi_probs_fpn' + str(l)) for l in levels
        ]
        blobs = workspace.FetchBlobs(rois_names + score_names)
        # Combine predictions across all levels and retain the top scoring
        boxes = np.concatenate(blobs[:len(rois_names)])
        scores = np.concatenate(blobs[len(rois_names):]).squeeze()
        # TODO(rbg): NMS again?
        inds = np.argsort(-scores)[:cfg.TEST.RPN_POST_NMS_TOP_N]
        scores = scores[inds]
        boxes = boxes[inds, :]
    else:
        boxes, scores = workspace.FetchBlobs(
            [core.ScopedName('rpn_rois'), core.ScopedName('rpn_roi_probs')]
        )
        scores = scores.squeeze()

    # Column 0 is the batch index in the (batch ind, x1, y1, x2, y2) encoding,
    # so we remove it since we just want to return boxes
    boxes = boxes[:, 1:] / scale
    return boxes, scores