Python caffe2.python.workspace.FetchBlob() Examples

The following are 30 code examples of caffe2.python.workspace.FetchBlob(), collected from open-source projects. The project and source file each example was taken from are noted above it. You may also want to check out all available functions/classes of the module caffe2.python.workspace.
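For orientation, here is a minimal sketch of the usual round trip before diving into the project examples. The blob names 'X' and 'Y' and the choice of the Relu operator are illustrative only, not taken from any example below: FeedBlob places a NumPy array in the workspace, running an operator (or net) creates its output blobs there, and FetchBlob copies a named blob back out as a NumPy array.

import numpy as np
from caffe2.python import core, workspace

workspace.ResetWorkspace()

# Put a NumPy array into the workspace under the name 'X'.
workspace.FeedBlob('X', np.random.randn(3, 4).astype(np.float32))

# Run a single operator; it writes its output blob 'Y' into the workspace.
workspace.RunOperatorOnce(core.CreateOperator('Relu', ['X'], ['Y']))

# FetchBlob returns the blob's current contents as a NumPy array.
Y = workspace.FetchBlob('Y')
print(Y.shape, Y.dtype)

Device-scoped names such as 'gpu_0/conv1_w', which appear throughout the multi-GPU examples below, are fetched the same way: FetchBlob simply looks the string up in the current workspace.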
Example #1
Source File: net.py    From KL-Loss with Apache License 2.0
def broadcast_parameters(model):
    """Copy parameter blobs from GPU 0 over the corresponding parameter blobs
    on GPUs 1 through cfg.NUM_GPUS - 1.
    """
    if cfg.NUM_GPUS == 1:
        # no-op if only running on a single GPU
        return

    def _do_broadcast(all_blobs):
        assert len(all_blobs) % cfg.NUM_GPUS == 0, \
            ('Unexpected value for NUM_GPUS. Make sure you are not '
             'running single-GPU inference with NUM_GPUS > 1.')
        blobs_per_gpu = int(len(all_blobs) / cfg.NUM_GPUS)
        for i in range(blobs_per_gpu):
            blobs = [p for p in all_blobs[i::blobs_per_gpu]]
            data = workspace.FetchBlob(blobs[0])
            logger.debug('Broadcasting {} to'.format(str(blobs[0])))
            for i, p in enumerate(blobs[1:]):
                logger.debug(' |-> {}'.format(str(p)))
                with c2_utils.CudaScope(i + 1):
                    workspace.FeedBlob(p, data)

    _do_broadcast(model.params)
    _do_broadcast([b + '_momentum' for b in model.TrainableParams()]) 
Example #2
Source File: test_ops_constant.py    From ngraph-python with Apache License 2.0
def test_constant():
    workspace.ResetWorkspace()

    shape = [10, 10]
    val = random.random()
    net = core.Net("net")
    net.ConstantFill([], ["Y"], shape=shape, value=val, run_once=0, name="Y")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        # compare Caffe2 and ngraph results
        assert(np.ma.allequal(f_result, workspace.FetchBlob("Y")))
        assert(np.isclose(f_result[0][0], val, atol=1e-6, rtol=0)) 
Example #3
Source File: dlrm_s_caffe2.py    From dlrm with MIT License
def FetchBlobWrapper(self, tag, add_prefix=True, reduce_across=None, device_id=-1):
        if self.ndevices > 1 and add_prefix:
            # fetch from multiple devices
            vals = []
            for d in range(self.ndevices):
                if tag.__class__ == list:
                    tag_on_device = tag[d]
                else:
                    tag_on_device = "gpu_" + str(d) + "/" + tag
                val = workspace.FetchBlob(tag_on_device)
                vals.append(val)
            # reduce across devices
            if reduce_across == "add":
                return functools.reduce(operator.add, vals)
            elif reduce_across == "concat":
                return np.concatenate(vals)
            else:
                return vals
        else:
            # fetch from a single device (named or not)
            if device_id >= 0:
                tag_on_device = "gpu_" + str(device_id) + "/" + tag
                return workspace.FetchBlob(tag_on_device)
            else:
                return workspace.FetchBlob(tag) 
Example #4
Source File: test_restore_checkpoint.py    From KL-Loss with Apache License 2.0
def get_params(model):
    blobs = {}  # gpu_0 blobs with unscoped_name as key
    all_blobs = {}  # all blobs with scoped name as key
    # Save all parameters
    for param in model.params:
        scoped_name = str(param)
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if 'gpu_0' in scoped_name:
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
        all_blobs[scoped_name] = workspace.FetchBlob(scoped_name)
    for param in model.TrainableParams():
        scoped_name = str(param) + '_momentum'
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if 'gpu_0' in scoped_name:
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
        all_blobs[scoped_name] = workspace.FetchBlob(scoped_name)
    return blobs, all_blobs 
Example #5
Source File: net.py    From Clustered-Object-Detection-in-Aerial-Image with Apache License 2.0
def broadcast_parameters(model):
    """Copy parameter blobs from GPU 0 over the corresponding parameter blobs
    on GPUs 1 through cfg.NUM_GPUS - 1.
    """
    if cfg.NUM_GPUS == 1:
        # no-op if only running on a single GPU
        return

    def _do_broadcast(all_blobs):
        assert len(all_blobs) % cfg.NUM_GPUS == 0, \
            ('Unexpected value for NUM_GPUS. Make sure you are not '
             'running single-GPU inference with NUM_GPUS > 1.')
        blobs_per_gpu = int(len(all_blobs) / cfg.NUM_GPUS)
        for i in range(blobs_per_gpu):
            blobs = [p for p in all_blobs[i::blobs_per_gpu]]
            data = workspace.FetchBlob(blobs[0])
            logger.debug('Broadcasting {} to'.format(str(blobs[0])))
            for i, p in enumerate(blobs[1:]):
                logger.debug(' |-> {}'.format(str(p)))
                with c2_utils.CudaScope(i + 1):
                    workspace.FeedBlob(p, data)

    _do_broadcast(model.params)
    _do_broadcast([b + '_momentum' for b in model.TrainableParams()]) 
Example #6
Source File: dlrm_s_caffe2.py    From optimized-models with Apache License 2.0
def FetchBlobWrapper(self, tag, add_prefix=True, reduce_across=None, device_id=-1):
        if self.ndevices > 1 and add_prefix:
            # fetch from multiple devices
            vals = []
            for d in range(self.ndevices):
                if tag.__class__ == list:
                    tag_on_device = tag[d]
                else:
                    tag_on_device = "gpu_" + str(d) + "/" + tag
                val = workspace.FetchBlob(tag_on_device)
                vals.append(val)
            # reduce across devices
            if reduce_across == "add":
                return functools.reduce(operator.add, vals)
            elif reduce_across == "concat":
                return np.concatenate(vals)
            else:
                return vals
        else:
            # fetch from a single device (named or not)
            if device_id >= 0:
                tag_on_device = "gpu_" + str(device_id) + "/" + tag
                return workspace.FetchBlob(tag_on_device)
            else:
                return workspace.FetchBlob(tag) 
Example #7
Source File: test_batch_permutation_op.py    From KL-Loss with Apache License 2.0
def _run_speed_test(self, iters=5, N=1024):
        """This function provides an example of how to benchmark custom
        operators using the Caffe2 'prof_dag' network execution type. Please
        note that for 'prof_dag' to work, Caffe2 must be compiled with profiling
        support using the `-DUSE_PROF=ON` option passed to `cmake` when building
        Caffe2.
        """
        net = core.Net('test')
        net.Proto().type = 'prof_dag'
        net.Proto().num_workers = 2
        Y = net.BatchPermutation(['X', 'I'], 'Y')
        Y_flat = net.FlattenToVec([Y], 'Y_flat')
        loss = net.AveragedLoss([Y_flat], 'loss')
        net.AddGradientOperators([loss])
        workspace.CreateNet(net)

        X = np.random.randn(N, 256, 14, 14)
        for _i in range(iters):
            I = np.random.permutation(N)
            workspace.FeedBlob('X', X.astype(np.float32))
            workspace.FeedBlob('I', I.astype(np.int32))
            workspace.RunNet(net.Proto().name)
            np.testing.assert_allclose(
                workspace.FetchBlob('Y'), X[I], rtol=1e-5, atol=1e-08
            ) 
Example #8
Source File: test_batch_permutation_op.py    From KL-Loss with Apache License 2.0
def _run_op_test(self, X, I, check_grad=False):
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
            op = core.CreateOperator('BatchPermutation', ['X', 'I'], ['Y'])
            workspace.FeedBlob('X', X)
            workspace.FeedBlob('I', I)
        workspace.RunOperatorOnce(op)
        Y = workspace.FetchBlob('Y')

        if check_grad:
            gc = gradient_checker.GradientChecker(
                stepsize=0.1,
                threshold=0.001,
                device_option=core.DeviceOption(caffe2_pb2.CUDA, 0)
            )

            res, grad, grad_estimated = gc.CheckSimple(op, [X, I], 0, [0])
            self.assertTrue(res, 'Grad check failed')

        Y_ref = X[I]
        np.testing.assert_allclose(Y, Y_ref, rtol=1e-5, atol=1e-08) 
Example #9
Source File: test_spatial_narrow_as_op.py    From KL-Loss with Apache License 2.0
def _run_test(self, A, B, check_grad=False):
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
            op = core.CreateOperator('SpatialNarrowAs', ['A', 'B'], ['C'])
            workspace.FeedBlob('A', A)
            workspace.FeedBlob('B', B)
        workspace.RunOperatorOnce(op)
        C = workspace.FetchBlob('C')

        if check_grad:
            gc = gradient_checker.GradientChecker(
                stepsize=0.005,
                threshold=0.005,
                device_option=core.DeviceOption(caffe2_pb2.CUDA, 0)
            )

            res, grad, grad_estimated = gc.CheckSimple(op, [A, B], 0, [0])
            self.assertTrue(res, 'Grad check failed')

        dims = C.shape
        C_ref = A[:dims[0], :dims[1], :dims[2], :dims[3]]
        np.testing.assert_allclose(C, C_ref, rtol=1e-5, atol=1e-08) 
Example #10
Source File: test_spatial_narrow_as_op.py    From Clustered-Object-Detection-in-Aerial-Image with Apache License 2.0
def _run_test(self, A, B, check_grad=False):
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
            op = core.CreateOperator('SpatialNarrowAs', ['A', 'B'], ['C'])
            workspace.FeedBlob('A', A)
            workspace.FeedBlob('B', B)
        workspace.RunOperatorOnce(op)
        C = workspace.FetchBlob('C')

        if check_grad:
            gc = gradient_checker.GradientChecker(
                stepsize=0.005,
                threshold=0.005,
                device_option=core.DeviceOption(caffe2_pb2.CUDA, 0)
            )

            res, grad, grad_estimated = gc.CheckSimple(op, [A, B], 0, [0])
            self.assertTrue(res, 'Grad check failed')

        dims = C.shape
        C_ref = A[:dims[0], :dims[1], :dims[2], :dims[3]]
        np.testing.assert_allclose(C, C_ref, rtol=1e-5, atol=1e-08) 
Example #11
Source File: test_batch_permutation_op.py    From Clustered-Object-Detection-in-Aerial-Image with Apache License 2.0
def _run_op_test(self, X, I, check_grad=False):
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
            op = core.CreateOperator('BatchPermutation', ['X', 'I'], ['Y'])
            workspace.FeedBlob('X', X)
            workspace.FeedBlob('I', I)
        workspace.RunOperatorOnce(op)
        Y = workspace.FetchBlob('Y')

        if check_grad:
            gc = gradient_checker.GradientChecker(
                stepsize=0.1,
                threshold=0.001,
                device_option=core.DeviceOption(caffe2_pb2.CUDA, 0)
            )

            res, grad, grad_estimated = gc.CheckSimple(op, [X, I], 0, [0])
            self.assertTrue(res, 'Grad check failed')

        Y_ref = X[I]
        np.testing.assert_allclose(Y, Y_ref, rtol=1e-5, atol=1e-08) 
Example #12
Source File: test_batch_permutation_op.py    From Clustered-Object-Detection-in-Aerial-Image with Apache License 2.0
def _run_speed_test(self, iters=5, N=1024):
        """This function provides an example of how to benchmark custom
        operators using the Caffe2 'prof_dag' network execution type. Please
        note that for 'prof_dag' to work, Caffe2 must be compiled with profiling
        support using the `-DUSE_PROF=ON` option passed to `cmake` when building
        Caffe2.
        """
        net = core.Net('test')
        net.Proto().type = 'prof_dag'
        net.Proto().num_workers = 2
        Y = net.BatchPermutation(['X', 'I'], 'Y')
        Y_flat = net.FlattenToVec([Y], 'Y_flat')
        loss = net.AveragedLoss([Y_flat], 'loss')
        net.AddGradientOperators([loss])
        workspace.CreateNet(net)

        X = np.random.randn(N, 256, 14, 14)
        for _i in range(iters):
            I = np.random.permutation(N)
            workspace.FeedBlob('X', X.astype(np.float32))
            workspace.FeedBlob('I', I.astype(np.int32))
            workspace.RunNet(net.Proto().name)
            np.testing.assert_allclose(
                workspace.FetchBlob('Y'), X[I], rtol=1e-5, atol=1e-08
            ) 
Example #13
Source File: run_add5_op.py    From tutorials with Apache License 2.0
def run_add5_and_add5gradient_op(device):
    # clear the workspace before running the operator
    workspace.ResetWorkspace()
    add5 = core.CreateOperator("Add5",
                               ["X"],
                               ["Y"],
                               device_option=device)
    print("==> Running Add5 op:")
    workspace.FeedBlob("X", (np.random.rand(5, 5)), device_option=device)
    print("Input of Add5: ", workspace.FetchBlob("X"))
    workspace.RunOperatorOnce(add5)
    print("Output of Add5: ", workspace.FetchBlob("Y"))

    print("\n\n==> Running Add5Gradient op:")
    print("Input of Add5Gradient: ", workspace.FetchBlob("Y"))
    add5gradient = core.CreateOperator("Add5Gradient",
                                       ["Y"],
                                       ["Z"],
                                       device_option=device)
    workspace.RunOperatorOnce(add5gradient)
    print("Output of Add5Gradient: ", workspace.FetchBlob("Z")) 
Example #14
Source File: test_restore_checkpoint.py    From Clustered-Object-Detection-in-Aerial-Image with Apache License 2.0
def get_params(model):
    blobs = {}  # gpu_0 blobs with unscoped_name as key
    all_blobs = {}  # all blobs with scoped name as key
    # Save all parameters
    for param in model.params:
        scoped_name = str(param)
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if 'gpu_0' in scoped_name:
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
        all_blobs[scoped_name] = workspace.FetchBlob(scoped_name)
    for param in model.TrainableParams():
        scoped_name = str(param) + '_momentum'
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if 'gpu_0' in scoped_name:
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
        all_blobs[scoped_name] = workspace.FetchBlob(scoped_name)
    return blobs, all_blobs 
Example #15
Source File: model_helper.py    From VMZ with Apache License 2.0
def GetModelWeights(model, gpu_id=0):
    '''
    function that returns all the model weights in a dict
    '''
    model_ops = model.net.Proto().op
    master_gpu = 'gpu_{}'.format(gpu_id)
    param_ops = []
    for idx in range(len(model_ops)):
        op_type = model.net.Proto().op[idx].type
        op_input = model.net.Proto().op[idx].input[0]
        if op_type in ['Conv', 'FC'] and op_input.find(master_gpu) >= 0:
            param_ops.append(model.net.Proto().op[idx])

    weight_dict = {}
    for idx in range(len(param_ops)):
        # op_type = op.type
        op_inputs = param_ops[idx].input
        # op_output = param_ops[idx].output[0]
        for op_input in op_inputs:
            param_blob = op_input
            weights = np.array(workspace.FetchBlob(str(param_blob)))
            weight_dict[param_blob] = weights
    return weight_dict 
Example #16
Source File: model_loader.py    From VMZ with Apache License 2.0
def BroacastParameters(model, src_gpu, gpus):

    log.info("Broadcasting parameters from gpu {} to gpu: {}".format(
        src_gpu, ','.join([str(g) for g in gpus]))
    )

    for param in model.params:
        if 'gpu_{}'.format(gpus[0]) in str(param):
            for i in gpus:
                blob = workspace.FetchBlob(str(param))
                target_blob_name = str(param).replace(
                    'gpu_{}'.format(src_gpu),
                    'gpu_{}'.format(i)
                )
                log.info('broadcast {} -> {}'.format(
                    str(param), target_blob_name)
                )
                workspace.FetchBlob(str(param))
                with core.DeviceScope(
                        core.DeviceOption(caffe2_pb2.CUDA, i)):
                    workspace.FeedBlob(target_blob_name, blob) 
Example #17
Source File: Multi-GPU_Training.py    From tutorials with Apache License 2.0
def accuracy(model):
    accuracy = []
    prefix = model.net.Proto().name
    for device in model._devices:
        accuracy.append(
            np.asscalar(workspace.FetchBlob("gpu_{}/{}_accuracy".format(device, prefix))))
    return np.average(accuracy)


# In[ ]:


# SOLUTION for Part 11

# Start looping through epochs where we run the batches of images to cover the entire dataset
# Usually you would want to run a lot more epochs to increase your model's accuracy 
Example #18
Source File: test_ops_nn.py    From ngraph-python with Apache License 2.0
def test_SquaredL2Distance():
    workspace.ResetWorkspace()
    shape = (10, 10)

    net = core.Net("net")
    Y = net.GivenTensorFill([], "Y", shape=shape, values=np.random.uniform(-1, 1, shape))
    T = net.GivenTensorFill([], "T", shape=shape, values=np.random.uniform(-1, 1, shape))
    net.SquaredL2Distance([Y, T], "dist")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("dist")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        assert(np.allclose(f_result, workspace.FetchBlob("dist"), equal_nan=False)) 
Example #19
Source File: Multi-GPU_Training.py    From tutorials with Apache License 2.0
def accuracy(model):
    accuracy = []
    prefix = model.net.Proto().name
    for device in model._devices:
        accuracy.append(
            np.asscalar(workspace.FetchBlob("gpu_{}/{}_accuracy".format(device, prefix))))
    return np.average(accuracy)


# ## Part 11: Run Multi-GPU Training and Get Test Results
# You've come a long way. Now is the time to see it all pay off. Since you already ran ResNet once, you can glance at the code below and run it. The big difference this time is your model is parallelized! 
# 
# The additional components at the end deal with accuracy so you may want to dig into those specifics as a bonus task. You can try it again: just adjust the `num_epochs` value below, run the block, and see the results. You can also go back to Part 10 to reinitialize the model, and run this step again. (You may want to add `workspace.ResetWorkspace()` before you run the new models again.)
# 
# Go back and check the images/sec from when you ran single GPU. Note how you can scale up with a small amount of overhead. 
# 
# ### Task: How many GPUs would it take to train ImageNet in under a minute? 

# In[ ]:


# Start looping through epochs where we run the batches of images to cover the entire dataset
# Usually you would want to run a lot more epochs to increase your model's accuracy 
Example #20
Source File: test_ops_nn.py    From ngraph-python with Apache License 2.0
def test_AveragedLoss():
    workspace.ResetWorkspace()
    shape = (32,)

    net = core.Net("net")
    X = net.GivenTensorFill([], "Y", shape=shape, values=np.random.uniform(-1, 1, shape))
    X.AveragedLoss([], ["loss"])

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("loss")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        assert(np.allclose(f_result, workspace.FetchBlob("loss"), equal_nan=False)) 
Example #21
Source File: x2num.py    From tensorboardX with MIT License
def prepare_caffe2(x):
    from caffe2.python import workspace
    x = workspace.FetchBlob(x)
    return x 
Example #22
Source File: x2num.py    From tensorboardX with MIT License
def prepare_caffe2(x):
    from caffe2.python import workspace
    x = workspace.FetchBlob(x)
    return x 
Example #23
Source File: test_ops_nn.py    From ngraph-python with Apache License 2.0
def test_convolution_nhwc_no_pad_no_bias():
    workspace.ResetWorkspace()

    # shape is in NCHW format
    # [batch, input_feature_map, spatial, output_feature_map, kernel, stride]
    n, ifm, spatial, ofm, kernel, stride = [2, 3, 8, 1, 2, 2]

    shape_x = (n, spatial, spatial, ifm)
    shape_w = (ofm, kernel, kernel, ifm)
    shape_b = (ofm, )

    data_x = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape_x))]
    data_w = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape_w))]
    data_b = [0. for i in range(np.prod(shape_b))]

    net = core.Net("net")
    X = net.GivenTensorFill([], ["X"], shape=shape_x, values=data_x, name="X")
    W = net.GivenTensorFill([], ["W"], shape=shape_w, values=data_w, name="W")
    B = net.GivenTensorFill([], ["B"], shape=shape_b, values=data_b, name="B")

    net.Conv([X, W, B], 'Y', kernel=kernel, stride=stride, order='NHWC')

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        # compare Caffe2 and ngraph results
        assert(np.allclose(f_result, workspace.FetchBlob("Y"), atol=1e-4, rtol=1e-3,
                           equal_nan=False)) 
Example #24
Source File: metrics.py    From video-long-term-feature-banks with Apache License 2.0
def sum_multi_gpu_blob(blob_name):
    """Sum values of a blob from all GPUs."""
    value = 0
    num_gpus = cfg.NUM_GPUS
    root_gpu_id = cfg.ROOT_GPU_ID
    for idx in range(root_gpu_id, root_gpu_id + num_gpus):
        value += workspace.FetchBlob('gpu_{}/{}'.format(idx, blob_name))
    return value 
Example #25
Source File: test_ops_nn.py    From ngraph-python with Apache License 2.0
def test_avgpool():
    workspace.ResetWorkspace()

    # shape is in NCHW format
    # [[shape], kernel, stride, caffe_padding_type]
    param_list = [[[1, 3, 10, 10], 2, 2, caffe2_legacy_pb2.NOTSET],
                  [[2, 3, 5, 5], 1, 1, caffe2_legacy_pb2.NOTSET],
                  [[2, 2, 7, 7], 3, 2, caffe2_legacy_pb2.NOTSET],
                  [[8, 5, 8, 8], 4, 4, caffe2_legacy_pb2.NOTSET],
                  [[8, 3, 4, 4], 3, 3, caffe2_legacy_pb2.VALID],
                  [[12, 6, 5, 5], 4, 3, caffe2_legacy_pb2.VALID],
                  [[8, 3, 4, 4], 3, 3, caffe2_legacy_pb2.SAME],
                  [[12, 6, 5, 5], 4, 3, caffe2_legacy_pb2.SAME]]

    for param_iter in param_list:
        shape, kernel, stride, pad_type = param_iter
        data1 = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape))]

        net = core.Net("net")
        net.GivenTensorFill([], ["X"], shape=shape, values=data1, name="X")
        net.AveragePool('X', 'Y', kernel=kernel, stride=stride, legacy_pad=pad_type)

        # Execute via Caffe2
        workspace.RunNetOnce(net)

        # Import caffe2 network into ngraph
        importer = C2Importer()
        importer.parse_net_def(net.Proto(), verbose=False)

        # Get handle
        f_ng = importer.get_op_handle("Y")

        # Execute
        with ExecutorFactory() as ex:
            f_result = ex.executor(f_ng)()

            # compare Caffe2 and ngraph results
            assert(np.allclose(f_result, workspace.FetchBlob("Y"),
                               atol=1e-4, rtol=1e-3, equal_nan=False)) 
Example #26
Source File: test_ops_nn.py    From ngraph-python with Apache License 2.0
def test_convolution_nchw_no_pad_no_bias():
    workspace.ResetWorkspace()

    # shape is in NCHW format
    # [batch, input_feature_map, spatial, output_feature_map, kernel, stride]
    n, ifm, spatial, ofm, kernel, stride = [2, 3, 8, 1, 2, 2]

    shape_x = (n, ifm, spatial, spatial)
    shape_w = (ofm, ifm, kernel, kernel)
    shape_b = (ofm,)

    data_x = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape_x))]
    data_w = [random.gauss(mu=0, sigma=10) for i in range(np.prod(shape_w))]
    data_b = [0. for i in range(np.prod(shape_b))]

    net = core.Net("net")
    X = net.GivenTensorFill([], ["X"], shape=shape_x, values=data_x, name="X")
    W = net.GivenTensorFill([], ["W"], shape=shape_w, values=data_w, name="W")
    B = net.GivenTensorFill([], ["B"], shape=shape_b, values=data_b, name="B")

    net.Conv([X, W, B], 'Y', kernel=kernel, stride=stride, order='NCHW')

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        # compare Caffe2 and ngraph results
        assert (np.allclose(f_result, workspace.FetchBlob("Y"), atol=1e-4, rtol=1e-3,
                            equal_nan=False)) 
Example #27
Source File: common_caffe2.py    From optimized-models with Apache License 2.0
def AddTensors(workspace, init_net, blob_names):
    """add tensors"""
    for blob_name in blob_names:
        blob = workspace.FetchBlob(str(blob_name))
        AddTensor(init_net, blob_name, blob) 
Example #28
Source File: metrics.py    From video-long-term-feature-banks with Apache License 2.0
def compute_multi_gpu_topk_accuracy(top_k, split, suffix='', epic_type=''):
    """Get predictions and labels from GPUs and compute top-k."""

    aggr_batch_size = 0
    aggr_top_k_correct_hits = 0

    computed_metrics = {}

    for idx in range(cfg.ROOT_GPU_ID, cfg.ROOT_GPU_ID + cfg.NUM_GPUS):
        softmax = workspace.FetchBlob('gpu_{}/pred{}'.format(idx, epic_type))

        softmax = softmax.reshape((softmax.shape[0], -1))
        labels = workspace.FetchBlob('gpu_{}/labels{}{}'.format(
            idx, epic_type, suffix))

        assert labels.shape[0] == softmax.shape[0], "Batch size mismatch."

        aggr_batch_size += labels.shape[0]

        aggr_top_k_correct_hits += compute_topk_correct_hits(
            top_k, softmax, labels)

    # Normalize results.
    computed_metrics['topk_accuracy'] = \
        float(aggr_top_k_correct_hits) / aggr_batch_size

    return computed_metrics 
Example #29
Source File: metrics.py    From video-long-term-feature-banks with Apache License 2.0
def get_multi_gpu_outputs(split, suffix):
    """Get predictions and labels from GPUs."""

    all_preds, all_labels = [], []
    all_original_boxes, all_metadata = [], []
    for idx in range(cfg.ROOT_GPU_ID, cfg.ROOT_GPU_ID + cfg.NUM_GPUS):

        softmax = workspace.FetchBlob('gpu_{}/pred'.format(idx))

        labels = workspace.FetchBlob('gpu_{}/labels{}'.format(idx, suffix))

        softmax = softmax.reshape((softmax.shape[0], -1))
        all_preds.append(softmax)
        all_labels.append(labels)

        if cfg.DATASET == 'ava':
            original_boxes = workspace.FetchBlob(
                'gpu_{}/original_boxes{}'.format(idx, suffix))
            metadata = workspace.FetchBlob('gpu_{}/metadata{}'.format(idx, suffix))

            all_original_boxes.append(original_boxes)
            all_metadata.append(metadata)

    if cfg.DATASET == 'ava':
        return (np.vstack(all_preds), np.vstack(all_labels),
                np.vstack(all_original_boxes), np.vstack(all_metadata))
    return np.vstack(all_preds),  np.vstack(all_labels), None, None 
Example #30
Source File: net.py    From Clustered-Object-Detection-in-Aerial-Image with Apache License 2.0
def save_model_to_weights_file(weights_file, model):
    """Stash model weights in a dictionary and pickle them to a file. We map
    GPU device scoped names to unscoped names (e.g., 'gpu_0/conv1_w' ->
    'conv1_w').
    """
    logger.info(
        'Saving parameters and momentum to {}'.format(
            os.path.abspath(weights_file)))
    blobs = {}
    # Save all parameters
    for param in model.params:
        scoped_name = str(param)
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if unscoped_name not in blobs:
            logger.debug(' {:s} -> {:s}'.format(scoped_name, unscoped_name))
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    # Save momentum
    for param in model.TrainableParams():
        scoped_name = str(param) + '_momentum'
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if unscoped_name not in blobs:
            logger.debug(' {:s} -> {:s}'.format(scoped_name, unscoped_name))
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    # Save preserved blobs
    for scoped_name in workspace.Blobs():
        if scoped_name.startswith('__preserve__/'):
            unscoped_name = c2_utils.UnscopeName(scoped_name)
            if unscoped_name not in blobs:
                logger.debug(
                    ' {:s} -> {:s} (preserved)'.format(
                        scoped_name, unscoped_name))
                blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    cfg_yaml = yaml.dump(cfg)
    save_object(dict(blobs=blobs, cfg=cfg_yaml), weights_file)