Python caffe2.python.workspace.ResetWorkspace() Examples

The following are 30 code examples of caffe2.python.workspace.ResetWorkspace(), drawn from open-source projects. The project, source file, and license are listed above each example. You may also want to check out all available functions/classes of the module caffe2.python.workspace.
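
Before the project examples, here is a minimal, self-contained sketch of what ResetWorkspace() does; the blob name "x" is purely illustrative:

import numpy as np
from caffe2.python import workspace

# Feed a blob into the current workspace.
workspace.FeedBlob("x", np.ones((2, 3), dtype=np.float32))
print(workspace.Blobs())  # ['x']

# ResetWorkspace() destroys all blobs and nets in the current workspace,
# giving each test or benchmark iteration a clean slate.
workspace.ResetWorkspace()
print(workspace.Blobs())  # []
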
Example #1
Source File: test_ops_unary.py    From ngraph-python with Apache License 2.0
def test_exp():
    workspace.ResetWorkspace()

    shape = [2, 7]
    data = [
        1., 2., 3., 4., 1., 2., 3.,
        1., 2., 3., 4., 1., 2., 3.
    ]
    expected = [
        [2.71828, 7.3890, 20.08553, 54.59815, 2.71828, 7.3890, 20.08553],
        [2.71828, 7.3890, 20.08553, 54.59815, 2.71828, 7.3890, 20.08553],
    ]
    run_all_close_compare_initiated_with_random_gauss('Exp',
                                                      shape=shape,
                                                      data=data,
                                                      expected=expected) 
Example #2
Source File: test_ops_nn.py    From ngraph-python with Apache License 2.0
def test_SquaredL2Distance():
    workspace.ResetWorkspace()
    shape = (10, 10)

    net = core.Net("net")
    Y = net.GivenTensorFill([], "Y", shape=shape, values=np.random.uniform(-1, 1, shape))
    T = net.GivenTensorFill([], "T", shape=shape, values=np.random.uniform(-1, 1, shape))
    net.SquaredL2Distance([Y, T], "dist")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("dist")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        assert(np.allclose(f_result, workspace.FetchBlob("dist"), equal_nan=False)) 
Example #3
Source File: test_ops_nn.py    From ngraph-python with Apache License 2.0
def test_AveragedLoss():
    workspace.ResetWorkspace()
    shape = (32,)

    net = core.Net("net")
    X = net.GivenTensorFill([], "Y", shape=shape, values=np.random.uniform(-1, 1, shape))
    X.AveragedLoss([], ["loss"])

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("loss")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        assert(np.allclose(f_result, workspace.FetchBlob("loss"), equal_nan=False)) 
Example #4
Source File: test_ops_constant.py    From ngraph-python with Apache License 2.0
def test_constant():
    workspace.ResetWorkspace()

    shape = [10, 10]
    val = random.random()
    net = core.Net("net")
    net.ConstantFill([], ["Y"], shape=shape, value=val, run_once=0, name="Y")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        # compare Caffe2 and ngraph results
        assert(np.ma.allequal(f_result, workspace.FetchBlob("Y")))
        assert(np.isclose(f_result[0][0], val, atol=1e-6, rtol=0)) 
Example #5
Source File: test_benchmarks.py    From dlcookbook-dlbs with Apache License 2.0
def test_inference(self):
        """caffe2_benchmarks ->  TestCaffe2Benchmarks::test_inference    [Caffe2 CPU/GPU inference.]"""
        print("Testing inference")
        for params in itertools.product(self.models, self.batch_sizes, self.devices):
            if params[0] in self.gpu_skip_models:
                continue
            model = model_helper.ModelHelper(name=params[0])
            name, times = benchmark_inference(
                model,
                {'model':params[0], 'phase':'inference', 'batch_size':params[1],
                 'num_batches':self.num_batches, 'num_warmup_batches':self.num_warmup_iters,
                 'num_gpus':self.num_gpus, 'device':params[2], 'dtype':'float',
                 'enable_tensor_core':False}
            )
            self.assertEqual(len(times), self.num_batches)
            print("model=%s, name=%s, batch=%d, device=%s, time=%f" %\
                  (params[0], name, params[1], params[2], 1000.0*np.mean(times)))
            workspace.ResetWorkspace() 
Example #6
Source File: test_benchmarks.py    From dlcookbook-dlbs with Apache License 2.0
def test_training_cpu(self):
        """caffe2_benchmarks ->  TestCaffe2Benchmarks::test_training_cpu [Caffe2 CPU training.]"""
        print("Testing CPU training")
        for params in itertools.product(self.models, self.batch_sizes):
            model = model_helper.ModelHelper(name=params[0])
            name, times = benchmark_training(
                model,
                {'model':params[0], 'phase':'training', 'batch_size':params[1],
                 'num_batches':self.num_batches, 'num_warmup_batches':self.num_warmup_iters,
                 'num_gpus':0, 'device':'cpu', 'dtype':'float',
                 'enable_tensor_core':False}
            )
            self.assertEqual(len(times), self.num_batches)
            print("model=%s, name=%s, batch=%d, device=cpu, time=%f" %\
                  (params[0], name, params[1], 1000.0*np.mean(times)))
            workspace.ResetWorkspace() 
Example #7
Source File: test_benchmarks.py    From dlcookbook-dlbs with Apache License 2.0
def test_training_gpu(self):
        """caffe2_benchmarks ->  TestCaffe2Benchmarks::test_training_gpu [Caffe2 GPU training.]"""
        print("Testing GPU training")
        for params in itertools.product(self.models, self.batch_sizes, self.gpus):
            if params[0] in self.gpu_skip_models:
                continue
            model = model_helper.ModelHelper(name=params[0])
            name, times = benchmark_training(
                model,
                {'model':params[0], 'phase':'training', 'batch_size':params[1],
                 'num_batches':self.num_batches, 'num_warmup_batches':self.num_warmup_iters,
                 'num_gpus':len(params[2].split()), 'device':'gpu', 'dtype':'float',
                 'enable_tensor_core':False}
            )
            self.assertEqual(len(times), self.num_batches)
            print("model=%s, name=%s, batch=%d, gpus=%s, time=%f" %\
                  (params[0], name, params[1], params[2], 1000.0*np.mean(times)))
            workspace.ResetWorkspace() 
Example #8
Source File: run_add5_op.py    From tutorials with Apache License 2.0
def run_add5_and_add5gradient_op(device):
    # clear the workspace before running the operator
    workspace.ResetWorkspace()
    add5 = core.CreateOperator("Add5",
                               ["X"],
                               ["Y"],
                               device_option=device)
    print("==> Running Add5 op:")
    workspace.FeedBlob("X", (np.random.rand(5, 5)), device_option=device)
    print("Input of Add5: ", workspace.FetchBlob("X"))
    workspace.RunOperatorOnce(add5)
    print("Output of Add5: ", workspace.FetchBlob("Y"))

    print("\n\n==> Running Add5Gradient op:")
    print("Input of Add5Gradient: ", workspace.FetchBlob("Y"))
    add5gradient = core.CreateOperator("Add5Gradient",
                                       ["Y"],
                                       ["Z"],
                                       device_option=device)
    workspace.RunOperatorOnce(add5gradient)
    print("Output of Add5Gradient: ", workspace.FetchBlob("Z")) 
Example #9
Source File: Multi-GPU_Training.py    From tutorials with Apache License 2.0
def accuracy(model):
    accuracy = []
    prefix = model.net.Proto().name
    for device in model._devices:
        accuracy.append(
            np.asscalar(workspace.FetchBlob("gpu_{}/{}_accuracy".format(device, prefix))))
    return np.average(accuracy)


# ## Part 11: Run Multi-GPU Training and Get Test Results
# You've come a long way. Now is the time to see it all pay off. Since you already ran ResNet once, you can glance at the code below and run it. The big difference this time is your model is parallelized! 
# 
# The additional components at the end deal with accuracy so you may want to dig into those specifics as a bonus task. You can try it again: just adjust the `num_epochs` value below, run the block, and see the results. You can also go back to Part 10 to reinitialize the model, and run this step again. (You may want to add `workspace.ResetWorkspace()` before you run the new models again.)
# 
# Go back and check the images/sec from when you ran single GPU. Note how you can scale up with a small amount of overhead. 
# 
# ### Task: How many GPUs would it take to train ImageNet in under a minute? 

# In[ ]:


# Start looping through epochs where we run the batches of images to cover the entire dataset
# Usually you would want to run a lot more epochs to increase your model's accuracy 
Example #10
Source File: caffe2_network.py    From deep500 with BSD 3-Clause "New" or "Revised" License
def _teardown(self):
        workspace.ResetWorkspace() 
Example #11
Source File: train_net.py    From Clustered-Object-Detection-in-Aerial-Image with Apache License 2.0
def test_model(model_file, multi_gpu_testing, opts=None):
    """Test a model."""
    # Clear memory before inference
    workspace.ResetWorkspace()
    # Run inference
    run_inference(
        model_file, multi_gpu_testing=multi_gpu_testing,
        check_expected_results=True,
    ) 
Example #12
Source File: test_ops_constant.py    From ngraph-python with Apache License 2.0
def test_gaussianfill():
    workspace.ResetWorkspace()

    # Size of test matrix
    N = 100
    shape = [N, N]

    net = core.Net("net")
    net.GaussianFill([], ["Y"], shape=shape, mean=0.0, std=1.0, name="Y")

    # Execute via Caffe2
    workspace.RunNetOnce(net)

    # Import caffe2 network into ngraph
    importer = C2Importer()
    importer.parse_net_def(net.Proto(), verbose=False)

    # Get handle
    f_ng = importer.get_op_handle("Y")

    # Execute
    with ExecutorFactory() as ex:
        f_result = ex.executor(f_ng)()

        # get caffe result
        caffe_res = workspace.FetchBlob("Y")

        # Elementwise difference of the two random matrices
        difference_res = caffe_res - f_result

        # standard deviation of the difference matrix
        diffe_res_std = difference_res.std()

        # Testing can only be approximate, so in rare cases this may fail;
        # if it fails once, re-run a couple of times to confirm there is a real problem.
        # The difference must still be gaussian with P(|m' - m| < 3*std(m)) = 99.73%,
        # where std(m) = std / N since the matrix has N*N elements.
        assert(np.isclose(difference_res.mean(), 0, atol=3 * diffe_res_std / N, rtol=0)) 
Example #13
Source File: convert_pkl_to_pb.py    From seg_every_thing with Apache License 2.0
def run_model_cfg(args, im, check_blobs):
    workspace.ResetWorkspace()
    model, _ = load_model(args)
    with c2_utils.NamedCudaScope(0):
        cls_boxes, cls_segms, cls_keyps = test_engine.im_detect_all(
            model, im, None, None,
        )

    boxes, segms, keypoints, classes = vis_utils.convert_from_cls_format(
        cls_boxes, cls_segms, cls_keyps)

    # sort the results based on score for comparison
    boxes, segms, keypoints, classes = _sort_results(
        boxes, segms, keypoints, classes)

    # write final results back to workspace
    def _ornone(res):
        return np.array(res) if res is not None else np.array([], dtype=np.float32)
    with c2_utils.NamedCudaScope(0):
        workspace.FeedBlob(core.ScopedName('result_boxes'), _ornone(boxes))
        workspace.FeedBlob(core.ScopedName('result_segms'), _ornone(segms))
        workspace.FeedBlob(core.ScopedName('result_keypoints'), _ornone(keypoints))
        workspace.FeedBlob(core.ScopedName('result_classids'), _ornone(classes))

    # get result blobs
    with c2_utils.NamedCudaScope(0):
        ret = _get_result_blobs(check_blobs)

    return ret 
Example #14
Source File: train_net.py    From seg_every_thing with Apache License 2.0
def test_model(model_file, multi_gpu_testing, opts=None):
    """Test a model."""
    # Clear memory before inference
    workspace.ResetWorkspace()
    # Run inference
    run_inference(
        model_file, multi_gpu_testing=multi_gpu_testing,
        check_expected_results=True,
    ) 
Example #15
Source File: train_net.py    From DetectAndTrack with Apache License 2.0
def test_model(model_file, multi_gpu_testing, opts=None):
    # All arguments to inference functions are passed via cfg
    cfg.TEST.WEIGHTS = model_file
    # Clear memory before inference
    workspace.ResetWorkspace()
    # Run inference
    test_net.main(multi_gpu_testing=multi_gpu_testing) 
Example #16
Source File: shared.py    From detectron2 with Apache License 2.0
def __enter__(self):
        self.org_ws = workspace.CurrentWorkspace()
        if self.ws_name is not None:
            workspace.SwitchWorkspace(self.ws_name, True)
        if self.is_reset:
            workspace.ResetWorkspace()

        return workspace 
Example #17
Source File: shared.py    From detectron2 with Apache License 2.0
def __exit__(self, *args):
        if self.is_cleanup:
            workspace.ResetWorkspace()
        if self.ws_name is not None:
            workspace.SwitchWorkspace(self.org_ws) 
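Examples #16 and #17 are the __enter__ and __exit__ halves of a context manager that scopes work to a separate Caffe2 workspace and optionally resets it on entry and exit. A hypothetical, self-contained re-implementation and usage sketch (the class and argument names below are assumptions for illustration, not detectron2's exact API):

import numpy as np
from caffe2.python import workspace

class ScopedWorkspace(object):
    def __init__(self, ws_name=None, is_reset=False, is_cleanup=False):
        self.ws_name = ws_name        # workspace to switch into (None = stay in current)
        self.is_reset = is_reset      # reset on entry
        self.is_cleanup = is_cleanup  # reset again on exit
        self.org_ws = None

    def __enter__(self):
        self.org_ws = workspace.CurrentWorkspace()
        if self.ws_name is not None:
            # True => create the workspace if it does not exist yet
            workspace.SwitchWorkspace(self.ws_name, True)
        if self.is_reset:
            workspace.ResetWorkspace()
        return workspace

    def __exit__(self, *args):
        if self.is_cleanup:
            workspace.ResetWorkspace()
        if self.ws_name is not None:
            workspace.SwitchWorkspace(self.org_ws)

# Blobs fed inside the block live in (and are wiped from) the scratch
# workspace, leaving the original workspace untouched.
with ScopedWorkspace("scratch", is_reset=True, is_cleanup=True) as ws:
    ws.FeedBlob("tmp", np.zeros((4, 4), dtype=np.float32))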
Example #18
Source File: train_net.py    From masktextspotter.caffe2 with Apache License 2.0
def test_model(model_file, multi_gpu_testing, opts=None):
    """Test a model."""
    # All arguments to inference functions are passed via cfg
    cfg.TEST.WEIGHTS = model_file
    # Clear memory before inference
    workspace.ResetWorkspace()
    # Run inference
    test_net.main(multi_gpu_testing=multi_gpu_testing) 
Example #19
Source File: convert_pkl_to_pb.py    From Detectron-Cascade-RCNN with Apache License 2.0
def run_model_cfg(args, im, check_blobs):
    workspace.ResetWorkspace()
    model, _ = load_model(args)
    with c2_utils.NamedCudaScope(0):
        cls_boxes, cls_segms, cls_keyps = test_engine.im_detect_all(
            model, im, None, None,
        )

    boxes, segms, keypoints, classes = vis_utils.convert_from_cls_format(
        cls_boxes, cls_segms, cls_keyps)

    # sort the results based on score for comparison
    boxes, segms, keypoints, classes = _sort_results(
        boxes, segms, keypoints, classes)

    # write final results back to workspace
    def _ornone(res):
        return np.array(res) if res is not None else np.array([], dtype=np.float32)
    with c2_utils.NamedCudaScope(0):
        workspace.FeedBlob(core.ScopedName('result_boxes'), _ornone(boxes))
        workspace.FeedBlob(core.ScopedName('result_segms'), _ornone(segms))
        workspace.FeedBlob(core.ScopedName('result_keypoints'), _ornone(keypoints))
        workspace.FeedBlob(core.ScopedName('result_classids'), _ornone(classes))

    # get result blobs
    with c2_utils.NamedCudaScope(0):
        ret = _get_result_blobs(check_blobs)

    return ret 
Example #20
Source File: train_net.py    From Detectron-Cascade-RCNN with Apache License 2.0
def test_model(model_file, multi_gpu_testing, opts=None):
    """Test a model."""
    # Clear memory before inference
    workspace.ResetWorkspace()
    # Run inference
    run_inference(
        model_file, multi_gpu_testing=multi_gpu_testing,
        check_expected_results=True,
    ) 
Example #21
Source File: convert_pkl_to_pb.py    From Detectron with Apache License 2.0
def run_model_cfg(args, im, check_blobs):
    workspace.ResetWorkspace()
    model, _ = load_model(args)
    with c2_utils.NamedCudaScope(0):
        cls_boxes, cls_segms, cls_keyps = test_engine.im_detect_all(
            model, im, None, None
        )

    boxes, segms, keypoints, classes = vis_utils.convert_from_cls_format(
        cls_boxes, cls_segms, cls_keyps
    )

    # sort the results based on score for comparison
    boxes, segms, keypoints, classes = _sort_results(boxes, segms, keypoints, classes)

    # write final results back to workspace
    def _ornone(res):
        return np.array(res) if res is not None else np.array([], dtype=np.float32)

    with c2_utils.NamedCudaScope(0):
        workspace.FeedBlob(core.ScopedName("result_boxes"), _ornone(boxes))
        workspace.FeedBlob(core.ScopedName("result_segms"), _ornone(segms))
        workspace.FeedBlob(core.ScopedName("result_keypoints"), _ornone(keypoints))
        workspace.FeedBlob(core.ScopedName("result_classids"), _ornone(classes))

    # get result blobs
    with c2_utils.NamedCudaScope(0):
        ret = _get_result_blobs(check_blobs)

    return ret 
Example #22
Source File: train_net.py    From Detectron with Apache License 2.0
def test_model(model_file, multi_gpu_testing, opts=None):
    """Test a model."""
    # Clear memory before inference
    workspace.ResetWorkspace()
    # Run inference
    run_inference(
        model_file, multi_gpu_testing=multi_gpu_testing,
        check_expected_results=True,
    ) 
Example #23
Source File: convert_pkl_to_pb.py    From Detectron-DA-Faster-RCNN with Apache License 2.0
def run_model_cfg(args, im, check_blobs):
    workspace.ResetWorkspace()
    model, _ = load_model(args)
    with c2_utils.NamedCudaScope(0):
        cls_boxes, cls_segms, cls_keyps = test_engine.im_detect_all(
            model, im, None, None,
        )

    boxes, segms, keypoints, classes = vis_utils.convert_from_cls_format(
        cls_boxes, cls_segms, cls_keyps)

    # sort the results based on score for comparison
    boxes, segms, keypoints, classes = _sort_results(
        boxes, segms, keypoints, classes)

    # write final results back to workspace
    def _ornone(res):
        return np.array(res) if res is not None else np.array([], dtype=np.float32)
    with c2_utils.NamedCudaScope(0):
        workspace.FeedBlob(core.ScopedName('result_boxes'), _ornone(boxes))
        workspace.FeedBlob(core.ScopedName('result_segms'), _ornone(segms))
        workspace.FeedBlob(core.ScopedName('result_keypoints'), _ornone(keypoints))
        workspace.FeedBlob(core.ScopedName('result_classids'), _ornone(classes))

    # get result blobs
    with c2_utils.NamedCudaScope(0):
        ret = _get_result_blobs(check_blobs)

    return ret 
Example #24
Source File: train_net.py    From Detectron-DA-Faster-RCNN with Apache License 2.0
def test_model(model_file, multi_gpu_testing, opts=None):
    """Test a model."""
    # Clear memory before inference
    workspace.ResetWorkspace()
    # Run inference
    run_inference(
        model_file, multi_gpu_testing=multi_gpu_testing,
        check_expected_results=True,
    ) 
Example #25
Source File: convert_pkl_to_pb.py    From CBNet with Apache License 2.0
def run_model_cfg(args, im, check_blobs):
    workspace.ResetWorkspace()
    model, _ = load_model(args)
    with c2_utils.NamedCudaScope(0):
        cls_boxes, cls_segms, cls_keyps = test_engine.im_detect_all(
            model, im, None, None,
        )

    boxes, segms, keypoints, classes = vis_utils.convert_from_cls_format(
        cls_boxes, cls_segms, cls_keyps)

    # sort the results based on score for comparison
    boxes, segms, keypoints, classes = _sort_results(
        boxes, segms, keypoints, classes)

    # write final results back to workspace
    def _ornone(res):
        return np.array(res) if res is not None else np.array([], dtype=np.float32)
    with c2_utils.NamedCudaScope(0):
        workspace.FeedBlob(core.ScopedName('result_boxes'), _ornone(boxes))
        workspace.FeedBlob(core.ScopedName('result_segms'), _ornone(segms))
        workspace.FeedBlob(core.ScopedName('result_keypoints'), _ornone(keypoints))
        workspace.FeedBlob(core.ScopedName('result_classids'), _ornone(classes))

    # get result blobs
    with c2_utils.NamedCudaScope(0):
        ret = _get_result_blobs(check_blobs)

    return ret 
Example #26
Source File: train_net.py    From CBNet with Apache License 2.0
def test_model(model_file, multi_gpu_testing, opts=None):
    """Test a model."""
    # Clear memory before inference
    workspace.ResetWorkspace()
    # Run inference
    run_inference(
        model_file, multi_gpu_testing=multi_gpu_testing,
        check_expected_results=True,
    ) 
Example #27
Source File: test_benchmarks.py    From dlcookbook-dlbs with Apache License 2.0
def setUp(self):
        workspace.ResetWorkspace()
        self.models = [
            'deep_mnist', 'eng_acoustic_model', 'sensor_net', 'alexnet',
            'vgg11', 'vgg13', 'vgg16', 'vgg19',
            'googlenet',
            'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
            'resnet200', 'resnet269']
        self.batch_sizes = [1, 2, 4]
        self.num_warmup_iters = 1
        self.num_batches = 1
        self.num_gpus = 1
        self.devices = ['gpu', 'cpu']
        self.gpus = ['0']
        self.gpu_skip_models = ['resnet200', 'resnet269'] # May be too deep to fit in my GPU memory 
Example #28
Source File: update-models-from-caffe2.py    From onnx-fb-universe with MIT License
def generate_test_output_data(caffe2_init_net, caffe2_predict_net, inputs):
    p = c2_workspace.Predictor(caffe2_init_net, caffe2_predict_net)
    inputs_map = {name: value for name, value in inputs}

    output = p.run(inputs_map)
    c2_workspace.ResetWorkspace()
    return output 
Example #29
Source File: convert_pkl_to_pb.py    From NucleiDetectron with Apache License 2.0
def run_model_cfg(args, im, check_blobs):
    workspace.ResetWorkspace()
    model, _ = load_model(args)
    with c2_utils.NamedCudaScope(0):
        cls_boxes, cls_segms, cls_keyps = test_engine.im_detect_all(
            model, im, None, None,
        )

    boxes, segms, keypoints, classes = vis_utils.convert_from_cls_format(
        cls_boxes, cls_segms, cls_keyps)

    # sort the results based on score for comparison
    boxes, segms, keypoints, classes = _sort_results(
        boxes, segms, keypoints, classes)

    # write final results back to workspace
    def _ornone(res):
        return np.array(res) if res is not None else np.array([], dtype=np.float32)
    with c2_utils.NamedCudaScope(0):
        workspace.FeedBlob(core.ScopedName('result_boxes'), _ornone(boxes))
        workspace.FeedBlob(core.ScopedName('result_segms'), _ornone(segms))
        workspace.FeedBlob(core.ScopedName('result_keypoints'), _ornone(keypoints))
        workspace.FeedBlob(core.ScopedName('result_classids'), _ornone(classes))

    # get result blobs
    with c2_utils.NamedCudaScope(0):
        ret = _get_result_blobs(check_blobs)

    return ret 
Example #30
Source File: train_net.py    From NucleiDetectron with Apache License 2.0
def test_model(model_file, multi_gpu_testing, opts=None):
    """Test a model."""
    # All arguments to inference functions are passed via cfg
    cfg.TEST.WEIGHTS = model_file
    # Clear memory before inference
    workspace.ResetWorkspace()
    # Run inference
    test_net.main(multi_gpu_testing=multi_gpu_testing)