Python caffe2.proto.caffe2_pb2.TensorProto() Examples
The following are 11 code examples of caffe2.proto.caffe2_pb2.TensorProto().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module caffe2.proto.caffe2_pb2, or try the search function.
Example #1
Source File: __init__.py From NNEF-Tools with Apache License 2.0 | 5 votes |
def dtype_name_to_id(name):
    """Return the TensorProto.DataType enum value for the given dtype *name*."""
    dtype_enum = TensorProto.DataType
    return dtype_enum.Value(name)
Example #2
Source File: __init__.py From NNEF-Tools with Apache License 2.0 | 5 votes |
def dtype_id_to_name(dtype_int):
    """Return the (string-normalized) TensorProto.DataType name for *dtype_int*."""
    raw_name = TensorProto.DataType.Name(dtype_int)
    return fixstr(raw_name)
Example #3
Source File: pickle_caffe_blobs.py From KL-Loss with Apache License 2.0 | 4 votes |
def remove_spatial_bn_layers(caffenet, caffenet_weights):
    """Strip spatial batch-norm ('BatchNorm'/'Scale') layers from both nets.

    The BN statistics and the Scale parameters are folded into equivalent
    per-channel affine scale/bias tensors, which are returned as a list of
    caffe2 TensorProto objects (a '_s' and a '_b' tensor per BN/Scale pair).
    """
    remove_types = ['BatchNorm', 'Scale']

    def _remove_layers(net):
        # Walk indices in reverse so pop(i) never shifts an index we still need.
        for idx in reversed(range(len(net.layer))):
            if net.layer[idx].type in remove_types:
                net.layer.pop(idx)

    # First drop the layers from the caffenet proto.
    _remove_layers(caffenet)

    # Keep the weight-bearing BN/Scale layers so their stats can be folded,
    # then remove them from the weights net as well.
    bn_layers = [lyr for lyr in caffenet_weights.layer if lyr.type in remove_types]
    _remove_layers(caffenet_weights)

    def _create_tensor(arr, shape, name):
        # Build a float TensorProto holding *arr* with the given proto shape.
        t = caffe2_pb2.TensorProto()
        t.name = name
        t.data_type = caffe2_pb2.TensorProto.FLOAT
        t.dims.extend(shape.dim)
        t.float_data.extend(arr)
        assert len(t.float_data) == np.prod(t.dims), 'Data size, shape mismatch'
        return t

    bn_tensors = []
    # bn_layers alternates BatchNorm, Scale; iterate the pairs in lockstep.
    for bn, scl in zip(bn_layers[0::2], bn_layers[1::2]):
        assert bn.name[len('bn'):] == scl.name[len('scale'):], 'Pair mismatch'
        blob_out = 'res' + bn.name[len('bn'):] + '_bn'

        mean = np.asarray(bn.blobs[0].data)
        var = np.asarray(bn.blobs[1].data)
        gamma = np.asarray(scl.blobs[0].data)
        beta = np.asarray(scl.blobs[1].data)

        # Fold BN into an affine map: y = (gamma/std) * x + (beta - mean*gamma/std).
        std = np.sqrt(var + 1e-5)
        new_scale = gamma / std
        new_bias = beta - mean * gamma / std

        bn_tensors.append(_create_tensor(new_scale, bn.blobs[0].shape, blob_out + '_s'))
        bn_tensors.append(_create_tensor(new_bias, bn.blobs[0].shape, blob_out + '_b'))
    return bn_tensors
Example #4
Source File: pickle_caffe_blobs.py From Clustered-Object-Detection-in-Aerial-Image with Apache License 2.0 | 4 votes |
def remove_spatial_bn_layers(caffenet, caffenet_weights):
    """Strip spatial batch-norm ('BatchNorm'/'Scale') layers from both nets.

    The BN statistics and the Scale parameters are folded into equivalent
    per-channel affine scale/bias tensors, which are returned as a list of
    caffe2 TensorProto objects (a '_s' and a '_b' tensor per BN/Scale pair).
    """
    remove_types = ['BatchNorm', 'Scale']

    def _remove_layers(net):
        # Walk indices in reverse so pop(i) never shifts an index we still need.
        for idx in reversed(range(len(net.layer))):
            if net.layer[idx].type in remove_types:
                net.layer.pop(idx)

    # First drop the layers from the caffenet proto.
    _remove_layers(caffenet)

    # Keep the weight-bearing BN/Scale layers so their stats can be folded,
    # then remove them from the weights net as well.
    bn_layers = [lyr for lyr in caffenet_weights.layer if lyr.type in remove_types]
    _remove_layers(caffenet_weights)

    def _create_tensor(arr, shape, name):
        # Build a float TensorProto holding *arr* with the given proto shape.
        t = caffe2_pb2.TensorProto()
        t.name = name
        t.data_type = caffe2_pb2.TensorProto.FLOAT
        t.dims.extend(shape.dim)
        t.float_data.extend(arr)
        assert len(t.float_data) == np.prod(t.dims), 'Data size, shape mismatch'
        return t

    bn_tensors = []
    # bn_layers alternates BatchNorm, Scale; iterate the pairs in lockstep.
    for bn, scl in zip(bn_layers[0::2], bn_layers[1::2]):
        assert bn.name[len('bn'):] == scl.name[len('scale'):], 'Pair mismatch'
        blob_out = 'res' + bn.name[len('bn'):] + '_bn'

        mean = np.asarray(bn.blobs[0].data)
        var = np.asarray(bn.blobs[1].data)
        gamma = np.asarray(scl.blobs[0].data)
        beta = np.asarray(scl.blobs[1].data)

        # Fold BN into an affine map: y = (gamma/std) * x + (beta - mean*gamma/std).
        std = np.sqrt(var + 1e-5)
        new_scale = gamma / std
        new_bias = beta - mean * gamma / std

        bn_tensors.append(_create_tensor(new_scale, bn.blobs[0].shape, blob_out + '_s'))
        bn_tensors.append(_create_tensor(new_bias, bn.blobs[0].shape, blob_out + '_b'))
    return bn_tensors
Example #5
Source File: pickle_caffe_blobs.py From seg_every_thing with Apache License 2.0 | 4 votes |
def remove_spatial_bn_layers(caffenet, caffenet_weights):
    """Strip spatial batch-norm ('BatchNorm'/'Scale') layers from both nets.

    The BN statistics and the Scale parameters are folded into equivalent
    per-channel affine scale/bias tensors, which are returned as a list of
    caffe2 TensorProto objects (a '_s' and a '_b' tensor per BN/Scale pair).
    """
    remove_types = ['BatchNorm', 'Scale']

    def _remove_layers(net):
        # Walk indices in reverse so pop(i) never shifts an index we still need.
        for idx in reversed(range(len(net.layer))):
            if net.layer[idx].type in remove_types:
                net.layer.pop(idx)

    # First drop the layers from the caffenet proto.
    _remove_layers(caffenet)

    # Keep the weight-bearing BN/Scale layers so their stats can be folded,
    # then remove them from the weights net as well.
    bn_layers = [lyr for lyr in caffenet_weights.layer if lyr.type in remove_types]
    _remove_layers(caffenet_weights)

    def _create_tensor(arr, shape, name):
        # Build a float TensorProto holding *arr* with the given proto shape.
        t = caffe2_pb2.TensorProto()
        t.name = name
        t.data_type = caffe2_pb2.TensorProto.FLOAT
        t.dims.extend(shape.dim)
        t.float_data.extend(arr)
        assert len(t.float_data) == np.prod(t.dims), 'Data size, shape mismatch'
        return t

    bn_tensors = []
    # bn_layers alternates BatchNorm, Scale; iterate the pairs in lockstep.
    for bn, scl in zip(bn_layers[0::2], bn_layers[1::2]):
        assert bn.name[len('bn'):] == scl.name[len('scale'):], 'Pair mismatch'
        blob_out = 'res' + bn.name[len('bn'):] + '_bn'

        mean = np.asarray(bn.blobs[0].data)
        var = np.asarray(bn.blobs[1].data)
        gamma = np.asarray(scl.blobs[0].data)
        beta = np.asarray(scl.blobs[1].data)

        # Fold BN into an affine map: y = (gamma/std) * x + (beta - mean*gamma/std).
        std = np.sqrt(var + 1e-5)
        new_scale = gamma / std
        new_bias = beta - mean * gamma / std

        bn_tensors.append(_create_tensor(new_scale, bn.blobs[0].shape, blob_out + '_s'))
        bn_tensors.append(_create_tensor(new_bias, bn.blobs[0].shape, blob_out + '_b'))
    return bn_tensors
Example #6
Source File: pickle_caffe_blobs.py From masktextspotter.caffe2 with Apache License 2.0 | 4 votes |
def remove_spatial_bn_layers(caffenet, caffenet_weights):
    """Strip spatial batch-norm ('BatchNorm'/'Scale') layers from both nets.

    The BN statistics and the Scale parameters are folded into equivalent
    per-channel affine scale/bias tensors, which are returned as a list of
    caffe2 TensorProto objects (a '_s' and a '_b' tensor per BN/Scale pair).
    """
    remove_types = ['BatchNorm', 'Scale']

    def _remove_layers(net):
        # Walk indices in reverse so pop(i) never shifts an index we still need.
        for idx in reversed(range(len(net.layer))):
            if net.layer[idx].type in remove_types:
                net.layer.pop(idx)

    # First drop the layers from the caffenet proto.
    _remove_layers(caffenet)

    # Keep the weight-bearing BN/Scale layers so their stats can be folded,
    # then remove them from the weights net as well.
    bn_layers = [lyr for lyr in caffenet_weights.layer if lyr.type in remove_types]
    _remove_layers(caffenet_weights)

    def _create_tensor(arr, shape, name):
        # Build a float TensorProto holding *arr* with the given proto shape.
        t = caffe2_pb2.TensorProto()
        t.name = name
        t.data_type = caffe2_pb2.TensorProto.FLOAT
        t.dims.extend(shape.dim)
        t.float_data.extend(arr)
        assert len(t.float_data) == np.prod(t.dims), 'Data size, shape mismatch'
        return t

    bn_tensors = []
    # bn_layers alternates BatchNorm, Scale; iterate the pairs in lockstep.
    for bn, scl in zip(bn_layers[0::2], bn_layers[1::2]):
        assert bn.name[len('bn'):] == scl.name[len('scale'):], 'Pair mismatch'
        blob_out = 'res' + bn.name[len('bn'):] + '_bn'

        mean = np.asarray(bn.blobs[0].data)
        var = np.asarray(bn.blobs[1].data)
        gamma = np.asarray(scl.blobs[0].data)
        beta = np.asarray(scl.blobs[1].data)

        # Fold BN into an affine map: y = (gamma/std) * x + (beta - mean*gamma/std).
        std = np.sqrt(var + 1e-5)
        new_scale = gamma / std
        new_bias = beta - mean * gamma / std

        bn_tensors.append(_create_tensor(new_scale, bn.blobs[0].shape, blob_out + '_s'))
        bn_tensors.append(_create_tensor(new_bias, bn.blobs[0].shape, blob_out + '_b'))
    return bn_tensors
Example #7
Source File: pickle_caffe_blobs.py From Detectron-Cascade-RCNN with Apache License 2.0 | 4 votes |
def remove_spatial_bn_layers(caffenet, caffenet_weights):
    """Strip spatial batch-norm ('BatchNorm'/'Scale') layers from both nets.

    The BN statistics and the Scale parameters are folded into equivalent
    per-channel affine scale/bias tensors, which are returned as a list of
    caffe2 TensorProto objects (a '_s' and a '_b' tensor per BN/Scale pair).
    """
    remove_types = ['BatchNorm', 'Scale']

    def _remove_layers(net):
        # Walk indices in reverse so pop(i) never shifts an index we still need.
        for idx in reversed(range(len(net.layer))):
            if net.layer[idx].type in remove_types:
                net.layer.pop(idx)

    # First drop the layers from the caffenet proto.
    _remove_layers(caffenet)

    # Keep the weight-bearing BN/Scale layers so their stats can be folded,
    # then remove them from the weights net as well.
    bn_layers = [lyr for lyr in caffenet_weights.layer if lyr.type in remove_types]
    _remove_layers(caffenet_weights)

    def _create_tensor(arr, shape, name):
        # Build a float TensorProto holding *arr* with the given proto shape.
        t = caffe2_pb2.TensorProto()
        t.name = name
        t.data_type = caffe2_pb2.TensorProto.FLOAT
        t.dims.extend(shape.dim)
        t.float_data.extend(arr)
        assert len(t.float_data) == np.prod(t.dims), 'Data size, shape mismatch'
        return t

    bn_tensors = []
    # bn_layers alternates BatchNorm, Scale; iterate the pairs in lockstep.
    for bn, scl in zip(bn_layers[0::2], bn_layers[1::2]):
        assert bn.name[len('bn'):] == scl.name[len('scale'):], 'Pair mismatch'
        blob_out = 'res' + bn.name[len('bn'):] + '_bn'

        mean = np.asarray(bn.blobs[0].data)
        var = np.asarray(bn.blobs[1].data)
        gamma = np.asarray(scl.blobs[0].data)
        beta = np.asarray(scl.blobs[1].data)

        # Fold BN into an affine map: y = (gamma/std) * x + (beta - mean*gamma/std).
        std = np.sqrt(var + 1e-5)
        new_scale = gamma / std
        new_bias = beta - mean * gamma / std

        bn_tensors.append(_create_tensor(new_scale, bn.blobs[0].shape, blob_out + '_s'))
        bn_tensors.append(_create_tensor(new_bias, bn.blobs[0].shape, blob_out + '_b'))
    return bn_tensors
Example #8
Source File: pickle_caffe_blobs.py From Detectron with Apache License 2.0 | 4 votes |
def remove_spatial_bn_layers(caffenet, caffenet_weights):
    """Strip spatial batch-norm ('BatchNorm'/'Scale') layers from both nets.

    The BN statistics and the Scale parameters are folded into equivalent
    per-channel affine scale/bias tensors, which are returned as a list of
    caffe2 TensorProto objects (a '_s' and a '_b' tensor per BN/Scale pair).
    """
    remove_types = ['BatchNorm', 'Scale']

    def _remove_layers(net):
        # Walk indices in reverse so pop(i) never shifts an index we still need.
        for idx in reversed(range(len(net.layer))):
            if net.layer[idx].type in remove_types:
                net.layer.pop(idx)

    # First drop the layers from the caffenet proto.
    _remove_layers(caffenet)

    # Keep the weight-bearing BN/Scale layers so their stats can be folded,
    # then remove them from the weights net as well.
    bn_layers = [lyr for lyr in caffenet_weights.layer if lyr.type in remove_types]
    _remove_layers(caffenet_weights)

    def _create_tensor(arr, shape, name):
        # Build a float TensorProto holding *arr* with the given proto shape.
        t = caffe2_pb2.TensorProto()
        t.name = name
        t.data_type = caffe2_pb2.TensorProto.FLOAT
        t.dims.extend(shape.dim)
        t.float_data.extend(arr)
        assert len(t.float_data) == np.prod(t.dims), 'Data size, shape mismatch'
        return t

    bn_tensors = []
    # bn_layers alternates BatchNorm, Scale; iterate the pairs in lockstep.
    for bn, scl in zip(bn_layers[0::2], bn_layers[1::2]):
        assert bn.name[len('bn'):] == scl.name[len('scale'):], 'Pair mismatch'
        blob_out = 'res' + bn.name[len('bn'):] + '_bn'

        mean = np.asarray(bn.blobs[0].data)
        var = np.asarray(bn.blobs[1].data)
        gamma = np.asarray(scl.blobs[0].data)
        beta = np.asarray(scl.blobs[1].data)

        # Fold BN into an affine map: y = (gamma/std) * x + (beta - mean*gamma/std).
        std = np.sqrt(var + 1e-5)
        new_scale = gamma / std
        new_bias = beta - mean * gamma / std

        bn_tensors.append(_create_tensor(new_scale, bn.blobs[0].shape, blob_out + '_s'))
        bn_tensors.append(_create_tensor(new_bias, bn.blobs[0].shape, blob_out + '_b'))
    return bn_tensors
Example #9
Source File: pickle_caffe_blobs.py From Detectron-DA-Faster-RCNN with Apache License 2.0 | 4 votes |
def remove_spatial_bn_layers(caffenet, caffenet_weights):
    """Strip spatial batch-norm ('BatchNorm'/'Scale') layers from both nets.

    The BN statistics and the Scale parameters are folded into equivalent
    per-channel affine scale/bias tensors, which are returned as a list of
    caffe2 TensorProto objects (a '_s' and a '_b' tensor per BN/Scale pair).
    """
    remove_types = ['BatchNorm', 'Scale']

    def _remove_layers(net):
        # Walk indices in reverse so pop(i) never shifts an index we still need.
        for idx in reversed(range(len(net.layer))):
            if net.layer[idx].type in remove_types:
                net.layer.pop(idx)

    # First drop the layers from the caffenet proto.
    _remove_layers(caffenet)

    # Keep the weight-bearing BN/Scale layers so their stats can be folded,
    # then remove them from the weights net as well.
    bn_layers = [lyr for lyr in caffenet_weights.layer if lyr.type in remove_types]
    _remove_layers(caffenet_weights)

    def _create_tensor(arr, shape, name):
        # Build a float TensorProto holding *arr* with the given proto shape.
        t = caffe2_pb2.TensorProto()
        t.name = name
        t.data_type = caffe2_pb2.TensorProto.FLOAT
        t.dims.extend(shape.dim)
        t.float_data.extend(arr)
        assert len(t.float_data) == np.prod(t.dims), 'Data size, shape mismatch'
        return t

    bn_tensors = []
    # bn_layers alternates BatchNorm, Scale; iterate the pairs in lockstep.
    for bn, scl in zip(bn_layers[0::2], bn_layers[1::2]):
        assert bn.name[len('bn'):] == scl.name[len('scale'):], 'Pair mismatch'
        blob_out = 'res' + bn.name[len('bn'):] + '_bn'

        mean = np.asarray(bn.blobs[0].data)
        var = np.asarray(bn.blobs[1].data)
        gamma = np.asarray(scl.blobs[0].data)
        beta = np.asarray(scl.blobs[1].data)

        # Fold BN into an affine map: y = (gamma/std) * x + (beta - mean*gamma/std).
        std = np.sqrt(var + 1e-5)
        new_scale = gamma / std
        new_bias = beta - mean * gamma / std

        bn_tensors.append(_create_tensor(new_scale, bn.blobs[0].shape, blob_out + '_s'))
        bn_tensors.append(_create_tensor(new_bias, bn.blobs[0].shape, blob_out + '_b'))
    return bn_tensors
Example #10
Source File: pickle_caffe_blobs.py From CBNet with Apache License 2.0 | 4 votes |
def remove_spatial_bn_layers(caffenet, caffenet_weights):
    """Strip spatial batch-norm ('BatchNorm'/'Scale') layers from both nets.

    The BN statistics and the Scale parameters are folded into equivalent
    per-channel affine scale/bias tensors, which are returned as a list of
    caffe2 TensorProto objects (a '_s' and a '_b' tensor per BN/Scale pair).
    """
    remove_types = ['BatchNorm', 'Scale']

    def _remove_layers(net):
        # Walk indices in reverse so pop(i) never shifts an index we still need.
        for idx in reversed(range(len(net.layer))):
            if net.layer[idx].type in remove_types:
                net.layer.pop(idx)

    # First drop the layers from the caffenet proto.
    _remove_layers(caffenet)

    # Keep the weight-bearing BN/Scale layers so their stats can be folded,
    # then remove them from the weights net as well.
    bn_layers = [lyr for lyr in caffenet_weights.layer if lyr.type in remove_types]
    _remove_layers(caffenet_weights)

    def _create_tensor(arr, shape, name):
        # Build a float TensorProto holding *arr* with the given proto shape.
        t = caffe2_pb2.TensorProto()
        t.name = name
        t.data_type = caffe2_pb2.TensorProto.FLOAT
        t.dims.extend(shape.dim)
        t.float_data.extend(arr)
        assert len(t.float_data) == np.prod(t.dims), 'Data size, shape mismatch'
        return t

    bn_tensors = []
    # bn_layers alternates BatchNorm, Scale; iterate the pairs in lockstep.
    for bn, scl in zip(bn_layers[0::2], bn_layers[1::2]):
        assert bn.name[len('bn'):] == scl.name[len('scale'):], 'Pair mismatch'
        blob_out = 'res' + bn.name[len('bn'):] + '_bn'

        mean = np.asarray(bn.blobs[0].data)
        var = np.asarray(bn.blobs[1].data)
        gamma = np.asarray(scl.blobs[0].data)
        beta = np.asarray(scl.blobs[1].data)

        # Fold BN into an affine map: y = (gamma/std) * x + (beta - mean*gamma/std).
        std = np.sqrt(var + 1e-5)
        new_scale = gamma / std
        new_bias = beta - mean * gamma / std

        bn_tensors.append(_create_tensor(new_scale, bn.blobs[0].shape, blob_out + '_s'))
        bn_tensors.append(_create_tensor(new_bias, bn.blobs[0].shape, blob_out + '_b'))
    return bn_tensors
Example #11
Source File: pickle_caffe_blobs.py From NucleiDetectron with Apache License 2.0 | 4 votes |
def remove_spatial_bn_layers(caffenet, caffenet_weights):
    """Strip spatial batch-norm ('BatchNorm'/'Scale') layers from both nets.

    The BN statistics and the Scale parameters are folded into equivalent
    per-channel affine scale/bias tensors, which are returned as a list of
    caffe2 TensorProto objects (a '_s' and a '_b' tensor per BN/Scale pair).
    """
    remove_types = ['BatchNorm', 'Scale']

    def _remove_layers(net):
        # Walk indices in reverse so pop(i) never shifts an index we still need.
        for idx in reversed(range(len(net.layer))):
            if net.layer[idx].type in remove_types:
                net.layer.pop(idx)

    # First drop the layers from the caffenet proto.
    _remove_layers(caffenet)

    # Keep the weight-bearing BN/Scale layers so their stats can be folded,
    # then remove them from the weights net as well.
    bn_layers = [lyr for lyr in caffenet_weights.layer if lyr.type in remove_types]
    _remove_layers(caffenet_weights)

    def _create_tensor(arr, shape, name):
        # Build a float TensorProto holding *arr* with the given proto shape.
        t = caffe2_pb2.TensorProto()
        t.name = name
        t.data_type = caffe2_pb2.TensorProto.FLOAT
        t.dims.extend(shape.dim)
        t.float_data.extend(arr)
        assert len(t.float_data) == np.prod(t.dims), 'Data size, shape mismatch'
        return t

    bn_tensors = []
    # bn_layers alternates BatchNorm, Scale; iterate the pairs in lockstep.
    for bn, scl in zip(bn_layers[0::2], bn_layers[1::2]):
        assert bn.name[len('bn'):] == scl.name[len('scale'):], 'Pair mismatch'
        blob_out = 'res' + bn.name[len('bn'):] + '_bn'

        mean = np.asarray(bn.blobs[0].data)
        var = np.asarray(bn.blobs[1].data)
        gamma = np.asarray(scl.blobs[0].data)
        beta = np.asarray(scl.blobs[1].data)

        # Fold BN into an affine map: y = (gamma/std) * x + (beta - mean*gamma/std).
        std = np.sqrt(var + 1e-5)
        new_scale = gamma / std
        new_bias = beta - mean * gamma / std

        bn_tensors.append(_create_tensor(new_scale, bn.blobs[0].shape, blob_out + '_s'))
        bn_tensors.append(_create_tensor(new_bias, bn.blobs[0].shape, blob_out + '_b'))
    return bn_tensors