Python collections.OrderedDict() Examples

The following are 30 code examples of collections.OrderedDict(), each drawn from a real open-source project; the source file, project, and license are noted above each example. You may also want to check out the other available functions and classes of the collections module.
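As a quick refresher before the project examples, OrderedDict keeps keys in insertion order and adds order-aware helpers such as move_to_end() and popitem(last=False); a minimal sketch:

from collections import OrderedDict

d = OrderedDict()
d['first'] = 1
d['second'] = 2
d['third'] = 3

d.move_to_end('first')             # push 'first' to the back
print(list(d))                     # ['second', 'third', 'first']

d.popitem(last=False)              # pop from the front (FIFO style)
print(list(d))                     # ['third', 'first']

# Unlike plain dicts, OrderedDict equality is order-sensitive.
print(OrderedDict([('a', 1), ('b', 2)]) == OrderedDict([('b', 2), ('a', 1)]))   # False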
Example #1
Source File: nq_eval.py    From natural-questions with Apache License 2.0
def compute_f1(answer_stats, prefix=''):
  """Computes F1, precision, recall for a list of answer scores.

  Args:
    answer_stats: List of per-example scores.
    prefix (''): Prefix to prepend to score dictionary.

  Returns:
    Dictionary mapping string names to scores.
  """

  has_gold, has_pred, is_correct, _ = list(zip(*answer_stats))
  precision = safe_divide(sum(is_correct), sum(has_pred))
  recall = safe_divide(sum(is_correct), sum(has_gold))
  f1 = safe_divide(2 * precision * recall, precision + recall)

  return OrderedDict({
      prefix + 'n': len(answer_stats),
      prefix + 'f1': f1,
      prefix + 'precision': precision,
      prefix + 'recall': recall
  }) 
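For context, here is a self-contained sketch of the same bookkeeping; safe_divide and the (has_gold, has_pred, is_correct, score) layout of answer_stats are assumptions standing in for definitions elsewhere in nq_eval.py:

from collections import OrderedDict

def safe_divide(x, y):
    # Hypothetical stand-in for nq_eval's safe_divide: return 0 when y is 0.
    return x / y if y else 0.0

# Assumed per-example layout: (has_gold, has_pred, is_correct, score).
answer_stats = [
    (True, True, True, 0.9),
    (True, False, False, 0.0),
    (False, True, False, 0.2),
]

has_gold, has_pred, is_correct, _ = zip(*answer_stats)
precision = safe_divide(sum(is_correct), sum(has_pred))       # 1 / 2 = 0.5
recall = safe_divide(sum(is_correct), sum(has_gold))          # 1 / 2 = 0.5
f1 = safe_divide(2 * precision * recall, precision + recall)  # 0.5

print(OrderedDict(n=len(answer_stats), f1=f1, precision=precision, recall=recall))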
Example #2
Source File: dist_utils.py    From mmdetection with Apache License 2.0
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
    if bucket_size_mb > 0:
        bucket_size_bytes = bucket_size_mb * 1024 * 1024
        buckets = _take_tensors(tensors, bucket_size_bytes)
    else:
        buckets = OrderedDict()
        for tensor in tensors:
            tp = tensor.type()
            if tp not in buckets:
                buckets[tp] = []
            buckets[tp].append(tensor)
        buckets = buckets.values()

    for bucket in buckets:
        flat_tensors = _flatten_dense_tensors(bucket)
        dist.all_reduce(flat_tensors)
        flat_tensors.div_(world_size)
        for tensor, synced in zip(
                bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
            tensor.copy_(synced) 
Example #3
Source File: madry_mnist_model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def fprop(self, x):

        output = OrderedDict()
        # first convolutional layer
        h_conv1 = tf.nn.relu(self._conv2d(x, self.W_conv1) + self.b_conv1)
        h_pool1 = self._max_pool_2x2(h_conv1)

        # second convolutional layer
        h_conv2 = tf.nn.relu(
            self._conv2d(h_pool1, self.W_conv2) + self.b_conv2)
        h_pool2 = self._max_pool_2x2(h_conv2)

        # first fully connected layer

        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, self.W_fc1) + self.b_fc1)

        # output layer
        logits = tf.matmul(h_fc1, self.W_fc2) + self.b_fc2

        output = deterministic_dict(locals())
        del output["self"]
        output[self.O_PROBS] = tf.nn.softmax(logits=logits)

        return output 
Example #4
Source File: main.py    From ciocheck with MIT License
def __init__(self, cmd_root, cli_args, folders=None, files=None):
        """Main tool runner."""
        # Run options
        self.cmd_root = cmd_root  # Folder on which the command was executed
        self.config = load_config(cmd_root, cli_args)
        self.file_manager = FileManager(folders=folders, files=files)
        self.folders = folders
        self.files = files
        self.all_results = OrderedDict()
        self.all_tools = {}
        self.test_results = None
        self.failed_checks = set()

        self.check = self.config.get_value('check')
        self.enforce = self.config.get_value('enforce')
        self.diff_mode = self.config.get_value('diff_mode')
        self.file_mode = self.config.get_value('file_mode')
        self.branch = self.config.get_value('branch')
        self.disable_formatters = cli_args.disable_formatters
        self.disable_linters = cli_args.disable_linters
        self.disable_tests = cli_args.disable_tests 
Example #5
Source File: dataloader_m.py    From models with MIT License
def read_cpg_profiles(filenames, log=None, *args, **kwargs):
    """Read methylation profiles.

    Input files can be gzip compressed.

    Returns
    -------
    dict
        `dict (key, value)`, where `key` is the output name and `value` the CpG
        table.
    """

    cpg_profiles = OrderedDict()
    for filename in filenames:
        if log:
            log(filename)
        #cpg_file = dat.GzipFile(filename, 'r')
        cpg_file = get_fh(filename, 'r')
        output_name = split_ext(filename)
        cpg_profile = dat.read_cpg_profile(cpg_file, sort=True, *args, **kwargs)
        cpg_profiles[output_name] = cpg_profile
        cpg_file.close()
    return cpg_profiles 
Example #6
Source File: dataloader_m.py    From models with MIT License
def map_cpg_tables(cpg_tables, chromo, chromo_pos):
    """Maps values from cpg_tables to `chromo_pos`.

    Positions in `cpg_tables` for `chromo` must be a subset of `chromo_pos`.
    Inserts `dat.CPG_NAN` for uncovered positions.
    """
    chromo_pos.sort()
    mapped_tables = OrderedDict()
    for name, cpg_table in six.iteritems(cpg_tables):
        cpg_table = cpg_table.loc[cpg_table.chromo == chromo]
        cpg_table = cpg_table.sort_values('pos')
        mapped_table = map_values(cpg_table.value.values,
                                  cpg_table.pos.values,
                                  chromo_pos)
        assert len(mapped_table) == len(chromo_pos)
        mapped_tables[name] = mapped_table
    return mapped_tables 
Example #7
Source File: nq_eval.py    From natural-questions with Apache License 2.0
def get_metrics_with_answer_stats(long_answer_stats, short_answer_stats):
  """Generate metrics dict using long and short answer stats."""

  def _get_metric_dict(answer_stats, prefix=''):
    """Compute all metrics for a set of answer statistics."""
    opt_result, pr_table = compute_pr_curves(
        answer_stats, targets=[0.5, 0.75, 0.9])
    f1, precision, recall, threshold = opt_result
    metrics = OrderedDict({
        'best-threshold-f1': f1,
        'best-threshold-precision': precision,
        'best-threshold-recall': recall,
        'best-threshold': threshold,
    })
    for target, recall, precision, _ in pr_table:
      metrics['recall-at-precision>={:.2}'.format(target)] = recall
      metrics['precision-at-precision>={:.2}'.format(target)] = precision

    # Add prefix before returning.
    return dict([(prefix + k, v) for k, v in six.iteritems(metrics)])

  metrics = _get_metric_dict(long_answer_stats, 'long-')
  metrics.update(_get_metric_dict(short_answer_stats, 'short-'))
  return metrics 
Example #8
Source File: regnet2mmdet.py    From mmdetection with Apache License 2.0
def convert(src, dst):
    """Convert keys in pycls pretrained RegNet models to mmdet style."""
    # load caffe model
    regnet_model = torch.load(src)
    blobs = regnet_model['model_state']
    # convert to pytorch style
    state_dict = OrderedDict()
    converted_names = set()
    for key, weight in blobs.items():
        if 'stem' in key:
            convert_stem(key, weight, state_dict, converted_names)
        elif 'head' in key:
            convert_head(key, weight, state_dict, converted_names)
        elif key.startswith('s'):
            convert_reslayer(key, weight, state_dict, converted_names)

    # check if all layers are converted
    for key in blobs:
        if key not in converted_names:
            print(f'not converted: {key}')
    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst) 
Example #9
Source File: test_wrappers.py    From mmdetection with Apache License 2.0
def test_max_pool_2d():
    test_cases = OrderedDict([('in_w', [10, 20]), ('in_h', [10, 20]),
                              ('in_channel', [1, 3]), ('out_channel', [1, 3]),
                              ('kernel_size', [3, 5]), ('stride', [1, 2]),
                              ('padding', [0, 1]), ('dilation', [1, 2])])

    for in_h, in_w, in_cha, out_cha, k, s, p, d in product(
            *list(test_cases.values())):
        # wrapper op with 0-dim input
        x_empty = torch.randn(0, in_cha, in_h, in_w, requires_grad=True)
        wrapper = MaxPool2d(k, stride=s, padding=p, dilation=d)
        wrapper_out = wrapper(x_empty)

        # torch op with 3-dim input as shape reference
        x_normal = torch.randn(3, in_cha, in_h, in_w)
        ref = nn.MaxPool2d(k, stride=s, padding=p, dilation=d)
        ref_out = ref(x_normal)

        assert wrapper_out.shape[0] == 0
        assert wrapper_out.shape[1:] == ref_out.shape[1:]

        assert torch.equal(wrapper(x_normal), ref_out) 
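The OrderedDict here is what keeps each tuple produced by itertools.product aligned with a fixed parameter order; a small standalone sketch of the same pattern:

from collections import OrderedDict
from itertools import product

grid = OrderedDict([('kernel_size', [3, 5]), ('stride', [1, 2]), ('padding', [0, 1])])

# Because the dict is ordered, every tuple from product() lines up with grid.keys().
for combo in product(*grid.values()):
    params = dict(zip(grid.keys(), combo))
    print(params)            # e.g. {'kernel_size': 3, 'stride': 1, 'padding': 0}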
Example #10
Source File: tools.py    From ciocheck with MIT License
def parse_coverage(self):
        """Parse .coverage json report generated by coverage."""
        coverage_string = ("!coverage.py: This is a private format, don't "
                           "read it directly!")
        coverage_path = os.path.join(self.cmd_root, '.coverage')

        covered_lines = {}
        if os.path.isfile(coverage_path):
            with open(coverage_path, 'r') as file_obj:
                data = file_obj.read()
                data = data.replace(coverage_string, '')

            cov = json.loads(data)
            covered_lines = OrderedDict()
            lines = cov['lines']
            for path in sorted(lines):
                covered_lines[path] = lines[path]
        return covered_lines 
Example #11
Source File: track.py    From svviz with MIT License
def __init__(self, chromPartsCollection, pixelWidth, dividerSize=25):
        # length is in genomic coordinates, starts is in pixels
        self.dividerSize = dividerSize
        self.partsToLengths = collections.OrderedDict()
        self.partsToStartPixels = collections.OrderedDict()
        self.chromPartsCollection = chromPartsCollection

        for part in chromPartsCollection:
            self.partsToLengths[part.id] = len(part)

        self.pixelWidth = pixelWidth

        totalLength = sum(self.partsToLengths.values()) + (len(self.partsToLengths)-1)*dividerSize
        self.basesPerPixel = totalLength / float(pixelWidth)

        curStart = 0
        for regionID in self.partsToLengths:
            self.partsToStartPixels[regionID] = curStart
            curStart += (self.partsToLengths[regionID]+dividerSize) / self.basesPerPixel 
Example #12
Source File: datahub.py    From svviz with MIT License
def __init__(self):
        self.args = None
        self.alignDistance = 0
        self.samples = collections.OrderedDict()
        self.genome = None
        self.sources = {}
        self.annotationSets = collections.OrderedDict()

        # for storing axes, annotations, etc, by allele
        self.alleleTracks = collections.defaultdict(collections.OrderedDict)
        self.trackCompositor = None

        self.dotplots = {}
        self.info = {}

        self.reset() 
Example #13
Source File: 10_two_layer_net.py    From deep-learning-note with MIT License
def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # Initialize the weights
        self.params = {}
        # Initialize with a Gaussian distribution
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

        # Build the layers
        self.layers = OrderedDict()
        self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
        self.layers['Relu1'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])

        self.lastLayer = SoftmaxWithLoss() 
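The layers live in an OrderedDict because the network's forward pass (not shown in this snippet) walks them in insertion order and the backward pass walks them in reverse; a toy sketch of that pattern with stand-in layers:

from collections import OrderedDict

class Double:                      # toy stand-in for an Affine layer
    def forward(self, x): return x * 2
    def backward(self, dout): return dout * 2

class AddOne:                      # toy stand-in for a Relu-like layer
    def forward(self, x): return x + 1
    def backward(self, dout): return dout

layers = OrderedDict([('Double1', Double()), ('AddOne1', AddOne())])

x = 3
for layer in layers.values():                  # forward: insertion order
    x = layer.forward(x)                       # (3 * 2) + 1 = 7

dout = 1
for layer in reversed(list(layers.values())):  # backward: reverse order
    dout = layer.backward(dout)                # 1 -> 1 -> 2
print(x, dout)                                 # 7 2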
Example #14
Source File: tfutil.py    From disentangling_conditional_gans with MIT License
def _init_fields(self):
        self.name               = None          # User-specified name, defaults to build func name if None.
        self.scope              = None          # Unique TF graph scope, derived from the user-specified name.
        self.static_kwargs      = dict()        # Arguments passed to the user-supplied build func.
        self.num_inputs         = 0             # Number of input tensors.
        self.num_outputs        = 0             # Number of output tensors.
        self.input_shapes       = [[]]          # Input tensor shapes (NC or NCHW), including minibatch dimension.
        self.output_shapes      = [[]]          # Output tensor shapes (NC or NCHW), including minibatch dimension.
        self.input_shape        = []            # Short-hand for input_shapes[0].
        self.output_shape       = []            # Short-hand for output_shapes[0].
        self.input_templates    = []            # Input placeholders in the template graph.
        self.output_templates   = []            # Output tensors in the template graph.
        self.input_names        = []            # Name string for each input.
        self.output_names       = []            # Name string for each output.
        self.vars               = OrderedDict() # All variables (localname => var).
        self.trainables         = OrderedDict() # Trainable variables (localname => var).
        self._build_func        = None          # User-supplied build function that constructs the network.
        self._build_func_name   = None          # Name of the build function.
        self._build_module_src  = None          # Full source code of the module containing the build function.
        self._run_cache         = dict()        # Cached graph data for Network.run(). 
Example #15
Source File: multi_layer_net.py    From deep-learning-note with MIT License
def __init__(self, input_size, hidden_size_list, output_size,
                 activation='relu', weight_init_std='relu', weight_decay_lambda=0):
        self.input_size = input_size
        self.output_size = output_size
        self.hidden_size_list = hidden_size_list
        self.hidden_layer_num = len(hidden_size_list)
        self.weight_decay_lambda = weight_decay_lambda
        self.params = {}

        # Initialize the weights
        self.__init_weight(weight_init_std)

        # Build the layers
        activation_layer = {'sigmoid': Sigmoid, 'relu': Relu}
        self.layers = OrderedDict()
        for idx in range(1, self.hidden_layer_num+1):
            self.layers['Affine' + str(idx)] = Affine(self.params['W' + str(idx)],
                                                      self.params['b' + str(idx)])
            self.layers['Activation_function' + str(idx)] = activation_layer[activation]()

        idx = self.hidden_layer_num + 1
        self.layers['Affine' + str(idx)] = Affine(self.params['W' + str(idx)],
            self.params['b' + str(idx)])

        self.last_layer = SoftmaxWithLoss() 
Example #16
Source File: urlextract.py    From video2commons with GNU General Public License v3.0
def escape_wikitext(wikitext):
    """Escape wikitext for use in file description."""
    rep = OrderedDict([
        ('{|', '{{(}}|'),
        ('|}', '|{{)}}'),
        ('||', '||'),
        ('|', '|'),
        ('[[', '{{!((}}'),
        (']]', '{{))!}}'),
        ('{{', '{{((}}'),
        ('}}', '{{))}}'),
        ('{', '{{(}}'),
        ('}', '{{)}}'),
    ])
    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    return pattern.sub(lambda m: rep[re.escape(m.group(0))], wikitext)


# Source: mediawiki.Title.js@9df363d 
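The OrderedDict in escape_wikitext controls the order of alternatives in the compiled regex, so multi-character tokens such as '{{' are matched before their single-character pieces; a trimmed sketch of the same idea (this adaptation uses Python 3's items() instead of the original iteritems(), and omits the pipe-related entries):

from collections import OrderedDict
import re

rep = OrderedDict([
    ('[[', '{{!((}}'),
    (']]', '{{))!}}'),
    ('{{', '{{((}}'),
    ('}}', '{{))}}'),
    ('{', '{{(}}'),
    ('}', '{{)}}'),
])
rep = OrderedDict((re.escape(k), v) for k, v in rep.items())
pattern = re.compile('|'.join(rep.keys()))

# The OrderedDict keeps '{{' ahead of '{' in the alternation, so the regex
# tries the two-character token first.
print(pattern.sub(lambda m: rep[re.escape(m.group(0))], '{{foo}} [[bar]]'))
# -> {{((}}foo{{))}} {{!((}}bar{{))!}}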
Example #17
Source File: simple_cnn.py    From deep-learning-note with MIT License
def __init__(self, input_dim=(1, 28, 28),
                 conv_param={'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
                 hidden_size=100, output_size=10, weight_init_std=0.01):
        filter_num = conv_param['filter_num']
        filter_size = conv_param['filter_size']
        filter_pad = conv_param['pad']
        filter_stride = conv_param['stride']
        input_size = input_dim[1]
        conv_output_size = (input_size - filter_size + 2 * filter_pad) / filter_stride + 1
        pool_output_size = int(filter_num * (conv_output_size / 2) * (conv_output_size / 2))

        # Initialize the weights
        self.params = {}
        self.params['W1'] = weight_init_std * \
                            np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
        self.params['b1'] = np.zeros(filter_num)
        self.params['W2'] = weight_init_std * \
                            np.random.randn(pool_output_size, hidden_size)
        self.params['b2'] = np.zeros(hidden_size)
        self.params['W3'] = weight_init_std * \
                            np.random.randn(hidden_size, output_size)
        self.params['b3'] = np.zeros(output_size)

        # Build the layers
        self.layers = OrderedDict()
        self.layers['Conv1'] = Convolution(self.params['W1'], self.params['b1'],
                                           conv_param['stride'], conv_param['pad'])
        self.layers['Relu1'] = Relu()
        self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
        self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
        self.layers['Relu2'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])

        self.last_layer = SoftmaxWithLoss() 
Example #18
Source File: 19_cnn.py    From deep-learning-note with MIT License
def __init__(self, input_dim=(1, 28, 28),
                 conv_param={'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
                 hidden_size=100, output_size=10, weight_init_std=0.01):
        filter_num = conv_param['filter_num']
        filter_size = conv_param['filter_size']
        filter_pad = conv_param['pad']
        filter_stride = conv_param['stride']
        input_size = input_dim[1]
        conv_output_size = (input_size - filter_size + 2 * filter_pad) / filter_stride + 1
        pool_output_size = int(filter_num * (conv_output_size / 2) * (conv_output_size / 2))

        # Initialize the weights
        self.params = {}
        self.params['W1'] = weight_init_std * \
                            np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
        self.params['b1'] = np.zeros(filter_num)
        self.params['W2'] = weight_init_std * \
                            np.random.randn(pool_output_size, hidden_size)
        self.params['b2'] = np.zeros(hidden_size)
        self.params['W3'] = weight_init_std * \
                            np.random.randn(hidden_size, output_size)
        self.params['b3'] = np.zeros(output_size)

        # Build the layers
        self.layers = OrderedDict()
        self.layers['Conv1'] = Convolution(self.params['W1'], self.params['b1'],
                                           conv_param['stride'], conv_param['pad'])
        self.layers['Relu1'] = Relu()
        self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
        self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
        self.layers['Relu2'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])

        self.last_layer = SoftmaxWithLoss() 
Example #19
Source File: Load_Pretrained_Embed.py    From pytorch_NER_BiLSTM_CNN_CRF with Apache License 2.0
def convert_list2dict(convert_list):
    """
    :param convert_list:  list type
    :return:  dict type
    """
    list_dict = OrderedDict()
    list_lower = []
    for index, word in enumerate(convert_list):
        list_lower.append(word.lower())
        list_dict[word] = index
    assert len(list_lower) == len(list_dict)
    return list_dict, list_lower 
Example #20
Source File: evaluation.py    From VSE-C with MIT License
def __init__(self):
        # to keep the order of logged variables deterministic
        self.meters = OrderedDict() 
Example #21
Source File: model.py    From VSE-C with MIT License
def load_state_dict(self, state_dict):
        """Copies parameters. overwritting the default one to
        accept state_dict from Full model
        """
        own_state = self.state_dict()
        new_state = OrderedDict()
        for name, param in list(state_dict.items()):
            if name in own_state:
                new_state[name] = param

        super(EncoderImagePrecomp, self).load_state_dict(new_state) 
Example #22
Source File: tools.py    From ciocheck with MIT License
def setup_pytest_coverage_args(self, paths):
        """Setup pytest-cov arguments and config file path."""
        if isinstance(paths, (dict, OrderedDict)):
            paths = list(sorted(paths.keys()))

        for path in paths:
            if os.path.isdir(path):
                cov = '--cov={0}'.format(path)
                coverage_args = [cov]
                break
        else:
            coverage_args = []

        coverage_config_file = os.path.join(self.cmd_root,
                                            COVERAGE_CONFIGURATION_FILE)
        if os.path.isfile(coverage_config_file):
            cov_config = ['--cov-config', coverage_config_file]
            coverage_args = cov_config + coverage_args

        if PY2:
            # xdist appears to lock up the test suite with python2, maybe due
            # to an interaction with coverage
            enable_xdist = []
        else:
            enable_xdist = ['-n', str(cpu_count())]

        self.pytest_args = ['--json={0}'.format(self.REPORT_FILE)]
        self.pytest_args = self.pytest_args + enable_xdist
        self.pytest_args = self.pytest_args + coverage_args 
Example #23
Source File: runner.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def proc_fvals(self, fvals):
        """
        Postprocess the outputs of the Session.run(). Move the outputs of
        sub-graphs to next ones and return the output of the last sub-graph.

        :param fvals: A list of fetched values returned by Session.run()
        :return: A dictionary of fetched values returned by the last sub-graph.
        """
        inputs = self.inputs
        outputs = self.outputs

        # Move data to the next sub-graph for the next step
        cur = 0
        for i in range(len(inputs)-1):
            if not self.active_gpus[i]:
                self.next_vals[i+1] = None
                continue
            self.next_vals[i+1] = OrderedDict()
            for k in outputs[i]:
                self.next_vals[i+1][k] = fvals[cur]
                cur += 1
            if i == 0:
                self.next_vals[0] = None

        # Return the output of the last sub-graph
        last_fvals = OrderedDict()
        if self.active_gpus[-1]:
            assert cur+len(outputs[-1]) == len(fvals)
            for k in outputs[-1]:
                last_fvals[k] = fvals[cur]
                cur += 1
        return last_fvals 
Example #24
Source File: detectron2pytorch.py    From mmdetection with Apache License 2.0
def convert(src, dst, depth):
    """Convert keys in detectron pretrained ResNet models to pytorch style."""
    # load arch_settings
    if depth not in arch_settings:
        raise ValueError('Only support ResNet-50 and ResNet-101 currently')
    block_nums = arch_settings[depth]
    # load caffe model
    caffe_model = mmcv.load(src, encoding='latin1')
    blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model
    # convert to pytorch style
    state_dict = OrderedDict()
    converted_names = set()
    convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names)
    convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names)
    for i in range(1, len(block_nums) + 1):
        for j in range(block_nums[i - 1]):
            if j == 0:
                convert_conv_fc(blobs, state_dict, f'res{i + 1}_{j}_branch1',
                                f'layer{i}.{j}.downsample.0', converted_names)
                convert_bn(blobs, state_dict, f'res{i + 1}_{j}_branch1_bn',
                           f'layer{i}.{j}.downsample.1', converted_names)
            for k, letter in enumerate(['a', 'b', 'c']):
                convert_conv_fc(blobs, state_dict,
                                f'res{i + 1}_{j}_branch2{letter}',
                                f'layer{i}.{j}.conv{k+1}', converted_names)
                convert_bn(blobs, state_dict,
                           f'res{i + 1}_{j}_branch2{letter}_bn',
                           f'layer{i}.{j}.bn{k + 1}', converted_names)
    # check if all layers are converted
    for key in blobs:
        if key not in converted_names:
            print(f'Not Convert: {key}')
    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst) 
Example #25
Source File: test_wrappers.py    From mmdetection with Apache License 2.0
def test_conv2d():
    """
    CommandLine:
        xdoctest -m tests/test_wrappers.py test_conv2d
    """

    test_cases = OrderedDict([('in_w', [10, 20]), ('in_h', [10, 20]),
                              ('in_channel', [1, 3]), ('out_channel', [1, 3]),
                              ('kernel_size', [3, 5]), ('stride', [1, 2]),
                              ('padding', [0, 1]), ('dilation', [1, 2])])

    # train mode
    for in_h, in_w, in_cha, out_cha, k, s, p, d in product(
            *list(test_cases.values())):
        # wrapper op with 0-dim input
        x_empty = torch.randn(0, in_cha, in_h, in_w)
        torch.manual_seed(0)
        wrapper = Conv2d(in_cha, out_cha, k, stride=s, padding=p, dilation=d)
        wrapper_out = wrapper(x_empty)

        # torch op with 3-dim input as shape reference
        x_normal = torch.randn(3, in_cha, in_h, in_w).requires_grad_(True)
        torch.manual_seed(0)
        ref = nn.Conv2d(in_cha, out_cha, k, stride=s, padding=p, dilation=d)
        ref_out = ref(x_normal)

        assert wrapper_out.shape[0] == 0
        assert wrapper_out.shape[1:] == ref_out.shape[1:]

        wrapper_out.sum().backward()
        assert wrapper.weight.grad is not None
        assert wrapper.weight.grad.shape == wrapper.weight.shape

        assert torch.equal(wrapper(x_normal), ref_out)

    # eval mode
    x_empty = torch.randn(0, in_cha, in_h, in_w)
    wrapper = Conv2d(in_cha, out_cha, k, stride=s, padding=p, dilation=d)
    wrapper.eval()
    wrapper(x_empty) 
Example #26
Source File: base.py    From mmdetection with Apache License 2.0
def _parse_losses(self, losses):
        """Parse the raw outputs (losses) of the network.

        Args:
            losses (dict): Raw output of the network, which usually contain
                losses and other necessary information.

        Returns:
            tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
                which may be a weighted sum of all losses, log_vars contains
                all the variables to be sent to the logger.
        """
        log_vars = OrderedDict()
        for loss_name, loss_value in losses.items():
            if isinstance(loss_value, torch.Tensor):
                log_vars[loss_name] = loss_value.mean()
            elif isinstance(loss_value, list):
                log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
            else:
                raise TypeError(
                    f'{loss_name} is not a tensor or list of tensors')

        loss = sum(_value for _key, _value in log_vars.items()
                   if 'loss' in _key)

        log_vars['loss'] = loss
        for loss_name, loss_value in log_vars.items():
            # reduce loss when distributed training
            if dist.is_available() and dist.is_initialized():
                loss_value = loss_value.data.clone()
                dist.all_reduce(loss_value.div_(dist.get_world_size()))
            log_vars[loss_name] = loss_value.item()

        return loss, log_vars 
Example #27
Source File: test.py    From pyprnt with MIT License
def test_dict_with_newline(self):
        # For python version 3.4 and below
        menu = collections.OrderedDict()
        menu["kimchi"] = 5000
        menu["Ice\nCream"] = "1 €\n1.08 $"
        testee = prnt(menu, output=True, width=50)
        expect = "┌──────────┬───────────┐\n│kimchi    │5000       │\n│Ice\\nCream│1 €\\n1.08 $│\n└──────────┴───────────┘"
        self.assertEqual(testee, expect) 
Example #28
Source File: test.py    From pyprnt with MIT License
def test_dict_basic(self):
        # For python version 3.4 and below
        menu = collections.OrderedDict()
        menu["kimchi"] = 5000
        menu["Ice Cream"] = 100
        testee = prnt(menu, output=True, width=50)
        expect = "┌─────────┬────┐\n│kimchi   │5000│\n│Ice Cream│100 │\n└─────────┴────┘"
        self.assertEqual(testee, expect) 
Example #29
Source File: tfutil.py    From disentangling_conditional_gans with MIT License
def _init_graph(self):
        # Collect inputs.
        self.input_names = []
        for param in inspect.signature(self._build_func).parameters.values():
            if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
                self.input_names.append(param.name)
        self.num_inputs = len(self.input_names)
        assert self.num_inputs >= 1

        # Choose name and scope.
        if self.name is None:
            self.name = self._build_func_name
        self.scope = tf.get_default_graph().unique_name(self.name.replace('/', '_'), mark_as_used=False)
        
        # Build template graph.
        with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
            assert tf.get_variable_scope().name == self.scope
            with absolute_name_scope(self.scope): # ignore surrounding name_scope
                with tf.control_dependencies(None): # ignore surrounding control_dependencies
                    self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
                    out_expr = self._build_func(*self.input_templates, is_template_graph=True, **self.static_kwargs)
            
        # Collect outputs.
        assert is_tf_expression(out_expr) or isinstance(out_expr, tuple)
        self.output_templates = [out_expr] if is_tf_expression(out_expr) else list(out_expr)
        self.output_names = [t.name.split('/')[-1].split(':')[0] for t in self.output_templates]
        self.num_outputs = len(self.output_templates)
        assert self.num_outputs >= 1
        
        # Populate remaining fields.
        self.input_shapes   = [shape_to_list(t.shape) for t in self.input_templates]
        self.output_shapes  = [shape_to_list(t.shape) for t in self.output_templates]
        self.input_shape    = self.input_shapes[0]
        self.output_shape   = self.output_shapes[0]
        self.vars           = OrderedDict([(self.get_var_localname(var), var) for var in tf.global_variables(self.scope + '/')])
        self.trainables     = OrderedDict([(self.get_var_localname(var), var) for var in tf.trainable_variables(self.scope + '/')])

    # Run initializers for all variables defined by this network. 
Example #30
Source File: tfutil.py    From disentangling_conditional_gans with MIT License
def __init__(
        self,
        name                = 'Train',
        tf_optimizer        = 'tf.train.AdamOptimizer',
        learning_rate       = 0.001,
        use_loss_scaling    = False,
        loss_scaling_init   = 64.0,
        loss_scaling_inc    = 0.0005,
        loss_scaling_dec    = 1.0,
        **kwargs):

        # Init fields.
        self.name               = name
        self.learning_rate      = tf.convert_to_tensor(learning_rate)
        self.id                 = self.name.replace('/', '.')
        self.scope              = tf.get_default_graph().unique_name(self.id)
        self.optimizer_class    = import_obj(tf_optimizer)
        self.optimizer_kwargs   = dict(kwargs)
        self.use_loss_scaling   = use_loss_scaling
        self.loss_scaling_init  = loss_scaling_init
        self.loss_scaling_inc   = loss_scaling_inc
        self.loss_scaling_dec   = loss_scaling_dec
        self._grad_shapes       = None          # [shape, ...]
        self._dev_opt           = OrderedDict() # device => optimizer
        self._dev_grads         = OrderedDict() # device => [[(grad, var), ...], ...]
        self._dev_ls_var        = OrderedDict() # device => variable (log2 of loss scaling factor)
        self._updates_applied   = False

    # Register the gradients of the given loss function with respect to the given variables.
    # Intended to be called once per GPU.