Python collections.OrderedDict() Examples

The following code examples show how to use collections.OrderedDict(). They are taken from open-source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: BERT-Classification-Tutorial   Author: Socialbird-AILab   File: modeling.py    Apache License 2.0 6 votes vote down vote up
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
    """Compute the union of the current variables and checkpoint variables.

    Args:
        tvars: List of graph variables (each with a ``.name`` attribute).
        init_checkpoint: Path to the checkpoint to map variables from.

    Returns:
        Tuple ``(assignment_map, initialized_variable_names)`` where
        ``assignment_map`` maps checkpoint variable names to themselves (for
        names present in both the graph and the checkpoint) and
        ``initialized_variable_names`` records each such name, both with and
        without the ``":0"`` suffix.
    """
    # NOTE: the original code also assigned `assignment_map = {}` here, but
    # that value was dead — it was unconditionally replaced below.
    initialized_variable_names = {}

    # Map bare variable names (with the trailing ":<index>" stripped) to vars.
    name_to_variable = collections.OrderedDict()
    for var in tvars:
        name = var.name
        m = re.match("^(.*):\\d+$", name)
        if m is not None:
            name = m.group(1)
        name_to_variable[name] = var

    init_vars = tf.train.list_variables(init_checkpoint)

    assignment_map = collections.OrderedDict()
    for x in init_vars:
        (name, var) = (x[0], x[1])
        if name not in name_to_variable:
            continue
        assignment_map[name] = name
        initialized_variable_names[name] = 1
        initialized_variable_names[name + ":0"] = 1

    return (assignment_map, initialized_variable_names)
Example 2
Project: PEAKachu   Author: tbischler   File: window.py    ISC License 6 votes vote down vote up
def __init__(self, w_size, step_size, replicon_dict, max_proc, stat_test,
                 norm_method, size_factors, het_p_val_threshold,
                 rep_pair_p_val_threshold, padj_threshold, mad_multiplier,
                 fc_cutoff, pairwise_replicates, output_folder):
        """Store window-calling parameters and ensure the output folder exists."""
        # Per-library data, kept in insertion order.
        self._lib_dict = OrderedDict()
        self._replicon_dict = replicon_dict  # own copy of replicon_dict
        # Window geometry and parallelism.
        self._w_size = w_size
        self._step_size = step_size
        self._max_proc = max_proc
        # Statistical test configuration.
        self._stat_test = stat_test
        self._norm_method = norm_method
        self._size_factors = size_factors
        # Thresholds and cutoffs.
        self._het_p_val_threshold = het_p_val_threshold
        self._rep_pair_p_val_threshold = rep_pair_p_val_threshold
        self._padj_threshold = padj_threshold
        self._mad_multiplier = mad_multiplier
        self._fc_cutoff = fc_cutoff
        self._pairwise_replicates = pairwise_replicates
        # Create the output folder if it is not already present.
        self._output_folder = output_folder
        if not exists(self._output_folder):
            makedirs(self._output_folder)
Example 3
Project: autolims   Author: scottbecker   File: views.py    MIT License 6 votes vote down vote up
def get_context_data(self, *args, **kwargs):
        """Extend the parent context with container metadata, runs and aliquots."""
        context_data = super(ContainerAuthenticatingView, self).get_context_data(*args, **kwargs)

        container = self.container
        # Metadata shown in declaration order, hence the OrderedDict of pairs.
        context_data['metadata'] = OrderedDict([
            ('Type', container.container_type_id),
            ('ID', container.id),
            ('Desired Storage Temp', container.storage_condition),
            ('Expires At', container.expires_at),
            ('Barcode', container.barcode)
        ])
        context_data['container'] = container
        context_data['runs'] = container.runs.all().order_by('-id')
        context_data['aliquots'] = container.aliquots.all().order_by('well_idx')

        return context_data
    
    

# ----------------------------
# ----- API Views ------------
# ---------------------------- 
Example 4
Project: pyblish-win   Author: pyblish   File: test_decode.py    GNU Lesser General Public License v3.0 6 votes vote down vote up
def test_object_pairs_hook(self):
        """Decoding must honour object_pairs_hook, including for empty objects."""
        s = '{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'
        p = [("xkd", 1), ("kcw", 2), ("art", 3), ("hxm", 4),
             ("qrt", 5), ("pad", 6), ("hoy", 7)]
        identity = lambda pairs: pairs
        self.assertEqual(self.loads(s), eval(s))
        self.assertEqual(self.loads(s, object_pairs_hook=identity), p)
        self.assertEqual(
            self.json.load(StringIO(s), object_pairs_hook=identity), p)
        od = self.loads(s, object_pairs_hook=OrderedDict)
        self.assertEqual(od, OrderedDict(p))
        self.assertEqual(type(od), OrderedDict)
        # the object_pairs_hook takes priority over the object_hook
        self.assertEqual(
            self.loads(s, object_pairs_hook=OrderedDict,
                       object_hook=lambda x: None),
            OrderedDict(p))
        # check that empty objects literals work (see #17368)
        self.assertEqual(self.loads('{}', object_pairs_hook=OrderedDict),
                         OrderedDict())
        self.assertEqual(self.loads('{"empty": {}}',
                                    object_pairs_hook=OrderedDict),
                         OrderedDict([('empty', OrderedDict())]))
Example 5
Project: pyblish-win   Author: pyblish   File: argparse.py    GNU Lesser General Public License v3.0 6 votes vote down vote up
def __init__(self,
                 option_strings,
                 prog,
                 parser_class,
                 dest=SUPPRESS,
                 help=None,
                 metavar=None):
        """Set up the subparsers action and its ordered name->parser map."""
        self._prog_prefix = prog
        self._parser_class = parser_class
        # OrderedDict so help output lists subcommands in registration order.
        self._name_parser_map = _collections.OrderedDict()
        self._choices_actions = []

        # `choices` aliases the (still empty) parser map, so parsers added
        # later automatically become valid choices.
        super(_SubParsersAction, self).__init__(
            option_strings=option_strings,
            nargs=PARSER,
            dest=dest,
            choices=self._name_parser_map,
            metavar=metavar,
            help=help)
Example 6
Project: xadmin_bugfix   Author: vip68   File: actions.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def get_actions(self):
        """Collect global and per-class actions into an OrderedDict keyed by name."""
        if self.actions is None:
            return OrderedDict()

        collected = [self.get_action(name) for name in self.global_actions]

        # Walk the MRO from base classes to subclasses so later (more
        # derived) classes may contribute after their bases.
        for klass in reversed(self.admin_view.__class__.mro()):
            for action in getattr(klass, 'actions', []) or []:
                collected.append(self.get_action(action))

        result = OrderedDict()
        # get_action might have returned None; skip such entries.
        for entry in collected:
            if not entry:
                continue
            ac, name, desc, icon = entry
            result[name] = (ac, name, desc, icon)
        return result
Example 7
Project: natural-questions   Author: google-research-datasets   File: nq_eval.py    Apache License 2.0 6 votes vote down vote up
def compute_f1(answer_stats, prefix=''):
  """Computes F1, precision, recall for a list of answer scores.

  Args:
    answer_stats: List of per-example scores.
    prefix (''): Prefix to prepend to score dictionary.

  Returns:
    Dictionary mapping string names to scores.
  """

  has_gold, has_pred, is_correct, _ = list(zip(*answer_stats))
  precision = safe_divide(sum(is_correct), sum(has_pred))
  recall = safe_divide(sum(is_correct), sum(has_gold))
  f1 = safe_divide(2 * precision * recall, precision + recall)

  # Build from pairs rather than a dict literal: passing a plain dict to
  # OrderedDict only preserves the written order on Python 3.7+, where dicts
  # are insertion-ordered anyway.
  return OrderedDict([
      (prefix + 'n', len(answer_stats)),
      (prefix + 'f1', f1),
      (prefix + 'precision', precision),
      (prefix + 'recall', recall),
  ])
Example 8
Project: natural-questions   Author: google-research-datasets   File: nq_eval.py    Apache License 2.0 6 votes vote down vote up
def get_metrics_with_answer_stats(long_answer_stats, short_answer_stats):
  """Generate metrics dict using long and short answer stats."""

  def _get_metric_dict(answer_stats, prefix=''):
    """Compute all metrics for a set of answer statistics."""
    opt_result, pr_table = compute_pr_curves(
        answer_stats, targets=[0.5, 0.75, 0.9])
    f1, precision, recall, threshold = opt_result
    # Build from pairs rather than a dict literal: OrderedDict(dict_literal)
    # only preserves the written order on Python 3.7+.
    metrics = OrderedDict([
        ('best-threshold-f1', f1),
        ('best-threshold-precision', precision),
        ('best-threshold-recall', recall),
        ('best-threshold', threshold),
    ])
    for target, recall, precision, _ in pr_table:
      metrics['recall-at-precision>={:.2}'.format(target)] = recall
      metrics['precision-at-precision>={:.2}'.format(target)] = precision

    # Add prefix before returning.
    return dict([(prefix + k, v) for k, v in six.iteritems(metrics)])

  metrics = _get_metric_dict(long_answer_stats, 'long-')
  metrics.update(_get_metric_dict(short_answer_stats, 'short-'))
  return metrics
Example 9
Project: claxon   Author: vanatteveldt   File: ml.py    GNU General Public License v3.0 6 votes vote down vote up
def get_todo(session: Session, model: Language, n=10) -> OrderedDict:
    """Populate the queue of documents to code"""
    coded_ids = {a.document_id for a in Annotation.objects.filter(document__gold=False, label=session.label)}
    candidates = Document.objects.filter(gold=False).exclude(pk__in=coded_ids)
    if session.query:
        candidates = candidates.filter(text__icontains=session.query)
    todo = list(candidates.values_list("id", flat=True))
    logging.debug("{ntodo} documents in todo (query: {q}, done={ndone})"
                  .format(ntodo=len(todo), ndone=len(coded_ids), q=session.query))
    # Cap the candidate pool before running the (expensive) classifier.
    if len(todo) > settings.N_SAMPLE:
        todo = sample(todo, settings.N_SAMPLE)

    # Score candidates and keep the n documents whose scores are closest to
    # 0.5, i.e. the ones the classifier is least certain about.
    textcat = model.get_pipe("textcat")
    docs = [get_tokens(model, doc_id) for doc_id in todo]
    scores = [d.cats[session.label.label] for d in textcat.pipe(docs)]
    uncertainty = [abs(score - 0.5) for score in scores]
    chosen = list(argsort(uncertainty))[:n]

    return OrderedDict((todo[i], scores[i]) for i in chosen)
Example 10
Project: prediction-constrained-topic-models   Author: dtak   File: train_and_eval_sklearn_binary_classifier.py    MIT License 6 votes vote down vote up
def make_param_dict_generator(param_grid_dict):
    ''' Make iterable that will loop thru each combo of params

    Yields one OrderedDict per combination, keys in the same order as
    `param_grid_dict`, with the last key's grid varying fastest.

    Example
    -------
    >>> pgD = OrderedDict()
    >>> pgD['C'] = np.asarray([1,2,3])
    >>> pgD['alpha'] = np.asarray([0.5, 2.5])
    >>> gen = make_param_dict_generator(pgD)
    >>> next(gen)
    OrderedDict([('C', 1), ('alpha', 0.5)])
    >>> next(gen)
    OrderedDict([('C', 1), ('alpha', 2.5)])
    >>> next(gen)
    OrderedDict([('C', 2), ('alpha', 0.5)])
    '''
    # NOTE: doctest previously used the Python-2-only `gen.next()`;
    # `next(gen)` works on both Python 2 and 3.
    list_of_keys = list(param_grid_dict.keys())
    list_of_grids = list(param_grid_dict.values())
    # itertools.product iterates the rightmost grid fastest.
    for list_of_vals in itertools.product(*list_of_grids):
        yield OrderedDict(zip(list_of_keys, list_of_vals))
Example 11
Project: django-xadmin   Author: MarkHoo   File: actions.py    MIT License 6 votes vote down vote up
def get_actions(self):
        """Return an OrderedDict of action name -> (callable, name, desc, icon)."""
        if self.actions is None:
            return OrderedDict()

        gathered = [self.get_action(a) for a in self.global_actions]

        # Add actions declared on each class of the view's MRO, starting
        # from the most basic class.
        for klass in self.admin_view.__class__.mro()[::-1]:
            class_actions = getattr(klass, 'actions', [])
            if class_actions:
                gathered += [self.get_action(a) for a in class_actions]

        ordered = OrderedDict()
        # get_action might have returned None; only keep real entries.
        for item in gathered:
            if item:
                ac, name, desc, icon = item
                ordered[name] = (ac, name, desc, icon)
        return ordered
Example 12
Project: django-xadmin   Author: MarkHoo   File: actions.py    MIT License 6 votes vote down vote up
def get_actions(self):
        """Assemble the available admin actions, keyed by action name."""
        if self.actions is None:
            return OrderedDict()

        pending = []
        pending.extend(self.get_action(a) for a in self.global_actions)

        # Walk base classes first so derived classes append after them.
        for cls in reversed(self.admin_view.__class__.mro()):
            declared = getattr(cls, 'actions', [])
            if not declared:
                continue
            pending.extend(self.get_action(a) for a in declared)

        # get_action may yield None for unknown actions; build the ordered
        # mapping from the remaining (ac, name, desc, icon) tuples.
        actions = OrderedDict()
        for entry in pending:
            if not entry:
                continue
            ac, name, desc, icon = entry
            actions[name] = (ac, name, desc, icon)
        return actions
Example 13
Project: slidoc   Author: mitotic   File: sdprint.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def get_roster(sheet_url, hmac_key, session_name):
    # Returns roster as OrderedDict or None if no roster sheet
    admin_user = 'admin'
    admin_token = sliauth.gen_auth_token(hmac_key, admin_user, 'admin', prefixed=True)

    post_params = {'sheet': ROSTER_SHEET, 'id': session_name, 'admin': admin_user,
                   'token': admin_token, 'get': '1', 'getheaders': '1', 'all': '1'}
    retval = http_post(sheet_url, post_params)

    if retval['result'] != 'success':
        # A missing roster sheet is expected; anything else is fatal.
        if retval['error'].startswith('Error:NOSHEET:'):
            return None
        sys.exit("Error in accessing roster session '%s': %s" % (session_name, retval['error']))

    all_rows = retval.get('value')
    headers = retval['headers']
    if headers[1] != 'id' or headers[0] != 'name':
        sys.exit('Incorrect roster headers: '+str(headers))

    # Map id -> name, skipping rows whose status column says 'dropped'.
    statusCol = 1 + headers.index('status')
    return OrderedDict( (row[1], row[0]) for row in all_rows if row[statusCol-1] != 'dropped')
Example 14
Project: slidoc   Author: mitotic   File: sdserver.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def get_session_props(cls):
        """Parse the 'session_properties' site setting into an OrderedDict of per-session props."""
        prop_str = Global.site_settings.get('session_properties','')
        # Fall back to the class defaults when the setting is empty.
        if prop_str.strip():
            entries = prop_str.strip().split(';')
        else:
            entries = cls.defaultSessionTypes

        props_dict = OrderedDict()
        for entry in entries:
            # Each entry: name[:pace[:access[:description]]]
            comps = entry.strip().split(':')
            session_name = comps[0].strip()
            if not session_name:
                continue
            props = {}
            if len(comps) > 1 and comps[1].strip().isdigit():
                props['pace'] = int(comps[1].strip())
            else:
                props['pace'] = 0
            props['access'] = comps[2].strip().lower() if len(comps) > 2 else ''
            props['description'] = comps[3].strip() if len(comps) > 3 else ''
            props_dict[session_name] = props
        return props_dict
Example 15
Project: slidoc   Author: mitotic   File: slidoc.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def __init__(self, http_post_func=None, return_html=False, error_exit=False):
        """Initialize tag, question and reference tracking state."""
        self.http_post = http_post_func or sliauth.http_post
        self.return_html = return_html
        self.error_exit = error_exit

        # Concept-tag indexes: primary/secondary, for all slides and for questions.
        self.primary_tags = defaultdict(OrderedDict)
        self.sec_tags = defaultdict(OrderedDict)
        self.primary_qtags = defaultdict(OrderedDict)
        self.sec_qtags = defaultdict(OrderedDict)
        self.all_tags = {}

        # Question bookkeeping, in insertion order.
        self.questions = OrderedDict()
        self.concept_questions = defaultdict(list)

        # Reference tracking: per-document and per-chapter counters, plus
        # a set for detecting duplicate references.
        self.ref_tracker = dict()
        self.ref_counter = defaultdict(int)
        self.chapter_ref_counter = defaultdict(int)
        self.dup_ref_tracker = set()
Example 16
Project: slidoc   Author: mitotic   File: sdproxy.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def makeRosterMap(colName, lowercase=False, unique=False):
    # Return map of other IDs from colName to roster ID
    # If unique, raise exception for duplicated values in colName
    colValues = lookupRoster(colName) or {}
    rosterMap = OrderedDict()
    for userId, otherIds in colValues.items():
        # 'name' is a single value, 'altid' may be non-string; everything
        # else is a comma-separated list.
        if colName == 'name':
            parts = [otherIds]
        elif colName == 'altid':
            parts = [str(otherIds)]
        else:
            parts = otherIds.strip().split(',')
        for part in parts:
            otherId = part.strip()
            if lowercase:
                otherId = otherId.lower()
            if not otherId:
                continue
            if unique and otherId in rosterMap:
                raise Exception('Duplicate occurrence of %s value %s' % (colName, otherId))
            rosterMap[otherId] = userId
    return rosterMap
Example 17
Project: disentangling_conditional_gans   Author: zalandoresearch   File: tfutil.py    MIT License 6 votes vote down vote up
def _init_fields(self):
        """Reset every per-network field to its pristine default."""
        # Identification and build arguments.
        self.name = None                 # User-specified name, defaults to build func name if None.
        self.scope = None                # Unique TF graph scope, derived from the user-specified name.
        self.static_kwargs = dict()      # Arguments passed to the user-supplied build func.
        # Tensor counts and shapes (NC or NCHW), including minibatch dimension.
        self.num_inputs = 0
        self.num_outputs = 0
        self.input_shapes = [[]]
        self.output_shapes = [[]]
        self.input_shape = []            # Short-hand for input_shapes[0].
        self.output_shape = []           # Short-hand for output_shapes[0].
        # Template-graph placeholders/tensors and their name strings.
        self.input_templates = []
        self.output_templates = []
        self.input_names = []
        self.output_names = []
        # Variables keyed by local name, in creation order.
        self.vars = OrderedDict()        # All variables (localname => var).
        self.trainables = OrderedDict()  # Trainable variables (localname => var).
        # Build-function bookkeeping and the Network.run() cache.
        self._build_func = None          # User-supplied build function that constructs the network.
        self._build_func_name = None     # Name of the build function.
        self._build_module_src = None    # Full source code of the module containing the build function.
        self._run_cache = dict()         # Cached graph data for Network.run().
Example 18
Project: mmdetection   Author: open-mmlab   File: train.py    Apache License 2.0 6 votes vote down vote up
def parse_losses(losses):
    """Reduce raw loss outputs into a total loss and a dict of scalar logs.

    Each value of `losses` is either a tensor (mean-reduced) or a list of
    tensors (each mean-reduced, then summed).  Every entry whose key
    contains 'loss' contributes to the returned total.
    """
    log_vars = OrderedDict()
    for name, value in losses.items():
        if isinstance(value, torch.Tensor):
            reduced = value.mean()
        elif isinstance(value, list):
            reduced = sum(part.mean() for part in value)
        else:
            raise TypeError(
                '{} is not a tensor or list of tensors'.format(name))
        log_vars[name] = reduced

    loss = sum(v for k, v in log_vars.items() if 'loss' in k)
    log_vars['loss'] = loss

    for name, value in log_vars.items():
        # reduce loss when distributed training
        if dist.is_initialized():
            value = value.data.clone()
            dist.all_reduce(value.div_(dist.get_world_size()))
        log_vars[name] = value.item()

    return loss, log_vars
Example 19
Project: mmdetection   Author: open-mmlab   File: dist_utils.py    Apache License 2.0 6 votes vote down vote up
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
    """All-reduce `tensors` in coalesced buckets, averaging by `world_size`.

    A positive `bucket_size_mb` packs tensors into size-limited buckets;
    otherwise tensors are simply grouped by dtype.
    """
    if bucket_size_mb > 0:
        bucket_bytes = bucket_size_mb * 1024 * 1024
        buckets = _take_tensors(tensors, bucket_bytes)
    else:
        grouped = OrderedDict()
        for tensor in tensors:
            grouped.setdefault(tensor.type(), []).append(tensor)
        buckets = grouped.values()

    for bucket in buckets:
        # Flatten, reduce once, average, then copy results back in place.
        flat = _flatten_dense_tensors(bucket)
        dist.all_reduce(flat)
        flat.div_(world_size)
        for tensor, synced in zip(
                bucket, _unflatten_dense_tensors(flat, bucket)):
            tensor.copy_(synced)
Example 20
Project: mmdetection   Author: open-mmlab   File: upgrade_model_version.py    Apache License 2.0 6 votes vote down vote up
def convert(in_file, out_file):
    """Convert keys in checkpoints.

    There can be some breaking changes during the development of mmdetection,
    and this tool is used for upgrading checkpoints trained with old versions
    to the latest one.
    """
    checkpoint = torch.load(in_file)
    in_state_dict = checkpoint.pop('state_dict')
    out_state_dict = OrderedDict()
    for key, val in in_state_dict.items():
        # Use ConvModule instead of nn.Conv2d in RetinaNet
        # cls_convs.0.weight -> cls_convs.0.conv.weight
        # Dots are escaped and multi-digit layer indices allowed; the old
        # pattern used a bare '.' (matches any char) and a single '\d',
        # which missed layers >= 10.
        m = re.search(r'(cls_convs|reg_convs)\.\d+\.(weight|bias)', key)
        if m is not None:
            param = m.groups()[1]
            new_key = key.replace(param, 'conv.{}'.format(param))
            out_state_dict[new_key] = val
            continue

        out_state_dict[key] = val
    checkpoint['state_dict'] = out_state_dict
    torch.save(checkpoint, out_file)
Example 21
Project: neural-fingerprinting   Author: StephanZheng   File: madry_mnist_model.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def fprop(self, x):
        """Forward pass returning a dict of every intermediate activation.

        NOTE(review): `deterministic_dict(locals())` below snapshots ALL
        local variables by their names, so the local variable names in this
        function are part of the returned dict's keys -- do not rename them.
        """
        # Overwritten by deterministic_dict(locals()) below; kept as-is.
        output = OrderedDict()
        # first convolutional layer
        h_conv1 = tf.nn.relu(self._conv2d(x, self.W_conv1) + self.b_conv1)
        h_pool1 = self._max_pool_2x2(h_conv1)

        # second convolutional layer
        h_conv2 = tf.nn.relu(
            self._conv2d(h_pool1, self.W_conv2) + self.b_conv2)
        h_pool2 = self._max_pool_2x2(h_conv2)

        # first fully connected layer
        # flatten the 7x7x64 pooled feature map into a vector per example
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, self.W_fc1) + self.b_fc1)

        # output layer
        logits = tf.matmul(h_fc1, self.W_fc2) + self.b_fc2

        # Capture all locals as the output dict, dropping `self` and adding
        # the softmax probabilities under the O_PROBS key.
        output = deterministic_dict(locals())
        del output["self"]
        output[self.O_PROBS] = tf.nn.softmax(logits=logits)

        return output
Example 22
Project: models   Author: kipoi   File: dataloader_m.py    MIT License 6 votes vote down vote up
def read_cpg_profiles(filenames, log=None, *args, **kwargs):
    """Read methylation profiles.

    Input files can be gzip compressed.

    Returns
    -------
    dict
        `dict (key, value)`, where `key` is the output name and `value` the CpG
        table.
    """

    profiles = OrderedDict()
    for filename in filenames:
        # Optionally report progress through the supplied log callback.
        if log:
            log(filename)
        handle = get_fh(filename, 'r')
        output_name = split_ext(filename)
        profiles[output_name] = dat.read_cpg_profile(
            handle, sort=True, *args, **kwargs)
        handle.close()
    return profiles
Example 23
Project: models   Author: kipoi   File: dataloader_m.py    MIT License 6 votes vote down vote up
def map_cpg_tables(cpg_tables, chromo, chromo_pos):
    """Maps values from cpg_tables to `chromo_pos`.

    Positions in `cpg_tables` for `chromo`  must be a subset of `chromo_pos`.
    Inserts `dat.CPG_NAN` for uncovered positions.
    """
    chromo_pos.sort()
    result = OrderedDict()
    for name, table in six.iteritems(cpg_tables):
        # Restrict to the requested chromosome, ordered by position.
        chromo_table = table.loc[table.chromo == chromo].sort_values('pos')
        mapped = map_values(chromo_table.value.values,
                            chromo_table.pos.values,
                            chromo_pos)
        assert len(mapped) == len(chromo_pos)
        result[name] = mapped
    return result
Example 24
Project: BERT-Classification-Tutorial   Author: Socialbird-AILab   File: tokenization.py    Apache License 2.0 5 votes vote down vote up
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary mapping token -> index."""
    vocab = collections.OrderedDict()
    index = 0
    with tf.gfile.GFile(vocab_file, "r") as reader:
        while True:
            token = convert_to_unicode(reader.readline())
            # An empty read marks end of file.
            if not token:
                return vocab
            vocab[token.strip()] = index
            index += 1
Example 25
Project: BERT-Classification-Tutorial   Author: Socialbird-AILab   File: run_classifier.py    Apache License 2.0 5 votes vote down vote up
def file_based_convert_examples_to_features(
        examples, label_list, max_seq_length, tokenizer, output_file):
    """Convert a set of `InputExample`s to a TFRecord file."""

    writer = tf.python_io.TFRecordWriter(output_file)

    # Map each (sorted) label to a consecutive integer id.
    label_map = {label: i for i, label in enumerate(sorted(label_list))}

    def create_int_feature(values):
        # Wrap a list of ints as a TF int64 feature.
        return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

    for ex_index, example in enumerate(examples):
        # Progress logging every 10k examples.
        if ex_index % 10000 == 0:
            tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

        feature = convert_single_example(ex_index, example, label_map,
                                         max_seq_length, tokenizer)

        features = collections.OrderedDict()
        features["input_ids"] = create_int_feature(feature.input_ids)
        features["input_mask"] = create_int_feature(feature.input_mask)
        features["segment_ids"] = create_int_feature(feature.segment_ids)
        features["label_ids"] = create_int_feature([feature.label_id])

        tf_example = tf.train.Example(features=tf.train.Features(feature=features))
        writer.write(tf_example.SerializeToString())
    return label_map
Example 26
Project: PEAKachu   Author: tbischler   File: adaptive.py    ISC License 5 votes vote down vote up
def __init__(self, replicon_dict, max_proc, padj_threshold, mad_multiplier,
                 fc_cutoff, output_folder):
        """Store adaptive peak-calling parameters and prepare the output folder."""
        # Per-library data, kept in insertion order.
        self._lib_dict = OrderedDict()
        self._replicon_dict = replicon_dict  # own copy of replicon_dict
        self._max_proc = max_proc
        # Thresholds and cutoffs.
        self._padj_threshold = padj_threshold
        self._mad_multiplier = mad_multiplier
        self._fc_cutoff = fc_cutoff
        # Create the output folder if needed.
        self._output_folder = output_folder
        if not exists(self._output_folder):
            makedirs(self._output_folder)
Example 27
Project: incubator-spot   Author: apache   File: flow_oa.py    Apache License 2.0 5 votes vote down vote up
def _initialize_members(self, date, limit, logger):
        """Set up logging, result holders and configuration for flow OA."""
        # Reuse the caller's logger when given; otherwise create a new one.
        self._logger = logging.getLogger('OA.Flow') if logger else Util.get_logger('OA.Flow', create_file=False)

        # Required parameters and result holders.
        # NOTE(review): '_scrtip_path' is a historical typo kept for
        # compatibility with other methods that read this attribute.
        self._scrtip_path = os.path.dirname(os.path.abspath(__file__))
        self._date = date
        self._table_name = "flow"
        self._limit = limit
        self._flow_results = []
        self._flow_scores = []
        self._data_path = None
        self._ipynb_path = None
        self._ingest_summary_path = None
        self._results_delimiter = '\t'

        # get app configuration.
        self._spot_conf = Util.get_spot_conf()

        # Scored-fields configuration; OrderedDict hook preserves key order.
        conf_file = "{0}/flow_conf.json".format(self._scrtip_path)
        self._conf = json.loads(open(conf_file).read(), object_pairs_hook=OrderedDict)

        # initialize data engine
        self._db = self._spot_conf.get('conf', 'DBNAME').replace("'", "").replace('"', '')
Example 28
Project: incubator-spot   Author: apache   File: proxy_oa.py    Apache License 2.0 5 votes vote down vote up
def _initialize_members(self, date, limit, logger):
        """Set up logging, result holders and configuration for proxy OA."""
        # Reuse the caller's logger when given; otherwise create a new one.
        self._logger = logging.getLogger('OA.PROXY') if logger else Util.get_logger('OA.PROXY', create_file=False)

        # Required parameters and result holders.
        # NOTE(review): '_scrtip_path' is a historical typo kept for
        # compatibility with other methods that read this attribute.
        self._scrtip_path = os.path.dirname(os.path.abspath(__file__))
        self._date = date
        self._table_name = "proxy"
        self._limit = limit
        self._proxy_results = []
        self._proxy_scores = []
        self._proxy_scores_headers = []
        self._proxy_extra_columns = []
        self._data_path = None
        self._ipynb_path = None
        self._ingest_summary_path = None
        self._results_delimiter = '\t'

        # get app configuration.
        self._spot_conf = Util.get_spot_conf()

        # Scored-fields configuration; OrderedDict hook preserves key order.
        conf_file = "{0}/proxy_conf.json".format(self._scrtip_path)
        self._conf = json.loads(open(conf_file).read(), object_pairs_hook=OrderedDict)

        # initialize data engine
        self._db = self._spot_conf.get('conf', 'DBNAME').replace("'", "").replace('"', '')
Example 29
Project: incubator-spot   Author: apache   File: dns_oa.py    Apache License 2.0 5 votes vote down vote up
def _initialize_members(self, date, limit, logger):
        """Set up logging, result holders and configuration for DNS OA."""
        # Reuse the caller's logger when given; otherwise create a new one.
        self._logger = logging.getLogger('OA.DNS') if logger else Util.get_logger('OA.DNS', create_file=False)

        # Required parameters and result holders.
        # NOTE(review): '_scrtip_path' is a historical typo kept for
        # compatibility with other methods that read this attribute.
        self._scrtip_path = os.path.dirname(os.path.abspath(__file__))
        self._date = date
        self._table_name = "dns"
        self._limit = limit
        self._dns_results = []
        self._dns_scores = []
        self._dns_scores_headers = []
        self._data_path = None
        self._ipynb_path = None
        self._ingest_summary_path = None
        self._results_delimiter = '\t'
        self._details_limit = 250

        # get app configuration.
        self._spot_conf = Util.get_spot_conf()

        # Scored-fields configuration; OrderedDict hook preserves key order.
        conf_file = "{0}/dns_conf.json".format(self._scrtip_path)
        self._conf = json.loads(open(conf_file).read(), object_pairs_hook=OrderedDict)

        # initialize data engine
        self._db = self._spot_conf.get('conf', 'DBNAME').replace("'", "").replace('"', '')
Example 30
Project: jumpserver-python-sdk   Author: jumpserver   File: models.py    GNU General Public License v2.0 5 votes vote down vote up
def protocols_as_dict(self):
        """Parse self.protocols ('name/port' strings) into an ordered name->port map."""
        mapping = OrderedDict()
        for entry in self.protocols:
            # Entries without a separator cannot be parsed; skip them.
            if '/' not in entry:
                continue
            name, port = entry.split('/')[:2]
            # Both the name and the port part must be non-empty.
            if name and port:
                mapping[name] = int(port)
        return mapping
Example 31
Project: cat-bbs   Author: aleju   File: plotting.py    MIT License 5 votes vote down vote up
def __init__(self):
        """Start with no line groups; insertion order of groups is preserved."""
        self.line_groups = OrderedDict()
Example 32
Project: cat-bbs   Author: aleju   File: plotting.py    MIT License 5 votes vote down vote up
def __init__(self, group_name, line_names, increasing=True):
        """Create a named group with one empty Line per name, in the given order."""
        self.group_name = group_name
        self.increasing = increasing
        self.lines = OrderedDict((name, Line()) for name in line_names)
Example 33
Project: pyblish-win   Author: pyblish   File: util.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def _ordered_count(iterable):
    'Return dict of element counts, in the order they were first seen'
    counts = OrderedDict()
    for element in iterable:
        if element in counts:
            counts[element] += 1
        else:
            counts[element] = 1
    return counts
Example 34
Project: pyblish-win   Author: pyblish   File: test_unicode.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_object_pairs_hook_with_unicode(self):
        """object_pairs_hook must work for unicode input and beat object_hook."""
        s = u'{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'
        p = [(u"xkd", 1), (u"kcw", 2), (u"art", 3), (u"hxm", 4),
             (u"qrt", 5), (u"pad", 6), (u"hoy", 7)]
        identity = lambda pairs: pairs
        self.assertEqual(self.loads(s), eval(s))
        self.assertEqual(self.loads(s, object_pairs_hook=identity), p)
        od = self.loads(s, object_pairs_hook=OrderedDict)
        self.assertEqual(od, OrderedDict(p))
        self.assertEqual(type(od), OrderedDict)
        # the object_pairs_hook takes priority over the object_hook
        self.assertEqual(
            self.loads(s,
                       object_pairs_hook=OrderedDict,
                       object_hook=lambda x: None),
            OrderedDict(p))
Example 35
Project: pyblish-win   Author: pyblish   File: test_encode_basestring_ascii.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_ordered_dict(self):
        # See issue 6105: dumps must serialize an OrderedDict's entries in
        # insertion order, not hash order.
        items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)]
        s = self.dumps(OrderedDict(items))
        self.assertEqual(s, '{"one": 1, "two": 2, "three": 3, "four": 4, "five": 5}')
Example 36
Project: xadmin_bugfix   Author: vip68   File: list.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def get_ordering_field_columns(self):
        """
        Return an OrderedDict mapping ordering field names to 'asc'/'desc',
        taken from the ORDER_VAR request parameter when present, otherwise
        guessed from the default ordering.
        """

        # We must cope with more than one column having the same underlying sort
        # field, so we base things on column numbers.
        ordering = self._get_default_ordering()
        ordering_fields = OrderedDict()
        if ORDER_VAR not in self.params or not self.params[ORDER_VAR]:
            # for ordering specified on ModelAdmin or model Meta, we don't know
            # the right column numbers absolutely, because there might be more
            # than one column associated with that ordering, so we guess.
            for field in ordering:
                if field.startswith('-'):
                    # Leading '-' is the descending-order marker.
                    field = field[1:]
                    order_type = 'desc'
                else:
                    order_type = 'asc'
                # Record the first displayed column backed by this sort field.
                for attr in self.list_display:
                    if self.get_ordering_field(attr) == field:
                        ordering_fields[field] = order_type
                        break
        else:
            # Request ordering: '.'-joined entries of "field" or "-field";
            # rpartition splits off a '-' prefix when present.
            for p in self.params[ORDER_VAR].split('.'):
                none, pfx, field_name = p.rpartition('-')
                ordering_fields[field_name] = 'desc' if pfx == '-' else 'asc'
        return ordering_fields
Example 37
Project: xadmin_bugfix   Author: vip68   File: ajax.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def get_response(self, __):
        """Render quick-detail data; ``__`` is the default-response callable."""
        if self.request.GET.get('_format') == 'html':
            # HTML explicitly requested: swap in the quick-detail template
            # and fall back to the default rendering path.
            self.admin_view.detail_template = 'xadmin/views/quick_detail.html'
            return __()

        form = self.admin_view.form_obj
        layout = form.helper.layout

        results = []

        # Collect (label, value) for every field in the layout, in layout
        # order, and serialize them as an ordered mapping.
        for p, f in layout.get_field_names():
            result = self.admin_view.get_field_result(f)
            results.append((result.label, result.val))

        return self.render_response(OrderedDict(results))
Example 38
Project: xadmin_bugfix   Author: vip68   File: wizard.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def get_form_list(self):
        """Return the wizard's step-name -> form-class mapping.

        Built lazily from ``self.wizard_form_list`` on first access and
        cached on ``self._form_list``.
        """
        if not hasattr(self, '_form_list'):
            init_form_list = OrderedDict()

            assert len(
                self.wizard_form_list) > 0, 'at least one form is needed'

            # wizard_form_list holds (name, form_class) pairs; keys are
            # normalized to text and insertion order defines step order.
            for i, form in enumerate(self.wizard_form_list):
                init_form_list[smart_text(form[0])] = form[1]

            self._form_list = init_form_list

        return self._form_list

    # Plugin replace methods
Example 39
Project: serious-django-graphene   Author: serioeseGmbH   File: __init__.py    MIT License 5 votes vote down vote up
def fields_for_form(form, only_fields, exclude_fields):
    """Convert a form's fields, honoring the only/exclude filters.

    A field is kept unless it appears in ``exclude_fields``, or
    ``only_fields`` is non-empty and does not name it.  Results preserve the
    form's field order.
    """
    def _wanted(field_name):
        # Skip when an allowlist exists and the name is absent from it.
        if only_fields and field_name not in only_fields:
            return False
        return field_name not in exclude_fields

    return OrderedDict(
        (name, convert_form_field(field))
        for name, field in form.fields.items()
        if _wanted(name)
    )
Example 40
Project: aospy   Author: spencerahill   File: calc.py    Apache License 2.0 5 votes vote down vote up
def _apply_all_time_reductions(self, data):
        """Apply all requested time reductions to the data.

        Each entry of ``self.dtype_out_time`` is a dotted spec whose last
        component names the reduction function; specs containing 'reg' are
        routed through regional calculations instead.

        :param data: Data passed through to the reduction helpers unchanged.
        :return: OrderedDict mapping each spec string to its reduced result,
            sorted by spec name for deterministic iteration.
        """
        logging.info(self._print_verbose("Applying desired time-"
                                         "reduction methods."))
        reduc_specs = [r.split('.') for r in self.dtype_out_time]
        reduced = {}
        for reduc, specs in zip(self.dtype_out_time, reduc_specs):
            # The final dotted component is the reduction function name.
            func = specs[-1]
            if 'reg' in specs:
                reduced.update({reduc: self.region_calcs(data, func)})
            else:
                reduced.update({reduc: self._time_reduce(data, func)})
        return OrderedDict(sorted(reduced.items(), key=lambda t: t[0]))
Example 41
Project: RF-Monitor   Author: EarToEarOak   File: file.py    GNU General Public License v2.0 5 votes vote down vote up
def save_recordings(filename, freq, gain, cal, dynP, monitors):
    """Serialize monitor recordings to *filename* as JSON.

    :param filename: Destination path.
    :param freq: Centre frequency in MHz (stored as Hz).
    :param gain: Receiver gain.
    :param cal: Calibration value.
    :param dynP: Dynamic-percentile setting.
    :param monitors: Iterable of monitor objects exposing the get_* accessors
        used below.
    """
    jsonMonitors = []
    for monitor in monitors:
        jsonMonitor = OrderedDict()
        jsonMonitor['Colour'] = monitor.get_colour()
        jsonMonitor['Enabled'] = monitor.get_enabled()
        jsonMonitor['Dynamic'] = monitor.get_dynamic()
        jsonMonitor['Alert'] = monitor.get_alert()
        jsonMonitor['Frequency'] = int(monitor.get_frequency() * 1e6)
        jsonMonitor['Threshold'] = monitor.get_threshold()
        jsonMonitor['Signals'] = [signal.to_list()
                                  for signal in monitor.get_signals()]
        jsonMonitor['Periods'] = [period.to_list()
                                  for period in monitor.get_periods()]
        jsonMonitors.append(jsonMonitor)

    fileData = OrderedDict()
    fileData['Version'] = VERSION
    fileData['Frequency'] = freq * 1e6
    fileData['Gain'] = gain
    fileData['Calibration'] = cal
    fileData['DynamicPercentile'] = dynP
    fileData['Monitors'] = jsonMonitors

    data = [APP_NAME, fileData]

    # Context manager guarantees the handle is closed even if the dump or
    # write raises (the original open/write/close leaked it on error).
    # NOTE(review): json.dumps returns str while the mode is 'wb', which only
    # works on Python 2 — confirm before porting to Python 3.
    with open(filename, 'wb') as handle:
        handle.write(json.dumps(data, indent=4))
Example 42
Project: RF-Monitor   Author: EarToEarOak   File: file.py    GNU General Public License v2.0 5 votes vote down vote up
def format_recording(freq, recording):
    """Serialize a single recording as a JSON string.

    :param freq: Frequency in MHz; stored as an integer number of Hz.
    :param recording: Object with ``start``, ``end``, ``level`` and an
        optional ``location`` attribute (omitted from output when None).
    :return: JSON text of the form {"Frequency": ..., "Signal": {...}}.
    """
    pairs = [('Start', recording.start),
             ('End', recording.end),
             ('Level', recording.level)]
    if recording.location is not None:
        pairs.append(('location', recording.location))

    payload = OrderedDict([('Frequency', int(freq * 1e6)),
                           ('Signal', OrderedDict(pairs))])
    return json.dumps(payload)
Example 43
Project: Mzo-Cli   Author: jamesstidard   File: accounts.py    MIT License 5 votes vote down vote up
def accounts(ctx):
    """Fetch the user's Monzo accounts and build id -> description choices.

    NOTE(review): the body uses ``await``, so this must actually be an
    ``async def`` (keyword/decorator apparently lost in extraction) — confirm
    against the original project.
    """
    url = 'https://api.monzo.com/accounts'
    headers = {'Authorization': f'Bearer {ctx.obj.access_token}'}

    # Fetch and unpack the account list from the API response.
    resp = await ctx.obj.http.get(url, headers=headers)
    accounts_ = (await resp.json())['accounts']

    # Preserve the API's account ordering for any subsequent prompt.
    choices = OrderedDict((a['id'], f"{a['description']}") for a in accounts_)
Example 44
Project: prediction-constrained-topic-models   Author: dtak   File: select_best_runs_and_snapshots.py    MIT License 5 votes vote down vote up
def load_default_column_name_dict(output_data_type='binary'):
    """Load column name/type pairs for snapshot performance metrics.

    :param output_data_type: Only 'binary' is handled here; any other value
        falls through and implicitly returns None.
    :return: OrderedDict mapping column name -> type, in schema order.
    """
    if output_data_type == 'binary':
        # Path is rooted at the $PC_REPO_DIR environment variable.
        json_fpath = os.path.expandvars(
            "$PC_REPO_DIR/pc_toolbox/utils_snapshots/snapshot_perf_metrics__binary_outcomes.json")
        with open(json_fpath, 'r') as fd:
            j = json.load(fd)
            # Data-package style layout: resources[0].schema.fields is a
            # list of {'name': ..., 'type': ...} dicts.
            kv_pairs = [
                (a['name'], a['type'])
                    for a in j['resources'][0]['schema']['fields']]
            return OrderedDict(kv_pairs)
Example 45
Project: django-xadmin   Author: MarkHoo   File: list.py    MIT License 5 votes vote down vote up
def get_ordering_field_columns(self):
        """
        Return an OrderedDict mapping ordering field names to 'asc'/'desc',
        taken from the ORDER_VAR request parameter when present, otherwise
        guessed from the default ordering.
        """

        # We must cope with more than one column having the same underlying sort
        # field, so we base things on column numbers.
        ordering = self._get_default_ordering()
        ordering_fields = OrderedDict()
        if ORDER_VAR not in self.params or not self.params[ORDER_VAR]:
            # for ordering specified on ModelAdmin or model Meta, we don't know
            # the right column numbers absolutely, because there might be more
            # than one column associated with that ordering, so we guess.
            for field in ordering:
                if field.startswith('-'):
                    # Leading '-' is the descending-order marker.
                    field = field[1:]
                    order_type = 'desc'
                else:
                    order_type = 'asc'
                # Record the first displayed column backed by this sort field.
                for attr in self.list_display:
                    if self.get_ordering_field(attr) == field:
                        ordering_fields[field] = order_type
                        break
        else:
            # Request ordering: '.'-joined entries of "field" or "-field";
            # rpartition splits off a '-' prefix when present.
            for p in self.params[ORDER_VAR].split('.'):
                none, pfx, field_name = p.rpartition('-')
                ordering_fields[field_name] = 'desc' if pfx == '-' else 'asc'
        return ordering_fields
Example 46
Project: django-xadmin   Author: MarkHoo   File: ajax.py    MIT License 5 votes vote down vote up
def get_response(self, __):
        """Render quick-detail data; ``__`` is the default-response callable."""
        if self.request.GET.get('_format') == 'html':
            # HTML explicitly requested: swap in the quick-detail template
            # and fall back to the default rendering path.
            self.admin_view.detail_template = 'xadmin/views/quick_detail.html'
            return __()

        form = self.admin_view.form_obj
        layout = form.helper.layout

        results = []

        # Collect (label, value) for every field in the layout, in layout
        # order, and serialize them as an ordered mapping.
        for p, f in layout.get_field_names():
            result = self.admin_view.get_field_result(f)
            results.append((result.label, result.val))

        return self.render_response(OrderedDict(results))
Example 47
Project: django-xadmin   Author: MarkHoo   File: wizard.py    MIT License 5 votes vote down vote up
def get_form_list(self):
        """Return the wizard's step-name -> form-class mapping.

        Built lazily from ``self.wizard_form_list`` on first access and
        cached on ``self._form_list``.
        """
        if not hasattr(self, '_form_list'):
            init_form_list = OrderedDict()

            assert len(
                self.wizard_form_list) > 0, 'at least one form is needed'

            # wizard_form_list holds (name, form_class) pairs; keys are
            # normalized to text and insertion order defines step order.
            for i, form in enumerate(self.wizard_form_list):
                init_form_list[smart_text(form[0])] = form[1]

            self._form_list = init_form_list

        return self._form_list

    # Plugin replace methods
Example 48
Project: django-xadmin   Author: MarkHoo   File: list.py    MIT License 5 votes vote down vote up
def get_ordering_field_columns(self):
        """
        Return an OrderedDict mapping ordering field names to 'asc'/'desc',
        taken from the ORDER_VAR request parameter when present, otherwise
        guessed from the default ordering.
        """

        # We must cope with more than one column having the same underlying sort
        # field, so we base things on column numbers.
        ordering = self._get_default_ordering()
        ordering_fields = OrderedDict()
        if ORDER_VAR not in self.params or not self.params[ORDER_VAR]:
            # for ordering specified on ModelAdmin or model Meta, we don't know
            # the right column numbers absolutely, because there might be more
            # than one column associated with that ordering, so we guess.
            for field in ordering:
                if field.startswith('-'):
                    # Leading '-' is the descending-order marker.
                    field = field[1:]
                    order_type = 'desc'
                else:
                    order_type = 'asc'
                # Record the first displayed column backed by this sort field.
                for attr in self.list_display:
                    if self.get_ordering_field(attr) == field:
                        ordering_fields[field] = order_type
                        break
        else:
            # Request ordering: '.'-joined entries of "field" or "-field";
            # rpartition splits off a '-' prefix when present.
            for p in self.params[ORDER_VAR].split('.'):
                none, pfx, field_name = p.rpartition('-')
                ordering_fields[field_name] = 'desc' if pfx == '-' else 'asc'
        return ordering_fields
Example 49
Project: django-xadmin   Author: MarkHoo   File: ajax.py    MIT License 5 votes vote down vote up
def get_response(self, __):
        """Render quick-detail data; ``__`` is the default-response callable."""
        if self.request.GET.get('_format') == 'html':
            # HTML explicitly requested: swap in the quick-detail template
            # and fall back to the default rendering path.
            self.admin_view.detail_template = 'xadmin/views/quick_detail.html'
            return __()

        form = self.admin_view.form_obj
        layout = form.helper.layout

        results = []

        # Collect (label, value) for every field in the layout, in layout
        # order, and serialize them as an ordered mapping.
        for p, f in layout.get_field_names():
            result = self.admin_view.get_field_result(f)
            results.append((result.label, result.val))

        return self.render_response(OrderedDict(results))
Example 50
Project: django-xadmin   Author: MarkHoo   File: wizard.py    MIT License 5 votes vote down vote up
def get_form_list(self):
        """Return the wizard's step-name -> form-class mapping.

        Built lazily from ``self.wizard_form_list`` on first access and
        cached on ``self._form_list``.
        """
        if not hasattr(self, '_form_list'):
            init_form_list = OrderedDict()

            assert len(
                self.wizard_form_list) > 0, 'at least one form is needed'

            # wizard_form_list holds (name, form_class) pairs; keys are
            # normalized to text and insertion order defines step order.
            for i, form in enumerate(self.wizard_form_list):
                init_form_list[smart_text(form[0])] = form[1]

            self._form_list = init_form_list

        return self._form_list

    # Plugin replace methods
Example 51
Project: django-xadmin   Author: MarkHoo   File: list.py    MIT License 5 votes vote down vote up
def get_ordering_field_columns(self):
        """
        Return an OrderedDict mapping ordering field names to 'asc'/'desc',
        taken from the ORDER_VAR request parameter when present, otherwise
        guessed from the default ordering.
        """

        # We must cope with more than one column having the same underlying sort
        # field, so we base things on column numbers.
        ordering = self._get_default_ordering()
        ordering_fields = OrderedDict()
        if ORDER_VAR not in self.params or not self.params[ORDER_VAR]:
            # for ordering specified on ModelAdmin or model Meta, we don't know
            # the right column numbers absolutely, because there might be more
            # than one column associated with that ordering, so we guess.
            for field in ordering:
                if field.startswith('-'):
                    # Leading '-' is the descending-order marker.
                    field = field[1:]
                    order_type = 'desc'
                else:
                    order_type = 'asc'
                # Record the first displayed column backed by this sort field.
                for attr in self.list_display:
                    if self.get_ordering_field(attr) == field:
                        ordering_fields[field] = order_type
                        break
        else:
            # Request ordering: '.'-joined entries of "field" or "-field";
            # rpartition splits off a '-' prefix when present.
            for p in self.params[ORDER_VAR].split('.'):
                none, pfx, field_name = p.rpartition('-')
                ordering_fields[field_name] = 'desc' if pfx == '-' else 'asc'
        return ordering_fields
Example 52
Project: django-xadmin   Author: MarkHoo   File: ajax.py    MIT License 5 votes vote down vote up
def get_response(self, __):
        """Render quick-detail data; ``__`` is the default-response callable."""
        if self.request.GET.get('_format') == 'html':
            # HTML explicitly requested: swap in the quick-detail template
            # and fall back to the default rendering path.
            self.admin_view.detail_template = 'xadmin/views/quick_detail.html'
            return __()

        form = self.admin_view.form_obj
        layout = form.helper.layout

        results = []

        # Collect (label, value) for every field in the layout, in layout
        # order, and serialize them as an ordered mapping.
        for p, f in layout.get_field_names():
            result = self.admin_view.get_field_result(f)
            results.append((result.label, result.val))

        return self.render_response(OrderedDict(results))
Example 53
Project: pypette   Author: csurfer   File: pipes.py    MIT License 5 votes vote down vote up
def __init__(self, name, gate=Gate.FAIL_FAST):
        """Constructor.

        :param name: Name given to the pipe object for identification.
        :type name: str
        :param gate: Gate policy; its ``.value`` is stored. Defaults to
            Gate.FAIL_FAST.
        """
        self.name = "Pipe({})".format(name)
        # OrderedDicts so jobs and their threads are tracked in the order
        # they were registered.
        self.job_map = OrderedDict()
        self.thread_map = OrderedDict()
        self.gate = gate.value
        # Pipes this pipe depends on; populated later by the caller.
        self.dependent_on = []
Example 54
Project: gpu-mux   Author: google   File: gpumux.py    Apache License 2.0 5 votes vote down vote up
def get_gpus():
    """Discover available GPUs via ``nvidia-smi --list-gpus``.

    :return: OrderedDict mapping GPU id (int) -> model string, after being
        filtered/reordered by apply_gpu_preferences().
    """
    # Fix: renamed the misspelled local ``cmd_outut`` and use splitlines().
    cmd_output = subprocess.check_output(['nvidia-smi', '--list-gpus']).decode()
    gpus = collections.OrderedDict()
    for line in cmd_output.splitlines():
        if not line:
            continue
        match = R_GPU.match(line)
        if match is None:
            # Skip unrecognized lines instead of crashing on None.group().
            continue
        gpus[int(match.group('id'))] = match.group('model')

    gpus = apply_gpu_preferences(gpus)
    print('GPUs available %d' % len(gpus))
    for k, v in gpus.items():
        print('%-2d %s' % (k, v))
    return gpus
Example 55
Project: slidoc   Author: mitotic   File: md2md.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def __init__(self, cmd_args, images_zipdata=None, files_url=''):
        """Set up converter state.

        :param cmd_args: Parsed command-line arguments (validated via
            ``arg_check``).
        :param images_zipdata: Optional bytes of a zip archive of images;
            when given, a basename -> archive-path map is built from it.
        :param files_url: Base URL prefix for referenced files.
        """
        self.cmd_args = cmd_args
        self.files_url = files_url
        self.arg_check(cmd_args)
        self.images_zipfile = None
        self.images_map = {}
        self.content_zip_bytes = None
        self.content_zip = None
        self.content_image_paths = set()
        if images_zipdata:
            self.images_zipfile = zipfile.ZipFile(io.BytesIO(images_zipdata), 'r')
            # Map image basenames to their full archive path, skipping
            # directory entries (whose basename is empty).
            self.images_map = dict( (os.path.basename(fpath), fpath) for fpath in self.images_zipfile.namelist() if os.path.basename(fpath))
        if 'zip' in self.cmd_args.images:
            # 'zip' image mode: collect converted content in an in-memory zip.
            self.content_zip_bytes = io.BytesIO()
            self.content_zip = zipfile.ZipFile(self.content_zip_bytes, 'w')

        self.skipping_notes = False
        self.cells_buffer = []
        self.buffered_markdown = []
        self.output = []
        # Link/reference bookkeeping; OrderedDicts preserve document order.
        self.imported_links = {}
        self.imported_defs = OrderedDict()
        self.imported_refs = OrderedDict()
        self.exported_refs = OrderedDict()
        self.image_refs = {}
        self.old_defs = OrderedDict()
        self.new_defs = OrderedDict()
        self.filedir = ''
        self.filename = ''
        self.fname = ''
        self.renumber_count = cmd_args.renumber
Example 56
Project: slidoc   Author: mitotic   File: sdproxy.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def lookupRoster(field, userId=None, regular=False):
    """Look up ``field`` in the roster sheet.

    :param field: Roster column name to retrieve.
    :param userId: When given, return only that user's value; otherwise an
        OrderedDict of id -> value for all rows.
    :param regular: When True, include only rows whose name is defined and
        does not start with '#'.
    :return: Single value, OrderedDict, or None when the sheet, column, or
        user is missing.
    :raises Exception: When the roster headers are invalid.
    """
    rosterSheet = getSheet(ROSTER_SHEET)
    if not rosterSheet:
        return None

    headers = rosterSheet.getHeaders()
    if not headers or headers[:4] != MIN_HEADERS:
        raise Exception('CUSTOM:Error: Invalid headers in roster_slidoc; first four should be "'+', '.join(MIN_HEADERS)+'", but found "'+', '.join(headers or [])+'"')

    colIndex = indexColumns(rosterSheet)
    if not colIndex.get(field):
        return None

    if userId:
        # Single-user lookup: map ids to row numbers (data starts at row 2).
        rowIndex = indexRows(rosterSheet, colIndex['id'], 2)
        if not rowIndex.get(userId):
            return None
        return lookupValues(userId, [field], ROSTER_SHEET, True)[0]

    # Bulk lookup: walk the id/name/field columns in parallel.
    idVals = getColumns('id', rosterSheet, 1, 2)
    names = getColumns('name', rosterSheet, 1, 2)
    fieldVals = getColumns(field, rosterSheet, 1, 2)
    fieldDict = OrderedDict()
    for j, idVal in enumerate(idVals):
        if regular and (not names[j] or names[j].startswith('#')):
            continue
        fieldDict[idVal] = fieldVals[j]
    return fieldDict
Example 57
Project: django-project-template   Author: kagxin   File: viewsets.py    MIT License 5 votes vote down vote up
def get_extra_action_url_map(self):
        """
        Build a map of {names: urls} for the extra actions.

        This method will noop if `detail` was not provided as a view initkwarg.

        :return: OrderedDict of view display name -> resolved URL, in action
            order; actions whose URL cannot be reversed are omitted.
        """
        action_urls = OrderedDict()

        # exit early if `detail` has not been provided
        if self.detail is None:
            return action_urls

        # filter for the relevant extra actions
        actions = [
            action for action in self.get_extra_actions()
            if action.detail == self.detail
        ]

        for action in actions:
            try:
                url_name = '%s-%s' % (self.basename, action.url_name)
                url = reverse(url_name, self.args, self.kwargs, request=self.request)
                # Instantiate the view with the action's kwargs solely to
                # obtain its display name for the map key.
                view = self.__class__(**action.kwargs)
                action_urls[view.get_view_name()] = url
            except NoReverseMatch:
                pass  # URL requires additional arguments, ignore

        return action_urls
Example 58
Project: icme2019   Author: ShenDezhou   File: din.py    MIT License 5 votes vote down vote up
def get_input(feature_dim_dict, seq_feature_list, seq_max_len):
    """Build the Keras Input layers for the DIN model.

    :param feature_dim_dict: Dict with 'sparse'/'dense' feature specs
        (consumed by create_singlefeat_dict).
    :param seq_feature_list: Names of the behavior-sequence features.
    :param seq_max_len: Maximum (padded) behavior-sequence length.
    :return: (sparse_input, dense_input, user_behavior_input,
        user_behavior_length); user_behavior_input maps feature name ->
        Input of shape (seq_max_len,).
    """
    sparse_input,dense_input = create_singlefeat_dict(feature_dim_dict)
    user_behavior_input = OrderedDict()
    for i,feat in enumerate(seq_feature_list):
        user_behavior_input[feat] = Input(shape=(seq_max_len,), name='seq_' + str(i) + '-' + feat)

    # Scalar input carrying each sample's true (unpadded) sequence length.
    user_behavior_length = Input(shape=(1,), name='seq_length')

    return sparse_input, dense_input, user_behavior_input, user_behavior_length
Example 59
Project: icme2019   Author: ShenDezhou   File: input_embedding.py    MIT License 5 votes vote down vote up
def create_singlefeat_dict(feature_dim_dict, prefix=''):
    """Create one Keras Input (shape (1,)) per sparse/dense feature.

    :param feature_dim_dict: Dict with 'sparse' and 'dense' feature lists.
    :param prefix: Optional prefix for the generated layer names.
    :return: (sparse_input, dense_input) OrderedDicts of Input layers.
    """
    sparse_input = OrderedDict()
    for i, feat in enumerate(feature_dim_dict["sparse"]):
        sparse_input[feat.name] = Input(
            shape=(1,), name=prefix+'sparse_' + str(i) + '-' + feat.name)

    dense_input = OrderedDict()

    for i, feat in enumerate(feature_dim_dict["dense"]):
        # NOTE(review): keyed by ``feat`` here but by ``feat.name`` above,
        # while the layer name still uses feat.name — confirm intentional.
        dense_input[feat] = Input(
            shape=(1,), name=prefix+'dense_' + str(i) + '-' + feat.name)

    return sparse_input, dense_input
Example 60
Project: rpm2swidtag   Author: swidtags   File: __init__.py    Apache License 2.0 5 votes vote down vote up
def _push_to_dict_array(d, key, subkey, value):
	if key:
		if key not in d:
			d[key] = OrderedDict()
		d = d[key]
	if subkey not in d:
		d[subkey] = OrderedDict()
	d[subkey][value] = True 
Example 61
Project: rpm2swidtag   Author: swidtags   File: __init__.py    Apache License 2.0 5 votes vote down vote up
def __init__(self):
		"""Initialize empty tag/filename indexes and the supplemental cache."""
		# Tags indexed by tag identifier (unordered).
		self.by_tags = {}
		# Filename index kept in insertion order.
		self.by_filenames = OrderedDict()
		# Lazily computed supplemental data; None until first use.
		self._cache_supplemental = None
Example 62
Project: fortran_input_reader   Author: miroi   File: update.py    MIT License 5 votes vote down vote up
def parse_cmake_module(s_in, defaults={}):
    """Extract the autocmake configuration embedded in a CMake module.

    NOTE(review): ``defaults={}`` is a shared mutable default; here it is
    only forwarded to ConfigParser.get(), but confirm before reusing.

    :param s_in: Full text of the CMake module.
    :param defaults: Interpolation defaults forwarded to ConfigParser.get().
    :return: defaultdict with keys such as 'docopt', 'define', 'export',
        'fetch', 'warning'; missing keys yield None.
    """

    parsed_config = collections.defaultdict(lambda: None)

    if 'autocmake.cfg configuration::' not in s_in:
        return parsed_config

    # Collect the bodies of '#.rst:' comment blocks: after a '#.rst:' marker,
    # keep consuming lines while they remain comments, stripping the leading
    # '# ' prefix from each.
    s_out = []
    is_rst_line = False
    for line in s_in.split('\n'):
        if is_rst_line:
            if len(line) > 0:
                if line[0] != '#':
                    is_rst_line = False
            else:
                is_rst_line = False
        if is_rst_line:
            s_out.append(line[2:])
        if '#.rst:' in line:
            is_rst_line = True

    # Everything after the 'autocmake.cfg configuration::' heading is the
    # embedded ini payload; undo the two-space rst literal-block indent.
    autocmake_entry = '\n'.join(s_out).split('autocmake.cfg configuration::')[1]
    autocmake_entry = autocmake_entry.replace('\n  ', '\n')

    # we prepend a fake section heading so that we can parse it with configparser
    autocmake_entry = '[foo]\n' + autocmake_entry

    buf = StringIO(autocmake_entry)
    # OrderedDict keeps option order stable across parses.
    config = ConfigParser(dict_type=collections.OrderedDict)
    config.readfp(buf)  # NOTE(review): readfp / 4-arg get() are Python-2-era APIs

    for section in config.sections():
        for s in ['docopt', 'define', 'export', 'fetch', 'warning']:
            if config.has_option(section, s):
                parsed_config[s] = config.get(section, s, 0, defaults)

    return parsed_config

# ------------------------------------------------------------------------------ 
Example 63
Project: LayoutExporterU   Author: aboood40091   File: common.py    GNU General Public License v3.0 5 votes vote down vote up
def getAsDict(self):
        """Return file metadata as a nested OrderedDict ready for export.

        Attribute-style keys carry an '@' prefix, matching the dict-to-markup
        convention used by the exporter.
        """
        create = OrderedDict([
            ("@user", self.user),
            ("@host", self.host),
            ("@date", self.date),
            ("@source", self.source),
        ])
        generator = OrderedDict([
            ("@name", self.generatorName),
            ("@version", self.generatorVersion),
        ])
        return OrderedDict([
            ("create", create),
            ("title", self.title),
            ("comment", self.comment),
            ("generator", generator),
        ])
Example 64
Project: gamereporter   Author: gamesbook   File: report_builder.py    MIT License 5 votes vote down vote up
def create_xls(self):
        """
        Create an XLS spreadsheet displaying games' details; one game per row
        """
        workbook = xlwt.Workbook()
        sheet = workbook.add_sheet("Summary")
        # Widen the first (name) column; xlwt widths are 1/256 char units.
        sheet.col(0).width = 256 * 60
        bold_style = xlwt.easyxf('font: bold 1')
        # (column header, game attribute) pairs, in output column order.
        _items = (
            ('Name', 'name'),
            ('ID', 'id'),
            ('Weight', 'averageweight'),
            ('% Weight', 'percentageweight'),
            ('Year', 'yearpublished'),
            ('Age', 'age'),
            ('Time', 'playingtime'),
            ('Min.', 'minplayers'),
            ('Max', 'maxplayers'),
            ('Mechanics', 'mechanics'),
            ('Categories', 'categories'),
        )
        items = OrderedDict(_items)
        # Header row.
        for col, head in enumerate(items.keys()):
            sheet.write(0, col, head, bold_style)
        for number, game in enumerate(self.games):
            if self.progress:
                # Parenthesized print works under both Python 2 and 3.
                print("Creating the row for game: %7d" % int(game.id))
            # BUG FIX: this loop was nested inside the progress check above,
            # so data rows were only written when progress output was on.
            for col, head in enumerate(items.keys()):
                sheet.write(number + 1, col, getattr(game, items[head]))
        workbook.save(self.filename)
Example 65
Project: disentangling_conditional_gans   Author: zalandoresearch   File: tfutil.py    MIT License 5 votes vote down vote up
def __init__(
        self,
        name                = 'Train',
        tf_optimizer        = 'tf.train.AdamOptimizer',
        learning_rate       = 0.001,
        use_loss_scaling    = False,
        loss_scaling_init   = 64.0,
        loss_scaling_inc    = 0.0005,
        loss_scaling_dec    = 1.0,
        **kwargs):
        """Wrap a TensorFlow optimizer with per-device state and optional
        loss scaling.

        :param name: Display name; seeds a unique TF variable scope.
        :param tf_optimizer: Dotted path of the optimizer class to import.
        :param learning_rate: Scalar or tensor learning rate.
        :param use_loss_scaling: Enable dynamic loss scaling.
        :param loss_scaling_init: Initial loss-scaling value (stored as log2
            per the _dev_ls_var comment below — confirm).
        :param loss_scaling_inc: Increment applied to the scaling variable.
        :param loss_scaling_dec: Decrement applied to the scaling variable.
        :param kwargs: Extra keyword args forwarded to the optimizer class.
        """

        # Init fields.
        self.name               = name
        self.learning_rate      = tf.convert_to_tensor(learning_rate)
        self.id                 = self.name.replace('/', '.')
        self.scope              = tf.get_default_graph().unique_name(self.id)
        self.optimizer_class    = import_obj(tf_optimizer)
        self.optimizer_kwargs   = dict(kwargs)
        self.use_loss_scaling   = use_loss_scaling
        self.loss_scaling_init  = loss_scaling_init
        self.loss_scaling_inc   = loss_scaling_inc
        self.loss_scaling_dec   = loss_scaling_dec
        self._grad_shapes       = None          # [shape, ...]
        self._dev_opt           = OrderedDict() # device => optimizer
        self._dev_grads         = OrderedDict() # device => [[(grad, var), ...], ...]
        self._dev_ls_var        = OrderedDict() # device => variable (log2 of loss scaling factor)
        self._updates_applied   = False

    # Register the gradients of the given loss function with respect to the given variables.
    # Intended to be called once per GPU. 
Example 66
Project: disentangling_conditional_gans   Author: zalandoresearch   File: tfutil.py    MIT License 5 votes vote down vote up
def _init_graph(self):
        """Build this network's template graph in its own variable scope.

        Infers input names from the build function's required positional
        parameters, instantiates float32 placeholders for them, calls the
        build function once with is_template_graph=True, and records
        input/output metadata plus variable maps.
        """
        # Collect inputs: every required positional argument of the build
        # function becomes a network input.
        self.input_names = []
        for param in inspect.signature(self._build_func).parameters.values():
            if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
                self.input_names.append(param.name)
        self.num_inputs = len(self.input_names)
        assert self.num_inputs >= 1

        # Choose name and scope.
        if self.name is None:
            self.name = self._build_func_name
        self.scope = tf.get_default_graph().unique_name(self.name.replace('/', '_'), mark_as_used=False)

        # Build template graph.
        with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
            assert tf.get_variable_scope().name == self.scope
            with absolute_name_scope(self.scope): # ignore surrounding name_scope
                with tf.control_dependencies(None): # ignore surrounding control_dependencies
                    self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
                    out_expr = self._build_func(*self.input_templates, is_template_graph=True, **self.static_kwargs)

        # Collect outputs: a single tensor or a tuple of tensors is accepted.
        assert is_tf_expression(out_expr) or isinstance(out_expr, tuple)
        self.output_templates = [out_expr] if is_tf_expression(out_expr) else list(out_expr)
        self.output_names = [t.name.split('/')[-1].split(':')[0] for t in self.output_templates]
        self.num_outputs = len(self.output_templates)
        assert self.num_outputs >= 1

        # Populate remaining fields (vars/trainables keyed by local name).
        self.input_shapes   = [shape_to_list(t.shape) for t in self.input_templates]
        self.output_shapes  = [shape_to_list(t.shape) for t in self.output_templates]
        self.input_shape    = self.input_shapes[0]
        self.output_shape   = self.output_shapes[0]
        self.vars           = OrderedDict([(self.get_var_localname(var), var) for var in tf.global_variables(self.scope + '/')])
        self.trainables     = OrderedDict([(self.get_var_localname(var), var) for var in tf.trainable_variables(self.scope + '/')])

    # Run initializers for all variables defined by this network. 
Example 67
Project: auto-check-in   Author: zeekvfu   File: file_utility.py    GNU General Public License v3.0 5 votes vote down vote up
def load_json_config(file_name, keep_order=False):
        """Load a JSON configuration file.

        :param file_name: Path of the UTF-8 encoded JSON file.
        :param keep_order: When True, preserve the original key order of
            JSON objects by decoding them into OrderedDict.
        :return: The decoded configuration (dict, or OrderedDict).
        """
        # object_pairs_hook=None is json's default, so both branches of the
        # original collapse into a single call.
        hook = OrderedDict if keep_order else None
        with open(file_name, encoding='utf-8', mode='r') as f:
            return json.load(f, object_pairs_hook=hook)


    # Read configuration files written as Python data structures
Example 68
Project: pyprnt   Author: kevink1103   File: test.py    MIT License 5 votes vote down vote up
def test_dict_basic(self):
        # For python version 3.4 and below: build the dict with explicit
        # insertion order instead of a literal.
        menu = collections.OrderedDict()
        menu["kimchi"] = 5000
        menu["Ice Cream"] = 100
        testee = prnt(menu, output=True, width=50)
        expect = "┌─────────┬────┐\n│kimchi   │5000│\n│Ice Cream│100 │\n└─────────┴────┘"
        # NOTE(review): the bare except silently swallows assertion failures,
        # turning this test into a no-op when it fails — confirm intended.
        try:
            self.assertEqual(testee, expect)
        except:
            pass
Example 69
Project: neural-fingerprinting   Author: StephanZheng   File: runner.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def set_input(self, X_batch=None):
        """
        Preprocessing the inputs before calling session.run()

        :param X_batch: A dictionary of inputs to the first sub-graph
        :return: A tuple, `(fetches, fd)`, with `fetches` being a list of
                 Tensors to be fetches and `fd` the feed dictionary.
        """
        inputs = self.inputs
        outputs = self.outputs

        # data for first gpu: stage X_batch values (None for names absent
        # from the batch) as the pending inputs of sub-graph 0.
        fd = {}
        if X_batch is not None:
            self.next_vals[0] = OrderedDict()
            for i, vname in enumerate(self.inputs[0]):
                if vname in X_batch:
                    self.next_vals[0][vname] = X_batch[vname]
                else:
                    self.next_vals[0][vname] = None
        else:
            self.next_vals[0] = None

        # Set `feed_dict` for each GPU. If there is something to run for that
        # GPU, collect outputs to be fetched.
        fetches = []
        self.active_gpus = []
        for i in range(len(outputs)):
            if self.next_vals[i] is None:
                # Nothing staged for this sub-graph; mark it inactive.
                self.active_gpus += [False]
                continue
            self.active_gpus += [True]
            for k in inputs[i]:
                if self.next_vals[i][k] is not None:
                    fd[inputs[i][k]] = self.next_vals[i][k]
            # NOTE(review): .iteritems() is Python 2 only.
            for k, v in outputs[i].iteritems():
                fetches += [v]

        fd.update(self.feed_dict)

        return fetches, fd
Example 70
Project: neural-fingerprinting   Author: StephanZheng   File: runner.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def proc_fvals(self, fvals):
        """
        Postprocess the outputs of the Session.run(). Move the outputs of
        sub-graphs to next ones and return the output of the last sub-graph.

        :param fvals: A list of fetched values returned by Session.run()
        :return: A dictionary of fetched values returned by the last sub-graph.
        """
        inputs = self.inputs
        outputs = self.outputs

        # Move data to the next sub-graph for the next step; `cur` walks the
        # flat fvals list, consuming one value per fetched output.
        cur = 0
        for i in range(len(inputs)-1):
            if not self.active_gpus[i]:
                # Inactive stage produced nothing; propagate the gap.
                self.next_vals[i+1] = None
                continue
            self.next_vals[i+1] = OrderedDict()
            for k in outputs[i]:
                self.next_vals[i+1][k] = fvals[cur]
                cur += 1
            if i == 0:
                # Stage 0's inputs are consumed; clear for the next batch.
                self.next_vals[0] = None

        # Return the output of the last sub-graph
        last_fvals = OrderedDict()
        if self.active_gpus[-1]:
            # All remaining fetched values must belong to the last sub-graph.
            assert cur+len(outputs[-1]) == len(fvals)
            for k in outputs[-1]:
                last_fvals[k] = fvals[cur]
                cur += 1
        return last_fvals
Example 71
Project: neural-fingerprinting   Author: StephanZheng   File: utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def deterministic_dict(normal_dict):
    """
    Return an OrderedDict copy of `normal_dict` with keys in sorted order,
    so its iteration order is always the same.
    """
    return OrderedDict((key, normal_dict[key]) for key in sorted(normal_dict))
Example 72
Project: godot-mono-builds   Author: godotengine   File: desktop.py    MIT License 4 votes vote down vote up
def main(raw_args):
    """Parse command-line arguments and run the requested desktop build action.

    :param raw_args: list of raw command-line arguments (without argv[0]).
    """
    import cmd_utils
    from collections import OrderedDict
    # NOTE: dropped the unused `from typing import Callable` import.

    # Dispatch table: action name -> build function.
    actions = OrderedDict()
    actions['configure'] = configure
    actions['make'] = make
    actions['clean'] = clean

    parser = cmd_utils.build_arg_parser(description='Builds the Mono runtime for the Desktop')
    subparsers = parser.add_subparsers(dest='platform')

    default_help = 'default: %(default)s'

    # One sub-parser per desktop platform, each with its own valid targets.
    for target_platform in target_platforms:
        target_platform_subparser = subparsers.add_parser(target_platform)
        target_platform_subparser.add_argument('action', choices=['configure', 'make', 'clean'])
        target_platform_subparser.add_argument('--target', choices=targets[target_platform], action='append', required=True)
        target_platform_subparser.add_argument('--with-llvm', action='store_true', default=False, help=default_help)

    cmd_utils.add_runtime_arguments(parser, default_help)

    args = parser.parse_args(raw_args)

    input_action = args.action
    input_target_platform = args.platform
    input_targets = args.target

    opts = desktop_opts_from_args(args)

    if not os.path.isdir(opts.mono_source_root):
        print('Mono sources directory not found: ' + opts.mono_source_root)
        sys.exit(1)

    # Cross-compiling to macOS from another OS requires an osxcross toolchain.
    if input_target_platform == 'osx' and sys.platform != 'darwin' and 'OSXCROSS_ROOT' not in os.environ:
        raise RuntimeError('The \'OSXCROSS_ROOT\' environment variable is required for cross-compiling to macOS')

    if is_cross_compiling(input_target_platform) and sys.platform == 'darwin':
        raise RuntimeError('Cross-compiling from macOS is not supported')

    action = actions[input_action]

    try:
        # Run the chosen action once for every requested target.
        for target in input_targets:
            action(opts, 'desktop-%s' % input_target_platform, input_target_platform, target)
    except BuildError as e:
        sys.exit(e.message)
Example 73
Project: godot-mono-builds   Author: godotengine   File: wasm.py    MIT License 4 votes vote down vote up
def main(raw_args):
    """Parse command-line arguments and run the requested WebAssembly build action.

    :param raw_args: list of raw command-line arguments (without argv[0]).
    """
    import cmd_utils
    from collections import OrderedDict
    # NOTE: dropped the unused `from typing import Callable` import and
    # the unused `emsdk_root_default` local (os.environ.get('EMSDK_ROOT')).

    # Shortcut names that expand to groups of real targets.
    target_shortcuts = {'all-runtime': runtime_targets}

    target_values = runtime_targets + cross_targets + cross_mxe_targets + list(target_shortcuts)

    # Dispatch table: action name -> build function.
    actions = OrderedDict()
    actions['configure'] = configure
    actions['make'] = make
    actions['clean'] = clean

    parser = cmd_utils.build_arg_parser(description='Builds the Mono runtime for WebAssembly')

    default_help = 'default: %(default)s'

    parser.add_argument('action', choices=['configure', 'make', 'clean'])
    parser.add_argument('--target', choices=target_values, action='append', required=True)

    cmd_utils.add_runtime_arguments(parser, default_help)

    args = parser.parse_args(raw_args)

    input_action = args.action
    input_targets = args.target

    opts = runtime_opts_from_args(args)

    if not os.path.isdir(opts.mono_source_root):
        print('Mono sources directory not found: ' + opts.mono_source_root)
        sys.exit(1)

    # Expand shortcut targets (e.g. 'all-runtime') into concrete targets.
    targets = cmd_utils.expand_input_targets(input_targets, target_shortcuts)
    action = actions[input_action]

    try:
        for target in targets:
            action(opts, 'wasm', target)
    except BuildError as e:
        sys.exit(e.message)
Example 74
Project: BASS   Author: Cisco-Talos   File: avclass_common.py    GNU General Public License v2.0 4 votes vote down vote up
def get_category_ranking(self, sample_info):
    """Rank category tokens for a sample by how many AVs report them.

    :param sample_info: sequence/named tuple where indexes 0-2 are the
        sample hashes and index 3 is a list of (av_name, label) pairs.
    :return: OrdDict mapping token -> count, sorted by decreasing count
        and then token, keeping only tokens reported by more than one AV.
    """
    # Extract info from named tuple
    av_label_pairs = sample_info[3]
    hashes = [sample_info[0], sample_info[1], sample_info[2]]
    # NOTE: dropped the unused locals `duplicates` and `av_whitelist`
    # (the whitelist was read from self.avs but never applied here).

    # Token -> number of AV labels it appeared in.
    category_map = {}

    # Process each AV label
    for (av_name, label) in av_label_pairs:
        # If empty label, nothing to do
        if not label:
            continue

        # Duplicate removal: if label ends in ' (B)', remove it
        if label.endswith(' (B)'):
            label = label[:-4]

        # Suffix removal
        label = self.__remove_suffixes(av_name, label)

        # Tokenization, token filtering, and alias replacement
        tokens = self.__norm_cat(label, hashes)
        # Increase token count in map
        for t in tokens:
            category_map[t] = category_map.get(t, 0) + 1

    # Token ranking: sort tokens by decreasing count and then token.
    # BUGFIX: .items() instead of the Python-2-only .iteritems(), which
    # raises AttributeError under Python 3.
    sorted_category = sorted(category_map.items(),
                             key=itemgetter(1, 0),
                             reverse=True)

    # Delete the tokens appearing only in one AV, add rest to output
    sorted_dict = OrdDict()
    for t, c in sorted_category:
        if c > 1:
            sorted_dict[t] = c
        else:
            # Counts are sorted descending, so the rest are also <= 1.
            break

    return sorted_dict
Example 75
Project: prediction-constrained-topic-models   Author: dtak   File: train_and_eval_sklearn_binary_classifier.py    MIT License 4 votes vote down vote up
def default_param_grid(classifier_name, c_logspace_arg_str='-6,4,6', **kwargs):
    """Return the hyperparameter search grid for a named classifier.

    :param classifier_name: one of 'logistic_regression', 'extra_trees',
        'svm_with_linear_kernel', 'svm_with_rbf_kernel',
        'k_nearest_neighbors', 'mlp'.
    :param c_logspace_arg_str: 'start,stop,num' spec for the C value grid.
    :param kwargs: ignored; absorbs extra keyword arguments from callers.
    :return: OrderedDict mapping parameter name -> list/array of values.
    :raises ValueError: if classifier_name is not recognized.
    """
    # BUGFIX: np.logspace requires an integer `num`; the old
    # `np.logspace(*map(float, ...))` passed num as a float, which
    # raises TypeError on modern NumPy.
    start, stop, num = c_logspace_arg_str.split(',')
    C_range = np.logspace(float(start), float(stop), int(num))
    if classifier_name == 'logistic_regression':
        return OrderedDict([
            ('penalty', ['l2', 'l1']),
            ('class_weight', [None]),
            ('C', C_range),
            ('thr_', [0.5]),
            ])
    elif classifier_name == 'extra_trees':
        return OrderedDict([
            ('class_weight', [None]),
            ('n_estimators', np.asarray([16, 64, 256])),
            ('max_features', np.asarray([0.04, 0.16, 0.64])),
            ('min_samples_leaf', np.asarray([4, 16, 64, 256])), # bigger seems to be better
            ('thr_', [0.5]),
            ])
    elif classifier_name == 'svm_with_linear_kernel':
        return OrderedDict([
            ('kernel', ['linear']),
            ('C', C_range),
            ('class_weight', [None]),
            ('probability', [False]),
            ])
    elif classifier_name == 'svm_with_rbf_kernel':
        return OrderedDict([
            ('kernel', ['rbf']),
            ('C', C_range),
            ('gamma', np.logspace(-6, 6, 5)),
            ('class_weight', [None]),
            ('probability', [False]),
            ])
    elif classifier_name == 'k_nearest_neighbors':
        return OrderedDict([
            ('n_neighbors', [4, 16, 32, 64]),
            ('metric', ['euclidean', 'manhattan']),
            ('weight', ['uniform', 'distance']),
            ('algorithm', ['brute']),
            ])
    elif classifier_name == 'mlp':
        return OrderedDict([
            #('norm', ['l1', 'none']),
            # BUGFIX: (16) is just the int 16; a one-hidden-layer MLP needs
            # the 1-tuples (16,), (64,), ... that sklearn expects.
            ('hidden_layer_sizes', [(16,), (64,), (256,), (1024,)]),
            ('solver', ['adam']),
            ('alpha', np.logspace(-6, 0, 3)),
            ('learning_rate_init', np.asarray([0.01, 0.1])),
            ('activation', ['relu']),
            ('batch_size', [200]),
            ('early_stopping', [True]),
            ])
    else:
        raise ValueError("Unrecognized: " + classifier_name)
Example 76
Project: slidoc   Author: mitotic   File: multiproxy.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def parse_headers(self, header_list, replace={}, drop=[]):
    """ Returns tuple of modified header list and OrderedDict with lower-case header
    names as keys and a list of one or more header values for each key.
    replace: {lower_case_name: value}
    drop: list of lower_case header names to be dropped
    Content-Length values are converted to integers in the dict
    (invalid or negative lengths become None).
    """
    # NOTE: the mutable default args are safe here because replace/drop
    # are only read, never mutated.
    mod_header_list = []
    header_dict = OrderedDict()
    last_header_values = None  # value list of the most recent kept header
    for header in header_list:
        if header[0] in (" ", "\t"):
            # Continuation line: extend the previous header's last value.
            # BUGFIX: the condition was inverted (`if not last_header_values`),
            # which crashed (indexing None/empty) when the first line was a
            # continuation or followed a dropped header, and silently
            # discarded valid continuation lines.
            if last_header_values:
                last_header_values[-1] += header
                mod_header_list.append(header)
            continue

        header_name_raw, sep, header_value = header.partition(":")
        header_name = header_name_raw.lower()
        header_value = header_value.lstrip()

        if drop and header_name in drop:
            # Dropped header: forget it so later continuations are ignored.
            last_header_values = None
            continue

        if replace and header_name in replace:
            # Rewrite the outgoing header line; the dict keeps the original value.
            header = header_name_raw+": "+replace[header_name]

        mod_header_list.append(header)

        if header_name not in header_dict:
            header_dict[header_name] = []

        if header_name == "content-length":
            # Normalize Content-Length to a non-negative int, else None.
            try:
                header_value = int(header_value.strip())
                if header_value < 0:
                    header_value = None
            except Exception:
                header_value = None

        last_header_values = header_dict[header_name]
        last_header_values.append(header_value)

    return mod_header_list, header_dict
Example 77
Project: LayoutExporterU   Author: aboood40091   File: main.py    GNU General Public License v3.0 4 votes vote down vote up
def main():
    """Interactively convert a .bflyt layout to .flyt XML, also exporting any
    matching animation (.bflan -> .flan) and texture data found alongside it."""
    print("Layout Exporter U v1.0.0")
    print("(C) 2019 AboodXD\n")

    src = input("Input (.bflyt):  ")
    output = input("Output (.flyt):  ")

    # Sibling 'anim' and 'timg' directories live two levels up from the input.
    parent = os.path.dirname(os.path.dirname(src))
    animPath = os.path.join(parent, 'anim')
    timgPath = os.path.join(parent, 'timg')
    timgOutP = os.path.join(os.path.dirname(output), 'Textures')

    fileName = os.path.splitext(os.path.basename(src))[0]
    animOutput = os.path.splitext(output)[0] + ".flan"

    # Either a single exactly-named .bflan, or every "<name>_*" variant.
    if os.path.isfile(os.path.join(animPath, fileName + ".bflan")):
        files = [fileName + ".bflan"]
    else:
        files = [entry for entry in os.listdir(animPath)
                 if entry.startswith(fileName + "_")]

    textures, formats = None, None
    if files:
        textures, formats = flanMain(files, animPath, animOutput)

    lyt = Layout(src, timgPath, timgOutP, textures, formats)

    # Assemble the XML document tree as nested dicts.
    root = OrderedDict()
    root["@version"] = "1.5.14"
    root["@xmlns:xsd"] = "http://www.w3.org/2001/XMLSchema"
    root["@xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
    root["@xmlns"] = "http://www.nintendo.co.jp/NW4F/LayoutEditor"
    root["head"] = Head().getAsDict()
    root["body"] = {"lyt": lyt.getAsDict()}
    doc = {"nw4f_layout": root}

    xml = dictToXml(doc)
    with open(output, "w", encoding="utf-8") as out:
        out.write(xml)
Example 78
Project: reroils-data-legacy   Author: rero   File: cli.py    GNU General Public License v2.0 4 votes vote down vote up
def check_json(paths, replace, indent, sort_keys):
    """Check json files."""
    # Collect files: each path is either a file or a directory to scan
    # recursively; with no paths at all, scan the current tree.
    files_list = []
    for path in paths:
        if os.path.isfile(path):
            files_list.append(path)
        elif os.path.isdir(path):
            files_list.extend(glob(os.path.join(path, '**/*.json'),
                                   recursive=True))
    if not paths:
        files_list = glob('**/*.json', recursive=True)

    tot_error_cnt = 0
    for fname in files_list:
        error_cnt = 0
        try:
            with open(fname, 'r') as opened_file:
                json_orig = opened_file.read().rstrip()
                opened_file.seek(0)
                json_file = json.load(opened_file,
                                      object_pairs_hook=OrderedDict)
            # A file is well indented iff re-dumping reproduces it exactly.
            if json.dumps(json_file, indent=indent).rstrip() != json_orig:
                error_cnt = 1
            click.echo(fname + ': ', nl=False)
            if replace:
                with open(fname, 'w') as opened_file:
                    opened_file.write(json.dumps(json_file,
                                                 indent=indent,
                                                 sort_keys=sort_keys))
                click.secho('File replaced', fg='yellow')
            elif error_cnt == 0:
                click.secho('Well indented', fg='green')
            else:
                click.secho('Bad indentation', fg='red')
        except ValueError as e:
            click.echo(fname + ': ', nl=False)
            click.secho('Invalid JSON', fg='red', nl=False)
            click.echo(' -- ' + e.msg)
            error_cnt = 1

        tot_error_cnt += error_cnt
    return tot_error_cnt
Example 79
Project: mmdetection   Author: open-mmlab   File: detectron2pytorch.py    Apache License 2.0 4 votes vote down vote up
def convert(src, dst, depth):
    """Convert keys in detectron pretrained ResNet models to pytorch style."""
    # Only depths present in arch_settings (ResNet-50/101) are supported.
    if depth not in arch_settings:
        raise ValueError('Only support ResNet-50 and ResNet-101 currently')
    block_nums = arch_settings[depth]

    # Load the caffe weight blobs.
    caffe_model = mmcv.load(src, encoding='latin1')
    blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model

    # Convert to pytorch style, tracking which blobs were handled.
    state_dict = OrderedDict()
    converted_names = set()
    convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names)
    convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names)

    for stage, num_blocks in enumerate(block_nums, start=1):
        for block in range(num_blocks):
            caffe_prefix = 'res{}_{}'.format(stage + 1, block)
            torch_prefix = 'layer{}.{}'.format(stage, block)
            if block == 0:
                # First block of each stage has a downsampling shortcut.
                convert_conv_fc(blobs, state_dict,
                                caffe_prefix + '_branch1',
                                torch_prefix + '.downsample.0',
                                converted_names)
                convert_bn(blobs, state_dict,
                           caffe_prefix + '_branch1_bn',
                           torch_prefix + '.downsample.1',
                           converted_names)
            for conv_idx, letter in enumerate(['a', 'b', 'c'], start=1):
                convert_conv_fc(blobs, state_dict,
                                '{}_branch2{}'.format(caffe_prefix, letter),
                                '{}.conv{}'.format(torch_prefix, conv_idx),
                                converted_names)
                convert_bn(blobs, state_dict,
                           '{}_branch2{}_bn'.format(caffe_prefix, letter),
                           '{}.bn{}'.format(torch_prefix, conv_idx),
                           converted_names)

    # Report any blobs that were not converted.
    for key in blobs:
        if key not in converted_names:
            print('Not Convert: {}'.format(key))

    # Save checkpoint.
    torch.save({'state_dict': state_dict}, dst)
Example 80
Project: models   Author: kipoi   File: dataloader.py    MIT License 4 votes vote down vote up
def loadData(self, filename, windows):
    """Load per-gene histone-mark windows from a CSV file.

    Each gene occupies `windows` consecutive rows; columns 2-6 hold the
    five histone-mark (HM) counts, column 7 the thresholded expression
    label, and column 0 a "geneID_suffix" identifier.

    :param filename: path to the CSV file.
    :param windows: number of rows (windows) per gene.
    :return: OrderedDict mapping gene index -> dict with keys 'geneID',
        'expr' and 'hm1'..'hm5' (each a (windows, 1) float tensor).
    """
    with open(filename) as fi:
        data = list(csv.reader(fi))
    # BUGFIX: removed the redundant fi.close() — the `with` block already
    # closed the file.

    nrows = len(data)
    ncols = len(data[0])
    # BUGFIX: integer floor division; `/` produced a float under Python 3.
    ngenes = nrows // windows
    nfeatures = ncols - 1
    print("Number of genes: %d" % ngenes)
    print("Number of entries: %d" % nrows)
    print("Number of HMs: %d" % nfeatures)

    attr = collections.OrderedDict()
    for count, i in enumerate(range(0, nrows, windows)):
        # One (windows, 1) tensor per histone mark.
        hms = [torch.zeros(windows, 1) for _ in range(5)]
        for w in range(windows):
            for m in range(5):
                hms[m][w][0] = int(data[i + w][2 + m])
        geneID = str(data[i][0].split("_")[0])
        # Expression label comes from the gene's last window row
        # (was implicit via the leaked loop variable `w`).
        thresholded_expr = int(data[i + windows - 1][7])

        attr[count] = {
            'geneID': geneID,
            'expr': thresholded_expr,
            'hm1': hms[0],
            'hm2': hms[1],
            'hm3': hms[2],
            'hm4': hms[3],
            'hm5': hms[4]
        }

    return attr