Python logging.warn() Examples
The following are 30 code examples showing how to use logging.warn(). The examples are extracted from open source projects; the project, author, file, and license are listed above each example.
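Note that logging.warn() is a deprecated alias for logging.warning(): both record a message at the WARNING level, and on recent Python 3 releases the warn() spelling additionally emits a DeprecationWarning. As a minimal sketch before the project examples (the message text and basicConfig settings below are made up for illustration):

import logging

# Route WARNING-and-above records to stderr with a simple format.
logging.basicConfig(level=logging.WARNING,
                    format="%(levelname)s:%(name)s:%(message)s")

logging.warn("disk usage at %d%%", 91)      # deprecated alias; may emit a DeprecationWarning
logging.warning("disk usage at %d%%", 91)   # preferred spelling, identical output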
Example 1
Project: svviz Author: svviz File: app.py License: MIT License | 6 votes |
def saveState(dataHub):
    import pickle as pickle
    import gzip

    pickle.dump(dataHub, gzip.open(dataHub.args.save_state, "wb"))
    logging.warn("^"*20 + " saving state to pickle and exiting " + "^"*20)
Example 2
Project: svviz Author: svviz File: export.py License: MIT License | 6 votes |
def getExportFormat(args):
    formats = [None, "png", "pdf", "svg"]

    if args.type == "batch" or args.format is not None:
        exportFormat = args.format
        if exportFormat is None:
            exportFormat = "pdf"
    else:
        exportFormat = args.export.partition(".")
        if len(exportFormat[2]) > 0:
            exportFormat = exportFormat[2]
            if exportFormat not in formats:
                logging.warn("= File suffix {} not recognized; exporting as .svg =".format(exportFormat))
                exportFormat = "svg"
        else:
            exportFormat = "svg"

    exportFormat = exportFormat.lower()
    return exportFormat
Example 3
Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: symbol_factory.py License: Apache License 2.0 | 6 votes |
def get_symbol_train(network, data_shape, **kwargs):
    """Wrapper for get symbol for train

    Parameters
    ----------
    network : str
        name for the base network symbol
    data_shape : int
        input shape
    kwargs : dict
        see symbol_builder.get_symbol_train for more details
    """
    if network.startswith('legacy'):
        logging.warn('Using legacy model.')
        return symbol_builder.import_module(network).get_symbol_train(**kwargs)
    config = get_config(network, data_shape, **kwargs).copy()
    config.update(kwargs)
    return symbol_builder.get_symbol_train(**config)
Example 4
Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: symbol_factory.py License: Apache License 2.0 | 6 votes |
def get_symbol(network, data_shape, **kwargs):
    """Wrapper for get symbol for test

    Parameters
    ----------
    network : str
        name for the base network symbol
    data_shape : int
        input shape
    kwargs : dict
        see symbol_builder.get_symbol for more details
    """
    if network.startswith('legacy'):
        logging.warn('Using legacy model.')
        return symbol_builder.import_module(network).get_symbol(**kwargs)
    config = get_config(network, data_shape, **kwargs).copy()
    config.update(kwargs)
    return symbol_builder.get_symbol(**config)
Example 5
Project: hsds Author: HDFGroup File: get_s3_stats.py License: Apache License 2.0 | 6 votes |
def get_remote_info_json(jfname):
    try:
        logging.info('loading example '+jfname)
        rfo = urllib.urlopen(jfname)
        di = json.loads(rfo.read())
        nat, glbs = 0, 0
        for k, v in di.items():
            if k != 'dimensions' or k != 'variables':
                glbs += 1
        for k, v in di['variables'].items():
            for a in v:
                nat += 1
        dims = [l for k, v in di['dimensions'].items() for d, l in v.items() if d == 'length']
        return {'num global attr': glbs, 'num vars': len(di['variables'].keys()), 'num dims': \
                len(di['dimensions'].keys()), 'ave attrs per var': nat / len(di['variables'].keys()), \
                'dims sizes': dims}
    except Exception, e:
        logging.warn("WARN get_remote_info_json on %s : %s, update S3 bucket" % (jfname, str(e)))
        return {}
#---------------------------------------------------------------------------------
Example 6
Project: loaner Author: google File: bootstrap.py License: Apache License 2.0 | 6 votes |
def bootstrap_chrome_ous(**kwargs):
    """Bootstraps Chrome device OUs.

    Args:
        **kwargs: keyword args including a user_email with which to run the
            Directory API client methods (required for BigQuery streaming).
    """
    logging.info('Requesting delegated admin for bootstrap')
    client = directory.DirectoryApiClient(user_email=kwargs['user_email'])
    for org_unit_name, org_unit_path in constants.ORG_UNIT_DICT.iteritems():
        logging.info(
            'Creating org unit %s at path %s ...', org_unit_name, org_unit_path)
        if client.get_org_unit(org_unit_path):
            logging.warn(_ORG_UNIT_EXISTS_MSG, org_unit_name)
        else:
            client.insert_org_unit(org_unit_path)
Example 7
Project: zamia-speech Author: gooofy File: speech_sentences.py License: GNU Lesser General Public License v3.0 | 6 votes |
def proc_cornell_movie_dialogs(corpus_path, tokenize):
    num_sentences = 0
    with codecs.open('%s/movie_lines.txt' % corpus_path, 'r', 'latin1') as inf:
        for line in inf:
            parts = line.split('+++$+++')
            if not len(parts) == 5:
                logging.warn('movie dialogs: skipping line %s' % line)
                continue
            sentence = u' '.join(tokenize(parts[4], lang='en'))
            if not sentence:
                logging.warn('movie dialogs: skipping null sentence %s' % line)
                continue
            yield u'%s' % sentence
            num_sentences += 1
            if num_sentences % SENTENCES_STATS == 0:
                logging.info('movie dialogs: %8d sentences.' % num_sentences)
            if DEBUG_LIMIT and num_sentences >= DEBUG_LIMIT:
                logging.warn('movie dialogs: debug limit reached, stopping.')
                break
Example 8
Project: zamia-speech Author: gooofy File: speech_sentences.py License: GNU Lesser General Public License v3.0 | 6 votes |
def proc_corpus_with_one_sentence_perline(corpus_path, tokenize, lang):
    logging.info("adding sentences from %s..." % corpus_path)
    num_sentences = 0
    with codecs.open(corpus_path, 'r', 'utf8') as inf:
        for line in inf:
            sentence = u' '.join(tokenize(line, lang=lang))
            if not sentence:
                logging.warn('%s: skipping null sentence.' % corpus_path)
                continue
            yield u'%s' % sentence
            num_sentences += 1
            if num_sentences % SENTENCES_STATS == 0:
                logging.info('%s: %8d sentences.' % (corpus_path, num_sentences))
            if DEBUG_LIMIT and num_sentences >= DEBUG_LIMIT:
                logging.warn('%s: debug limit reached, stopping.' % corpus_path)
                break
Example 9
Project: dataiku-contrib Author: dataiku File: models.py License: Apache License 2.0 | 6 votes |
def make_extra_conf(self, extra_conf_template, add_overwrite_keys=None):
    for k in extra_conf_template:
        value = self._generate_value_from_template_key(k, extra_conf_template[k])
        if value:
            extra_conf_template[k] = value
        else:
            logging.warn('Value for key %s is None removing from config' % k)
            extra_conf_template.pop(k)
    # Useful to build storage config - keep it after first template transformation
    if add_overwrite_keys:
        for k in add_overwrite_keys:
            new_value = add_overwrite_keys[k]
            if extra_conf_template.get(k):
                logging.warn('Overriding key:{} value: {} with new_value {}'.format(k, extra_conf_template[k], new_value))
            else:
                logging.info('Adding new key:{} with value: {}'.format(k, new_value))
            extra_conf_template[k] = new_value
    return self._make_extra_conf_as_kv_list(extra_conf_template)
Example 10
Project: Windows-Agent Author: AutohomeRadar File: hbs.py License: Apache License 2.0 | 6 votes |
def report_status_to_hbs(data):
    """ send formated data to transfer via rpc

    Args:
        data (dict): {}
    """
    addr = g.HEARTBEAT['addr']
    rpc = get_hbs_rpc_client(addr)
    for i in range(3):
        try:
            res = rpc.call('Agent.ReportStatus', data)
        except Exception as e:
            logging.warn("call (%s) Agent.ReportStatus failure, times: %s -> msg: %s" % (addr, i, e))
            continue
        return res
    logging.error("report_status_to_hbs %s to hbs (%s) failure" % (data, addr))
Example 11
Project: Windows-Agent Author: AutohomeRadar File: transfer.py License: Apache License 2.0 | 6 votes |
def send_data_to_transfer(data):
    """ send formated data to transfer via rpc,
    select transfer randomly and every transfer will retry 3 times if failure

    Args:
        data (list of dict): [{}, {}, ...]
    """
    addrs = g.TRANSFER['addrs']
    logging.debug(addrs)
    random.shuffle(addrs)
    for addr in addrs:
        call_success = False
        rpc = get_transfer_rpc_client(addr)
        for i in range(3):
            try:
                res = rpc.call('Transfer.Update', data)
            except Exception as e:
                logging.warn("call (%s) Transfer.update failure, times: %s -> msg: %s" % (addr, i, e))
                continue
            call_success = True
            return res
        if not call_success:
            logging.error("send data %s to transfer (%s) failure" % (data, addr))
Example 12
Project: gae-secure-scaffold-python Author: google File: api_fixer.py License: Apache License 2.0 | 6 votes |
def _HttpUrlLoggingWrapper(func):
    """Decorates func, logging when 'url' params do not start with https://."""
    @functools.wraps(func)
    def _CheckAndLog(*args, **kwargs):
        try:
            arg_index = FindArgumentIndex(func, 'url')
        except ValueError:
            return func(*args, **kwargs)

        if arg_index < len(args):
            arg_value = args[arg_index]
        elif 'url' in kwargs:
            arg_value = kwargs['url']
        elif 'url' not in kwargs:
            arg_value = GetDefaultArgument(func, 'url')

        if arg_value and not arg_value.startswith('https://'):
            logging.warn('SECURITY : fetching non-HTTPS url %s' % (arg_value))
        return func(*args, **kwargs)
    return _CheckAndLog
Example 13
Project: browserscope Author: elsigh File: local_scores.py License: Apache License 2.0 | 6 votes |
def CheckTests(db):
    cursor = db.cursor()
    cursor.execute('''
        SELECT category, test, count(*)
        FROM scores
        WHERE category IS NOT NULL
        GROUP BY category, test
        ORDER BY category, test
        ;''')
    for category, test_key, num_scores in cursor.fetchall():
        test_set = all_test_sets.GetTestSet(category)
        if not test_set:
            logging.warn('No test_set for category: %s (num_scores=%s)',
                         category, num_scores)
            continue
        test = test_set.GetTest(test_key)
        if not test:
            logging.warn('No test: %s, %s (num_scores=%s)',
                         category, test_key, num_scores)
Example 14
Project: airflow-rest-api-plugin Author: teamclairvoyant File: rest_api_plugin.py License: Apache License 2.0 | 6 votes |
def http_token_secure(func):
    def secure_check(arg):
        logging.info("Rest_API_Plugin.http_token_secure() called")
        # Check if the airflow_expected_http_token variable is not none from configurations.
        # This means authentication is enabled.
        if airflow_expected_http_token:
            logging.info("Performing Token Authentication")
            if request.headers.get(airflow_rest_api_plugin_http_token_header_name, None) != airflow_expected_http_token:
                warning_message = "Token Authentication Failed"
                logging.warn(warning_message)
                base_response = REST_API_Response_Util.get_base_response(include_arguments=False)
                return REST_API_Response_Util.get_403_error_response(base_response=base_response, output=warning_message)
        return func(arg)
    return secure_check

# Function used to validate the JWT Token
Example 15
Project: svviz Author: svviz File: remap.py License: MIT License | 5 votes |
def do_realign(dataHub, sample):
    processes = dataHub.args.processes
    if processes is None or processes == 0:
        # we don't really gain from using virtual cores, so try to figure out how many physical
        # cores we have
        processes = misc.cpu_count_physical()

    variant = dataHub.variant
    reads = sample.reads
    name = "{}:{{}}".format(sample.name[:15])

    t0 = time.time()
    refalignments, badReadsRef = do1remap(variant.chromParts("ref"), reads, processes,
                                          jobName=name.format("ref"), tryExact=dataHub.args.fast)
    altalignments, badReadsAlt = do1remap(variant.chromParts("alt"), reads, processes,
                                          jobName=name.format("alt"), tryExact=dataHub.args.fast)
    t1 = time.time()

    logging.debug(" Time to realign: {:.1f}s".format(t1-t0))

    badReads = badReadsRef.union(badReadsAlt)
    if len(badReads) > 0:
        logging.warn(" Alignment failed with {} reads (this is a known issue)".format(badReads))
        for badRead in badReads:
            refalignments.pop(badRead, None)
            altalignments.pop(badRead, None)

    assert set(refalignments.keys()) == set(altalignments.keys()), \
        set(refalignments.keys()) ^ set(altalignments.keys())

    alnCollections = []
    for key in refalignments:
        alnCollection = AlignmentSetCollection(key)
        alnCollection.addSet(refalignments[key], "ref")
        alnCollection.addSet(altalignments[key], "alt")
        alnCollections.append(alnCollection)

    return alnCollections
Example 16
Project: svviz Author: svviz File: remap.py License: MIT License | 5 votes |
def getReads(variant, bam, minmapq, pair_minmapq, searchDistance, single_ended=False,
             include_supplementary=False, max_reads=None, sample_reads=None):
    t0 = time.time()
    searchRegions = variant.searchRegions(searchDistance)

    # This cludge tries the chromosomes as given ('chr4' or '4') and if that doesn't work
    # tries to switch to the other variation ('4' or 'chr4')
    try:
        reads, supplementaryAlignmentsFound = _getreads(searchRegions, bam, minmapq, pair_minmapq,
                                                        single_ended, include_supplementary, max_reads, sample_reads)
    except ValueError as e:
        oldchrom = searchRegions[0].chr()
        try:
            if "chr" in oldchrom:
                newchrom = oldchrom.replace("chr", "")
                searchRegions = [Locus(l.chr().replace("chr", ""), l.start(), l.end(), l.strand())
                                 for l in searchRegions]
            else:
                newchrom = "chr{}".format(oldchrom)
                searchRegions = [Locus("chr{}".format(l.chr()), l.start(), l.end(), l.strand())
                                 for l in searchRegions]

            logging.warn(" Couldn't find reads on chromosome '{}'; trying instead '{}'".format(oldchrom, newchrom))
            reads, supplementaryAlignmentsFound = _getreads(searchRegions, bam, minmapq, pair_minmapq,
                                                            single_ended, include_supplementary, max_reads, sample_reads)
        except ValueError:
            raise e

    t1 = time.time()

    if supplementaryAlignmentsFound:
        logging.warn(" ** Supplementary alignments found: these alignments (with sam flag 0x800) **\n"
                     " ** are poorly documented among mapping software and may result in missing **\n"
                     " ** portions of reads; consider using the --include-supplementary          **\n"
                     " ** command line argument if you think this is happening                   **")

    logging.debug(" time to find reads and mates:{:.1f}s".format(t1 - t0))
    logging.info(" number of reads found: {}".format(len(reads)))

    return reads
Example 17
Project: svviz Author: svviz File: app.py License: MIT License | 5 votes |
def loadISDs(dataHub):
    """ Load the Insert Size Distributions """

    for sample in dataHub:
        logging.info(" > {} <".format(sample.name))
        sample.readStatistics = insertsizes.ReadStatistics(sample.bam, keepReads=dataHub.args.save_reads)

        if sample.readStatistics.orientations != "any":
            if len(sample.readStatistics.orientations) > 1:
                logging.warn(" ! multiple read pair orientations found within factor !\n"
                             " ! of 2x of one another; if you aren't expecting your  !\n"
                             " ! input data to contain multiple orientations, this   !\n"
                             " ! could be a bug in the mapping software or svviz     !")
            if len(sample.readStatistics.orientations) < 1:
                logging.error(" No valid read orientations found for dataset:{}".format(sample.name))

        sample.orientations = sample.readStatistics.orientations
        if sample.orientations == "any":
            sample.singleEnded = True
        logging.info(" valid orientations: {}".format(",".join(sample.orientations) if sample.orientations != "any" else "any"))

        if sample.orientations == "any":
            searchDist = sample.readStatistics.readLengthUpperQuantile()
            alignDist = sample.readStatistics.readLengthUpperQuantile()*1.25 + dataHub.args.context
        else:
            searchDist = sample.readStatistics.meanInsertSize() + sample.readStatistics.stddevInsertSize()*2
            alignDist = sample.readStatistics.meanInsertSize() + sample.readStatistics.stddevInsertSize()*4 + dataHub.args.context
        if dataHub.args.flanks:
            searchDist += dataHub.args.context

        sample.searchDistance = int(searchDist)
        dataHub.alignDistance = max(dataHub.alignDistance, int(alignDist))

        logging.info(" Using search distance: {}".format(sample.searchDistance))
        logging.info(" Using align distance: {}".format(dataHub.alignDistance))
Example 18
Project: svviz Author: svviz File: app.py License: MIT License | 5 votes |
def loadReads(dataHub):
    readCount = 0
    readLength = 0

    maxReads = dataHub.args.max_reads
    sampleReads = dataHub.args.sample_reads

    for sample in dataHub:
        logging.info(" - {}".format(sample.name))
        sample.reads = remap.getReads(dataHub.variant, sample.bam, dataHub.args.min_mapq,
                                      dataHub.args.pair_min_mapq, sample.searchDistance,
                                      sample.singleEnded, dataHub.args.include_supplementary,
                                      maxReads, sampleReads)
        readCount += len(sample.reads)
        readLength += sum(len(read.seq) for read in sample.reads)

        if maxReads is not None:
            maxReads -= readCount

    logging.info(" Found {:,} reads across {} samples for a total of {:,} nt".format(
        readCount, len(dataHub.samples), readLength))

    if readLength > 2.5e6 or (dataHub.args.aln_quality is not None and readLength > 5e5):
        if not dataHub.args.skip_cigar and (dataHub.args.export or not dataHub.args.no_web):
            logging.warn("==== Based on the number reads (sequence nucleotides) found relevant =====\n"
                         "==== to the current variant, performance for the web browser and     =====\n"
                         "==== export may be poor; using the --skip-cigar option is            =====\n"
                         "==== recommended to reduce the number of shapes being drawn          =====")

    return readCount, readLength
Example 19
Project: svviz Author: svviz File: variants.py License: MIT License | 5 votes |
def commonSegments(self):
    """ return the segment IDs of the segments that are identical between
    the ref and alt alleles (eg, flanking regions) """

    common = []

    refCounter = collections.Counter((segment.id for segment in self._segments("ref")))
    altCounter = collections.Counter((segment.id for segment in self._segments("alt")))

    if max(refCounter.values()) > 1 or max(altCounter.values()) > 1:
        logging.warn(" Same genomic region repeated multiple times within one allele; "
                     "all flanking reads will be marked as ambiguous")
        return []

    refSegments = dict((segment.id, segment) for segment in self._segments("ref"))
    altSegments = dict((segment.id, segment) for segment in self._segments("alt"))

    for segmentID, refSegment in refSegments.items():
        if not segmentID in altSegments:
            continue
        altSegment = altSegments[segmentID]

        # Could remove the requirement to have the strand be the same
        # allowing the reads within the inversion to be plotted too
        if refSegment.chrom == altSegment.chrom and \
                refSegment.start == altSegment.start and \
                refSegment.end == altSegment.end and \
                refSegment.strand == altSegment.strand and \
                refSegment.source == altSegment.source:
            common.append(segmentID)

    return common
Example 20
Project: svviz Author: svviz File: pairfinder.py License: MIT License | 5 votes |
def loadRegion(self, chrom, start, end, mates=False):
    count = self.sam.count(chrom, start, end)
    reads = self.sam.fetch(chrom, start, end)

    if count > 1e5:
        if mates:
            logging.warn(" LOTS OF READS IN MATE-PAIR REGION: {}:{}-{} count={:,}".format(chrom, start, end, count))
        else:
            logging.warn(" LOTS OF READS IN BREAKPOINT REGION: {}:{}-{} count={:,}".format(chrom, start, end, count))

    # goodReads = []
    for i, read in enumerate(reads):
        if i % 1000000 == 0 and count > 5e6:
            logging.debug(" > {} of {}".format(i, count))
        if read.mapq >= self.minmapq and not read.is_secondary and not read.is_duplicate:
            if (read.flag & 0x800) != 0 and not self.include_supplementary:
                self.supplementaryAlignmentsFound = True
                continue
            if not mates and read.mapq < self.pair_minmapq:
                continue
            yield read
            # self.readsByID[read.qname].add(read)
            # goodReads.append(read)
            # if not mates and self.maxReads and len(goodReads) > self.maxReads:
            #     return goodReads
    # return goodReads
Example 21
Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: base.py License: Apache License 2.0 | 5 votes |
def load_params(self, name="", dir_path="", epoch=None):
    params, aux_states, param_loading_path = load_params(dir_path=dir_path, epoch=epoch, name=name)
    logging.info('Loading params from \"%s\" to %s' % (param_loading_path, self.name))
    for k, v in params.items():
        if k in self.params:
            logging.debug('   Loading %s %s' % (k, str(v.shape)))
            self.params[k][:] = v
        else:
            logging.warn("Found unused param in the saved model file: %s" % k)
    for k, v in aux_states.items():
        self.aux_states[k][:] = v
Example 22
Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: OpWrapperGenerator.py License: Apache License 2.0 | 5 votes |
def __init__(self, typeName = 'ElementWiseOpType', \
             typeString = "{'avg', 'max', 'sum'}"):
    self.name = typeName
    if (typeString[0] == '{'):  # is a enum type
        isEnum = True
        # parse enum
        self.enumValues = typeString[typeString.find('{') + 1:typeString.find('}')].split(',')
        for i in range(0, len(self.enumValues)):
            self.enumValues[i] = self.enumValues[i].strip().strip("'")
    else:
        logging.warn("trying to parse none-enum type as enum: %s" % typeString)
Example 23
Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: test_tvm_bridge.py License: Apache License 2.0 | 5 votes |
def test_tvm_bridge():
    # only enable test if TVM is available
    try:
        import tvm
        import tvm.contrib.mxnet
        import topi
    except ImportError:
        logging.warn("TVM bridge test skipped because TVM is missing...")
        return

    def check(target, dtype):
        shape = (20,)
        scale = tvm.var("scale", dtype="float32")
        x = tvm.placeholder(shape, dtype=dtype)
        y = tvm.placeholder(shape, dtype=dtype)
        z = tvm.compute(shape, lambda i: x[i] + y[i])
        zz = tvm.compute(shape, lambda *i: z(*i) * scale.astype(dtype))
        ctx = mx.gpu(0) if target == "cuda" else mx.cpu(0)
        target = tvm.target.create(target)

        # build the function
        with target:
            s = topi.generic.schedule_injective(zz)
            f = tvm.build(s, [x, y, zz, scale])

        # get a mxnet version
        mxf = tvm.contrib.mxnet.to_mxnet_func(f, const_loc=[0, 1])
        xx = mx.nd.uniform(shape=shape, ctx=ctx).astype(dtype)
        yy = mx.nd.uniform(shape=shape, ctx=ctx).astype(dtype)
        zz = mx.nd.empty(shape=shape, ctx=ctx).astype(dtype)
        # invoke myf: this runs in mxnet engine
        mxf(xx, yy, zz, 10.0)
        np.testing.assert_allclose(
            zz.asnumpy(), (xx.asnumpy() + yy.asnumpy()) * 10)

    for tgt in ["llvm", "cuda"]:
        for dtype in ["int8", "uint8", "int64", "float32", "float64"]:
            check(tgt, dtype)
Example 24
Project: DOTA_models Author: ringringyi File: swiftshader_renderer.py License: Apache License 2.0 | 5 votes |
def __init__(self, obj_file, material_file=None, load_materials=True,
             name_prefix='', name_suffix=''):
    if material_file is not None:
        logging.error('Ignoring material file input, reading them off obj file.')
    load_flags = self.get_pyassimp_load_options()
    scene = assimp.load(obj_file, processing=load_flags)
    filter_ind = self._filter_triangles(scene.meshes)
    self.meshes = [scene.meshes[i] for i in filter_ind]
    for m in self.meshes:
        m.name = name_prefix + m.name + name_suffix

    dir_name = os.path.dirname(obj_file)
    # Load materials
    materials = None
    if load_materials:
        materials = []
        for m in self.meshes:
            file_name = os.path.join(dir_name, m.material.properties[('file', 1)])
            assert(os.path.exists(file_name)), \
                'Texture file {:s} foes not exist.'.format(file_name)
            img_rgb = cv2.imread(file_name)[::-1, :, ::-1]
            if img_rgb.shape[0] != img_rgb.shape[1]:
                logging.warn('Texture image not square.')
                sz = np.maximum(img_rgb.shape[0], img_rgb.shape[1])
                sz = int(np.power(2., np.ceil(np.log2(sz))))
                img_rgb = cv2.resize(img_rgb, (sz, sz), interpolation=cv2.INTER_LINEAR)
            else:
                sz = img_rgb.shape[0]
                sz_ = int(np.power(2., np.ceil(np.log2(sz))))
                if sz != sz_:
                    logging.warn('Texture image not square of power of 2 size. ' +
                                 'Changing size from %d to %d.', sz, sz_)
                    sz = sz_
                    img_rgb = cv2.resize(img_rgb, (sz, sz), interpolation=cv2.INTER_LINEAR)
            materials.append(img_rgb)
    self.scene = scene
    self.materials = materials
Example 25
Project: DOTA_models Author: ringringyi File: object_detection_evaluation.py License: Apache License 2.0 | 5 votes |
def add_single_ground_truth_image_info(self,
                                       image_key,
                                       groundtruth_boxes,
                                       groundtruth_class_labels,
                                       groundtruth_is_difficult_list=None):
    """Add ground truth info of a single image into the evaluation database.

    Args:
        image_key: sha256 key of image content
        groundtruth_boxes: A numpy array of shape [M, 4] representing object box
            coordinates [y_min, x_min, y_max, x_max]
        groundtruth_class_labels: A 1-d numpy array of length M representing
            class labels
        groundtruth_is_difficult_list: A length M numpy boolean array denoting
            whether a ground truth box is a difficult instance or not. To
            support the case that no boxes are difficult, it is by default
            set as None.
    """
    if image_key in self.groundtruth_boxes:
        logging.warn(
            'image %s has already been added to the ground truth database.',
            image_key)
        return

    self.groundtruth_boxes[image_key] = groundtruth_boxes
    self.groundtruth_class_labels[image_key] = groundtruth_class_labels
    if groundtruth_is_difficult_list is None:
        num_boxes = groundtruth_boxes.shape[0]
        groundtruth_is_difficult_list = np.zeros(num_boxes, dtype=bool)
    self.groundtruth_is_difficult_list[
        image_key] = groundtruth_is_difficult_list.astype(dtype=bool)
    self._update_ground_truth_statistics(groundtruth_class_labels,
                                         groundtruth_is_difficult_list)
Example 26
Project: DOTA_models Author: ringringyi File: object_detection_evaluation.py License: Apache License 2.0 | 5 votes |
def evaluate(self):
    """Compute evaluation result.

    Returns:
        average_precision_per_class: float numpy array of average precision for
            each class.
        mean_ap: mean average precision of all classes, float scalar
        precisions_per_class: List of precisions, each precision is a float
            numpy array
        recalls_per_class: List of recalls, each recall is a float numpy array
        corloc_per_class: numpy float array
        mean_corloc: Mean CorLoc score for each class, float scalar
    """
    if (self.num_gt_instances_per_class == 0).any():
        logging.warn(
            'The following classes have no ground truth examples: %s',
            np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)))
    for class_index in range(self.num_class):
        if self.num_gt_instances_per_class[class_index] == 0:
            continue
        scores = np.concatenate(self.scores_per_class[class_index])
        tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
        precision, recall = metrics.compute_precision_recall(
            scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
        self.precisions_per_class.append(precision)
        self.recalls_per_class.append(recall)
        average_precision = metrics.compute_average_precision(precision, recall)
        self.average_precision_per_class[class_index] = average_precision

    self.corloc_per_class = metrics.compute_cor_loc(
        self.num_gt_imgs_per_class,
        self.num_images_correctly_detected_per_class)

    mean_ap = np.nanmean(self.average_precision_per_class)
    mean_corloc = np.nanmean(self.corloc_per_class)
    return (self.average_precision_per_class, mean_ap,
            self.precisions_per_class, self.recalls_per_class,
            self.corloc_per_class, mean_corloc)
Example 27
Project: End-to-end-ASR-Pytorch Author: Alexander-H-Liu File: generate_vocab_file.py License: MIT License | 5 votes |
def main(args):
    if args.mode == "subword":
        logging.warn("Subword model is based on `sentencepiece`.")
        import sentencepiece as splib

        cmd = ("--input={} --model_prefix={} --model_type=bpe "
               "--vocab_size={} --character_coverage={} "
               "--pad_id=0 --eos_id=1 --unk_id=2 --bos_id=-1 "
               "--eos_piece=<eos> --remove_extra_whitespaces=true".format(
                   args.input_file, args.output_file,
                   args.vocab_size, args.character_coverage))

        splib.SentencePieceTrainer.Train(cmd)
    else:
        with open(args.input_file, "r") as f:
            lines = [line.strip("\r\n ") for line in f]

        counter = Counter()
        if args.mode == "word":
            for line in lines:
                counter.update(line.split())
            # In word mode, vocab_list is sorted by frequency
            # Only selected top `vocab_size` vocabularies
            vocab_list = sorted(
                counter.keys(), key=lambda k: counter[k], reverse=True)[:args.vocab_size]
        elif args.mode == "character":
            for line in lines:
                counter.update(line)
            # In character mode, vocab_list is sorted in alphabetical order
            vocab_list = sorted(counter)

        logging.info("Collected totally {} vocabularies.".format(len(counter)))
        logging.info("Selected {} vocabularies.".format(len(vocab_list)))

        with open(args.output_file, "w") as f:
            f.write("\n".join(vocab_list))
Example 28
Project: object_detector_app Author: datitran File: object_detection_evaluation.py License: MIT License | 5 votes |
def add_single_ground_truth_image_info(self,
                                       image_key,
                                       groundtruth_boxes,
                                       groundtruth_class_labels,
                                       groundtruth_is_difficult_list=None):
    """Add ground truth info of a single image into the evaluation database.

    Args:
        image_key: sha256 key of image content
        groundtruth_boxes: A numpy array of shape [M, 4] representing object box
            coordinates [y_min, x_min, y_max, x_max]
        groundtruth_class_labels: A 1-d numpy array of length M representing
            class labels
        groundtruth_is_difficult_list: A length M numpy boolean array denoting
            whether a ground truth box is a difficult instance or not. To
            support the case that no boxes are difficult, it is by default
            set as None.
    """
    if image_key in self.groundtruth_boxes:
        logging.warn(
            'image %s has already been added to the ground truth database.',
            image_key)
        return

    self.groundtruth_boxes[image_key] = groundtruth_boxes
    self.groundtruth_class_labels[image_key] = groundtruth_class_labels
    if groundtruth_is_difficult_list is None:
        num_boxes = groundtruth_boxes.shape[0]
        groundtruth_is_difficult_list = np.zeros(num_boxes, dtype=bool)
    self.groundtruth_is_difficult_list[
        image_key] = groundtruth_is_difficult_list.astype(dtype=bool)
    self._update_ground_truth_statistics(groundtruth_class_labels,
                                         groundtruth_is_difficult_list)
Example 29
Project: object_detector_app Author: datitran File: object_detection_evaluation.py License: MIT License | 5 votes |
def evaluate(self):
    """Compute evaluation result.

    Returns:
        average_precision_per_class: float numpy array of average precision for
            each class.
        mean_ap: mean average precision of all classes, float scalar
        precisions_per_class: List of precisions, each precision is a float
            numpy array
        recalls_per_class: List of recalls, each recall is a float numpy array
        corloc_per_class: numpy float array
        mean_corloc: Mean CorLoc score for each class, float scalar
    """
    if (self.num_gt_instances_per_class == 0).any():
        logging.warn(
            'The following classes have no ground truth examples: %s',
            np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)))
    for class_index in range(self.num_class):
        if self.num_gt_instances_per_class[class_index] == 0:
            continue
        scores = np.concatenate(self.scores_per_class[class_index])
        tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
        precision, recall = metrics.compute_precision_recall(
            scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
        self.precisions_per_class.append(precision)
        self.recalls_per_class.append(recall)
        average_precision = metrics.compute_average_precision(precision, recall)
        self.average_precision_per_class[class_index] = average_precision

    self.corloc_per_class = metrics.compute_cor_loc(
        self.num_gt_imgs_per_class,
        self.num_images_correctly_detected_per_class)

    mean_ap = np.nanmean(self.average_precision_per_class)
    mean_corloc = np.nanmean(self.corloc_per_class)
    return (self.average_precision_per_class, mean_ap,
            self.precisions_per_class, self.recalls_per_class,
            self.corloc_per_class, mean_corloc)
Example 30
Project: pySocialWatcher Author: maraujo File: utils.py License: MIT License | 5 votes |
def get_fake_response():
    response = requests.models.Response()
    response._content = constants.FAKE_DATA_RESPONSE_CONTENT
    response.status_code = 200
    logging.warn("Fake Response created: " + response.content)
    return response