from official.legacy.detection.utils import class_utils


class OlnCOCOevalWrapper(cocoeval.COCOeval):
  """COCOeval wrapper class.

  Rewritten based on cocoapi (pycocotools/cocoeval.py).

  This class wraps the COCOeval API object and provides the following
  additional functionality:
    1. Summarizes the 'all', 'seen', and 'novel' split print-outs, e.g., AR at
       different top-K proposal budgets, and AR and AP results for the 'seen'
       and 'novel' class splits.
  """

  def __init__(self, coco_gt, coco_dt, iou_type='box'):
    super(OlnCOCOevalWrapper, self).__init__(
        cocoGt=coco_gt, cocoDt=coco_dt, iouType=iou_type)

  def summarize(self):
    """Computes and displays summary metrics for evaluation results.

    Delta to the standard cocoapi function: additional Average Recall metrics
    are produced at different top-K proposal budgets. Note that this function
    can *only* be applied with the default parameter setting.

    Raises:
      Exception: If accumulate() has not been run first.
    """

    def _summarize(ap=1, iou_thr=None, area_rng='all', max_dets=100):
      p = self.params
      i_str = (' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = '
               '{:0.3f}')
      title_str = 'Average Precision' if ap == 1 else 'Average Recall'
      type_str = '(AP)' if ap == 1 else '(AR)'
      iou_str = '{:0.2f}:{:0.2f}'.format(
          p.iouThrs[0],
          p.iouThrs[-1]) if iou_thr is None else '{:0.2f}'.format(iou_thr)

      aind = [i for i, a_rng in enumerate(p.areaRngLbl) if a_rng == area_rng]
      mind = [i for i, m_det in enumerate(p.maxDets) if m_det == max_dets]
      if ap == 1:
        # Dimension of precision: [TxRxKxAxM].
        s = self.eval['precision']
        # IoU.
        if iou_thr is not None:
          t = np.where(iou_thr == p.iouThrs)[0]
          s = s[t]
        s = s[:, :, :, aind, mind]
      else:
        # Dimension of recall: [TxKxAxM].
        s = self.eval['recall']
        if iou_thr is not None:
          t = np.where(iou_thr == p.iouThrs)[0]
          s = s[t]
        s = s[:, :, aind, mind]

      if not (s[s > -1]).any():
        mean_s = -1
      else:
        mean_s = np.mean(s[s > -1])
      print(
          i_str.format(title_str, type_str, iou_str, area_rng, max_dets,
                       mean_s))
      return mean_s

    def _summarize_dets():
      stats = np.zeros((14,))
      stats[0] = _summarize(1)
      stats[1] = _summarize(1, iou_thr=.5)
      stats[2] = _summarize(1, iou_thr=.75)
      stats[3] = _summarize(1, area_rng='small')
      stats[4] = _summarize(1, area_rng='medium')
      stats[5] = _summarize(1, area_rng='large')

      stats[6] = _summarize(0, max_dets=self.params.maxDets[0])  # 10
      stats[7] = _summarize(0, max_dets=self.params.maxDets[1])  # 20
      stats[8] = _summarize(0, max_dets=self.params.maxDets[2])  # 50
      stats[9] = _summarize(0, max_dets=self.params.maxDets[3])  # 100
      stats[10] = _summarize(0, max_dets=self.params.maxDets[4])  # 200

      stats[11] = _summarize(0, area_rng='small', max_dets=10)
      stats[12] = _summarize(0, area_rng='medium', max_dets=10)
      stats[13] = _summarize(0, area_rng='large', max_dets=10)
      return stats

    if not self.eval:
      raise Exception('Please run accumulate() first')
    summarize = _summarize_dets
    self.stats = summarize()
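
# --- Hedged usage sketch (illustration only; not part of the original code).
# It shows how OlnCOCOevalWrapper is expected to be driven: like the stock
# pycocotools COCOeval, except that `params.maxDets` must supply the five
# top-K budgets indexed by `_summarize_dets` (e.g. [10, 20, 50, 100, 200]).
# The helper name and the 'bbox' iou_type below are assumptions; `coco_gt` and
# `coco_dt` are assumed to be pycocotools COCO objects (the latter typically
# built via coco_gt.loadRes(...)).
def _example_oln_summarize(coco_gt, coco_dt):
  """Runs proposal evaluation and returns the 14-element stats vector."""
  evaluator = OlnCOCOevalWrapper(coco_gt, coco_dt, iou_type='bbox')
  evaluator.params.maxDets = [10, 20, 50, 100, 200]
  evaluator.evaluate()    # Per-image, per-category matching.
  evaluator.accumulate()  # Must run before summarize().
  evaluator.summarize()   # Prints AP plus AR@{10, 20, 50, 100, 200}.
  return evaluator.stats

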
class OlnCOCOevalXclassWrapper(OlnCOCOevalWrapper):
  """COCOeval wrapper class.

  Rewritten based on cocoapi (pycocotools/cocoeval.py).
  Delta to the standard cocoapi: detections that hit 'seen'-class objects are
  ignored in the top-K proposals.

  This class wraps the COCOeval API object and provides the following
  additional functionality:
    1. Includes an ignore-class split (e.g., 'voc' or 'nonvoc').
    2. Does not count (i.e., ignores) box proposals hitting the ignore-class
       when evaluating Average Recall at top-K proposals.
  """

  def __init__(self, coco_gt, coco_dt, iou_type='box'):
    super(OlnCOCOevalXclassWrapper, self).__init__(
        coco_gt=coco_gt, coco_dt=coco_dt, iou_type=iou_type)

  def evaluateImg(self, img_id, cat_id, a_rng, max_det):
    p = self.params
    if p.useCats:
      gt = self._gts[img_id, cat_id]
      dt = self._dts[img_id, cat_id]
    else:
      gt, dt = [], []
      for c_id in p.catIds:
        gt.extend(self._gts[img_id, c_id])
        dt.extend(self._dts[img_id, c_id])

    if not gt and not dt:
      return None

    for g in gt:
      if g['ignore'] or (g['area'] < a_rng[0] or g['area'] > a_rng[1]):
        g['_ignore'] = 1
      else:
        g['_ignore'] = 0
      # Class manipulation: ignore the 'ignored_split'.
      if 'ignored_split' in g and g['ignored_split'] == 1:
        g['_ignore'] = 1

    # Sort detections by highest score first, and ground truth with ignores
    # last.
    gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
    gt = [gt[i] for i in gtind]
    dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
    dt = [dt[i] for i in dtind[0:max_det]]
    iscrowd = [int(o['iscrowd']) for o in gt]
    # Load computed IoUs.
    # ious = self.ious[img_id, cat_id][:, gtind] if len(
    #     self.ious[img_id, cat_id]) > 0 else self.ious[img_id, cat_id]
    if self.ious[img_id, cat_id].any():
      ious = self.ious[img_id, cat_id][:, gtind]
    else:
      ious = self.ious[img_id, cat_id]

    tt = len(p.iouThrs)
    gg = len(gt)
    dd = len(dt)
    gtm = np.zeros((tt, gg))
    dtm = np.zeros((tt, dd))
    gt_ig = np.array([g['_ignore'] for g in gt])
    dt_ig = np.zeros((tt, dd))
    # Indicator of whether the gt object class is in the ignored split (note:
    # this assumes every gt annotation carries an 'ignored_split' field).
    gt_ig_split = np.array([g['ignored_split'] for g in gt])
    dt_ig_split = np.zeros((dd))

    if ious.any():
      for tind, t in enumerate(p.iouThrs):
        for dind, d in enumerate(dt):
          # Information about the best match so far (m=-1 -> unmatched).
          iou = min([t, 1 - 1e-10])
          m = -1
          for gind, g in enumerate(gt):
            # If this gt is already matched and is not a crowd, continue.
            if gtm[tind, gind] > 0 and not iscrowd[gind]:
              continue
            # If dt is matched to a regular gt and we reach ignored gts, stop.
            if m > -1 and gt_ig[m] == 0 and gt_ig[gind] == 1:
              break
            # Continue to the next gt unless a better match is made.
            if ious[dind, gind] < iou:
              continue
            # If the match is successful and best so far, store it.
            iou = ious[dind, gind]
            m = gind
          # If a match was made, store the id of the match for both dt and gt.
          if m == -1:
            continue
          dt_ig[tind, dind] = gt_ig[m]
          dtm[tind, dind] = gt[m]['id']
          gtm[tind, m] = d['id']

          # Activate to ignore the seen-class detections.
          if tind == 0:  # Register only once; tind > 0 is also fine.
            dt_ig_split[dind] = gt_ig_split[m]

    # Set unmatched detections outside of the area range to ignore.
    a = np.array([d['area'] < a_rng[0] or d['area'] > a_rng[1] for d in dt
                 ]).reshape((1, len(dt)))
    dt_ig = np.logical_or(dt_ig, np.logical_and(dtm == 0, np.repeat(a, tt, 0)))

    # Activate to ignore the seen-class detections.
    # Keep only the eval split (e.g., nonvoc) and ignore the seen split
    # (e.g., voc).
    if dt_ig_split.sum() > 0:
      dtm = dtm[:, dt_ig_split == 0]
      dt_ig = dt_ig[:, dt_ig_split == 0]
      len_dt = min(max_det, len(dt))
      dt = [dt[i] for i in range(len_dt) if dt_ig_split[i] == 0]

    # Store results for the given image and category.
    return {
        'image_id': img_id,
        'category_id': cat_id,
        'aRng': a_rng,
        'maxDet': max_det,
        'dtIds': [d['id'] for d in dt],
        'gtIds': [g['id'] for g in gt],
        'dtMatches': dtm,
        'gtMatches': gtm,
        'dtScores': [d['score'] for d in dt],
        'gtIgnore': gt_ig,
        'dtIgnore': dt_ig,
    }
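
# --- Hedged annotation sketch (illustration only; not part of the original
# code). `evaluateImg` above expects every ground-truth annotation to carry an
# 'ignored_split' flag: 1 for the seen/ignored split, 0 for the split under
# evaluation. The helper name and the `seen_class_ids` argument are
# assumptions; in practice the seen split would come from the dataset config
# (e.g. the VOC category ids when evaluating the 'nonvoc' split).
def _example_mark_ignored_split(coco_gt, seen_class_ids):
  """Tags gt annotations so seen-class hits are ignored in AR@K."""
  seen_class_ids = set(seen_class_ids)
  for ann in coco_gt.dataset['annotations']:
    ann['ignored_split'] = 1 if ann['category_id'] in seen_class_ids else 0

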
class MetricWrapper(object):
  """Metric Wrapper of the COCO evaluator."""
  # This is only a wrapper for COCO metric and works only on numpy arrays. So it