首页 > 编程知识 正文

OHEM(在线困难样本挖掘)代码解析:损失计算与困难样本筛选

时间:2023-05-05 11:47:08 阅读:19184 作者:3668

传送门:相关OHEM介绍:改进检测模型—OHEM和Focal-Loss算法总结代码地址: OHEM 1.前言关于OHEM的介绍请参考上面提供的链接。这里主要对OHEM是如何工作的进行简单的分析;整个OHEM的代码也不多,所以这里把算法的主要代码整理如下。

1 )计算检测器的损失。该部分使用与最后的fc6、fc7预测头相同的共享参数,预测分类和边界框回归的结果,将预测的结果与GT进行比较,得到分类和边界框回归的loss。这里的损失是把两种损失加起来的。

2 )使用阈值为0.7的NMS预处理检测框一次,去除无效的检测框;

3 ) NMS后的检测框按loss大小顺序排列,选择一定数量(由两个数取最小决定)的边界框返回。

以下是OHEM在网络定义文件中的定义,便于以后查看相关代码时查找相应的条目。

layer { name : ' hard _ ROI _ mining ' type : ' python ' bottom 3360 ' cls _ prob _ readonly ' bottom 3360 ' box _ ' weights ' bottom 3360 ' bbox _ outside _ weights p : ' bbox _ targets _ hard ' top : ' bbox _ inside _ weights _ hargets 60 false propagate _ down : false propagate _ down 3360 false propagate _ down 3360 e : ' ROI _ datagate ram _ str : ' ' num _ classes ' 3360 ' #6}2. ohem代码简单整理2.1 ohemdatalayerclassohemdatalayer (caffe.layer ) : ' ' top ) : ' ' setuptheohemdatalayer.' ' # parsethelayerparameterstring, whichmustbevalidyamllayer _ params=YAML.load (self.param _ str _ ) self._ num _ classes=layer _ params [ ] 使用dict将Bottom的blob名称和索引转换为' cls_prob_readonly': 0, 与' Box_pred_关联的' labels ' :3 } if CFG.train.bbox _ reg : #有边界框的self._ name _ to _ bottom _ map [=4self._ name _ to _ bottom _ map=5self._ name _ to _ top _ map={ } #同样,top的blob名称也必须与索引相关联……#前方selectroisusingohem.useroistogetblobsandcopythemintothislayer ' stopblobvector.' ' cls _ prob=bottom [0].data # botbot . data rois=bottom [2].data labels=bottom [3] .获取dataifcfg.train.bbox _ reg 330.databbox _ inside _ weights

# NOTE(review): this code was recovered from a whitespace-mangled article dump.
# The enclosing Caffe OHEM layer's `forward` method header was garbled beyond
# recovery, so its loss computation is reconstructed below as standalone
# helpers (`smoothL1`, `compute_per_roi_loss`); `get_ohem_minibatch` and
# `select_hard_examples` keep their original signatures.
# `cfg` (config) and `nms` are project names defined elsewhere in the repo.


def smoothL1(x):
    """Smooth-L1 loss: 0.5 * x^2 when |x| < 1, |x| - 0.5 otherwise."""
    if abs(x) < 1:
        return 0.5 * x * x
    return abs(x) - 0.5


def compute_per_roi_loss(cls_prob, labels, bbox_pred=None, bbox_targets=None,
                         bbox_inside_weights=None, bbox_outside_weights=None,
                         bbox_reg=False):
    """Compute the per-RoI loss used to rank hard examples.

    loss[i] = -log p(correct class)  (+ smooth-L1 bbox loss for foreground
    RoIs when bbox_reg is True).

    Args:
        cls_prob: (num_rois, num_classes) class probabilities.
        labels: length-num_rois integer class labels (0 = background).
        bbox_pred / bbox_targets: (num_rois, 4*num_classes) regression
            predictions and targets (required when bbox_reg).
        bbox_inside_weights / bbox_outside_weights: per-target masks/weights;
            inside weights select the active target columns.
        bbox_reg: whether to add the bounding-box regression loss.

    Returns:
        np.ndarray of shape (num_rois,) with the combined loss.
    """
    flt_min = np.finfo(float).eps
    # Classification loss: negative log-likelihood, clamped away from log(0).
    loss = np.array([-1 * np.log(max(cls_prob[i, label], flt_min))
                     for i, label in enumerate(labels)])
    if bbox_reg:
        bbox_loss = np.zeros(len(labels))
        # Only foreground RoIs (label > 0) contribute a regression loss.
        for i in np.where(np.asarray(labels) > 0)[0]:
            active = np.where(bbox_inside_weights[i, :] != 0)[0]
            diffs = bbox_inside_weights[i, active] * (
                bbox_pred[i, active] - bbox_targets[i, active])
            bbox_loss[i] = sum(bbox_outside_weights[i, active] *
                               [smoothL1(d) for d in diffs])
        # Elementwise ndarray addition; the original `loss += bbox_loss` on a
        # Python list would have *extended* the list instead of summing.
        loss = loss + bbox_loss
    return loss


def get_ohem_minibatch(loss, rois, labels, bbox_targets=None,
                       bbox_inside_weights=None, bbox_outside_weights=None):
    """Given rois and their loss, construct a minibatch using OHEM.

    Optionally de-duplicates overlapping RoIs with NMS (scored by loss) per
    source image and per class before picking the highest-loss examples.

    Returns a dict of blobs: 'rois_hard', 'labels_hard' and, when bbox
    regression targets are given, the corresponding '*_hard' target/weight
    blobs, all restricted to the selected hard indices.
    """
    loss = np.array(loss)
    if cfg.TRAIN.OHEM_USE_NMS:
        # Do NMS using loss for de-dup and diversity.
        keep_inds = []
        nms_thresh = cfg.TRAIN.OHEM_NMS_THRESH  # typically 0.7
        source_img_ids = [roi[0] for roi in rois]  # image index within batch
        for img_id in np.unique(source_img_ids):
            for label in np.unique(labels):
                sel_indx = np.where(np.logical_and(labels == label,
                                                   source_img_ids == img_id))[0]
                if not len(sel_indx):
                    continue
                # Boxes are (x1, y1, x2, y2, score) with loss as the score.
                boxes = np.concatenate(
                    (rois[sel_indx, 1:], loss[sel_indx][:, np.newaxis]),
                    axis=1).astype(np.float32)
                keep_inds.extend(sel_indx[nms(boxes, nms_thresh)])
        hard_keep_inds = select_hard_examples(loss[keep_inds])
        hard_inds = np.array(keep_inds)[hard_keep_inds]
    else:
        hard_inds = select_hard_examples(loss)
    blobs = {'rois_hard': rois[hard_inds, :].copy(),
             'labels_hard': labels[hard_inds].copy()}
    if bbox_targets is not None:
        assert cfg.TRAIN.BBOX_REG
        blobs['bbox_targets_hard'] = bbox_targets[hard_inds, :].copy()
        blobs['bbox_inside_weights_hard'] = \
            bbox_inside_weights[hard_inds, :].copy()
        blobs['bbox_outside_weights_hard'] = \
            bbox_outside_weights[hard_inds, :].copy()
    return blobs


def select_hard_examples(loss):
    """Select hard rois: indices of the top-BATCH_SIZE largest losses.

    (Explore more ways of selecting examples here; e.g., sampling.)
    """
    # Sort descending by loss and keep at most cfg.TRAIN.BATCH_SIZE indices.
    sorted_indices = np.argsort(loss)[::-1]
    return sorted_indices[0:np.minimum(len(loss), cfg.TRAIN.BATCH_SIZE)]

版权声明:该文观点仅代表作者本人。处理文章:请发送邮件至 三1五14八八95#扣扣.com 举报,一经查实,本站将立刻删除。