# Copyright (c) OpenMMLab. All rights reserved.
"""Point-cloud detection demo.

Parses CLI arguments, builds a ``LidarDet3DInferencer`` and runs it on a
single point-cloud ``.bin`` file, optionally showing and/or saving the
visualization and prediction results.
"""
import logging
import os
from argparse import ArgumentParser

from mmengine.logging import print_log

from mmdet3d.apis import LidarDet3DInferencer


def parse_args():
    """Parse CLI args and split them into inferencer-init args and call args.

    Returns:
        tuple[dict, dict]: ``(init_args, call_args)`` where ``init_args``
        holds the keys consumed by ``LidarDet3DInferencer(**init_args)``
        (``model``, ``weights``, ``device``) and ``call_args`` holds the
        keyword arguments for the inferencer call itself.
    """
    parser = ArgumentParser()
    parser.add_argument(
        '--pcd',
        default='/home/robotics/mmdetection3d/demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__LIDAR_TOP__1532402927647951.pcd.bin',
        help='Point cloud file')
    parser.add_argument(
        '--model',
        default='/home/robotics/mmdetection3d/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py',
        help='Config file')
    parser.add_argument(
        '--weights',
        default='/home/robotics/mmdetection3d/checkpoint/centerpoint_0075voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus_20220810_011659-04cb3a3b.pth',
        help='Checkpoint file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--pred-score-thr',
        type=float,
        default=0.3,
        help='bbox score threshold')
    parser.add_argument(
        '--out-dir',
        type=str,
        default='outputs',
        help='Output directory of prediction and visualization results.')
    # NOTE(review): `action='store_true'` together with `default=True` makes
    # this flag inert — `call_args['show']` is always True unless the DISPLAY
    # check below forces it off. Kept as-is to preserve the author's
    # show-by-default behavior; confirm whether an opt-out flag was intended.
    parser.add_argument(
        '--show',
        default=True,
        action='store_true',
        help='Show online visualization results')
    parser.add_argument(
        '--wait-time',
        type=float,
        default=-1,
        help='The interval of show (s). Demo will be blocked in showing '
        'results, if wait_time is -1. Defaults to -1.')
    parser.add_argument(
        '--no-save-vis',
        action='store_true',
        help='Do not save detection visualization results')
    parser.add_argument(
        '--no-save-pred',
        action='store_true',
        help='Do not save detection prediction results')
    parser.add_argument(
        '--print-result',
        action='store_true',
        help='Whether to print the results.')
    call_args = vars(parser.parse_args())

    # The inferencer expects its input as {'points': <path>}.
    call_args['inputs'] = dict(points=call_args.pop('pcd'))

    # Nothing would be written, so disable the output directory entirely.
    if call_args['no_save_vis'] and call_args['no_save_pred']:
        call_args['out_dir'] = ''

    # These three keys initialize the inferencer; the rest are call kwargs.
    init_kws = ['model', 'weights', 'device']
    init_args = {}
    for init_kw in init_kws:
        init_args[init_kw] = call_args.pop(init_kw)

    # NOTE: If your operating environment does not have a display device,
    # (e.g. a remote server), you can save the predictions and visualize
    # them in local devices.
    if os.environ.get('DISPLAY') is None and call_args['show']:
        print_log(
            'Display device not found. `--show` is forced to False',
            logger='current',
            level=logging.WARNING)
        call_args['show'] = False

    return init_args, call_args


def main():
    """Run single-file LiDAR detection and report where results were saved."""
    # TODO: Support inference of point cloud numpy file.
    init_args, call_args = parse_args()

    inferencer = LidarDet3DInferencer(**init_args)
    inferencer(**call_args)

    if call_args['out_dir'] != '' and not (call_args['no_save_vis']
                                           and call_args['no_save_pred']):
        print_log(
            f'results have been saved at {call_args["out_dir"]}',
            logger='current')


if __name__ == '__main__':
    main()
笔记:在 `inputs = self.preprocess(ori_inputs, batch_size=batch_size, **preprocess_kwargs)` 中,self.preprocess 这个类方法是一个生成器。ori_inputs 是一个 list,存储点云文件的路径;其功能是依次把 ori_inputs 中的元素作为输入参数送入 self.preprocess,并通过

    for data in inputs:
        pass

这样的 for 循环来驱动 self.preprocess 生成器真正执行。

在 self.preprocess 内部,`map(self.pipeline, inputs)`(这里的 inputs 是点云文件地址)只是在输入与 pipeline 中的四个类之间建立了映射关系,并没有运行任何函数:
- LidarDet3DInferencerLoader
- LoadPointsFromFile
- MultiScaleFlipAug3D
- Pack3DDetInputs

函数真正被调用是在 `yield from map(self.collate_fn, chunked_data)` 处;更具体地,在 `_get_chunk_data(self, inputs: Iterable, chunk_size: int)` 函数中,`processed_data = next(inputs_iter)` 这一行触发了 pipeline 中各类方法的执行。

pipeline 的执行步骤:
1. 加载点云:load 点云 bin 文件的部分在 mmdetection3d/mmdet3d/datasets/transforms/loading.py 的第 645 行;在 load 点云之前先生成一个 dict(loading.py 第 1125 行);第 679 行生成一个 LiDARPoints(BasePoints) 类实例。
2. loading.py 第 412 行:将原始雷达点云中的点 remove_close。
3. /home/robotics/mmdetection3d/mmdet3d/datasets/transforms/test_time_aug.py 第 69 行:进行 augment。
4. /home/robotics/mmdetection3d/mmdet3d/datasets/transforms/transforms_3d.py 第 775 行:rotate、scale and translate。
5. /home/robotics/mmdetection3d/mmdet3d/datasets/transforms/formating.py 第 89 行:Pack3DDetInputs。
下面是魔改版本的 demo:手动展开 LidarDet3DInferencer 的推理流程(逐个调用 pipeline 的 transform),便于单步调试。
# Copyright (c) OpenMMLab. All rights reserved.
"""Hacked demo: manually unrolled ``LidarDet3DInferencer`` pipeline.

Instead of calling ``inferencer(**call_args)``, this version dispatches the
kwargs itself, runs each pipeline transform one by one on a single point-cloud
file, then calls ``forward``/``visualize``/``postprocess`` directly — useful
for stepping through the preprocessing pipeline in a debugger.
"""
import logging
import os
from argparse import ArgumentParser

from mmengine.logging import print_log

from mmdet3d.apis import LidarDet3DInferencer


def parse_args():
    """Parse CLI args and split them into inferencer-init args and call args.

    Returns:
        tuple[dict, dict]: ``(init_args, call_args)`` — see the original demo;
        only ``init_args`` is actually consumed by :func:`main` below.
    """
    parser = ArgumentParser()
    parser.add_argument(
        '--pcd',
        default='/home/robotics/mmdetection3d/demo/data/nuscenes/n015-2018-07-24-11-22-45+0800__LIDAR_TOP__1532402927647951.pcd.bin',
        help='Point cloud file')
    parser.add_argument(
        '--model',
        default='/home/robotics/mmdetection3d/configs/centerpoint/centerpoint_voxel0075_second_secfpn_head-circlenms_8xb4-cyclic-20e_nus-3d.py',
        help='Config file')
    parser.add_argument(
        '--weights',
        default='/home/robotics/mmdetection3d/checkpoint/centerpoint_0075voxel_second_secfpn_circlenms_4x8_cyclic_20e_nus_20220810_011659-04cb3a3b.pth',
        help='Checkpoint file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--pred-score-thr',
        type=float,
        default=0.3,
        help='bbox score threshold')
    parser.add_argument(
        '--out-dir',
        type=str,
        default='outputs',
        help='Output directory of prediction and visualization results.')
    # NOTE(review): `action='store_true'` with `default=True` makes this flag
    # inert (always True unless the DISPLAY check forces it off); kept as-is.
    parser.add_argument(
        '--show',
        default=True,
        action='store_true',
        help='Show online visualization results')
    parser.add_argument(
        '--wait-time',
        type=float,
        default=-1,
        help='The interval of show (s). Demo will be blocked in showing '
        'results, if wait_time is -1. Defaults to -1.')
    parser.add_argument(
        '--no-save-vis',
        action='store_true',
        help='Do not save detection visualization results')
    parser.add_argument(
        '--no-save-pred',
        action='store_true',
        help='Do not save detection prediction results')
    parser.add_argument(
        '--print-result',
        action='store_true',
        help='Whether to print the results.')
    call_args = vars(parser.parse_args())

    call_args['inputs'] = dict(points=call_args.pop('pcd'))

    if call_args['no_save_vis'] and call_args['no_save_pred']:
        call_args['out_dir'] = ''

    init_kws = ['model', 'weights', 'device']
    init_args = {}
    for init_kw in init_kws:
        init_args[init_kw] = call_args.pop(init_kw)

    # NOTE: If your operating environment does not have a display device,
    # (e.g. a remote server), you can save the predictions and visualize
    # them in local devices.
    if os.environ.get('DISPLAY') is None and call_args['show']:
        print_log(
            'Display device not found. `--show` is forced to False',
            logger='current',
            level=logging.WARNING)
        call_args['show'] = False

    return init_args, call_args


def main():
    """Run the unrolled inference flow on one hardcoded point-cloud file.

    Returns:
        dict: ``{'predictions': [...], 'visualization': [...]}`` as produced
        by ``inferencer.postprocess``.
    """
    init_args, call_args = parse_args()
    inferencer = LidarDet3DInferencer(**init_args)

    # Hardcoded equivalents of the CLI call kwargs (mirrors parse_args
    # defaults) so every step of the unrolled flow is explicit.
    kwargs = {
        'pred_score_thr': 0.3,
        'out_dir': 'outputs',
        'show': True,
        'wait_time': -1,
        'no_save_vis': False,
        'no_save_pred': False,
        'print_result': False,
    }
    pcd_path = ('/home/robotics/mmdetection3d/demo/data/nuscenes/'
                'n015-2018-07-24-11-22-45+0800__LIDAR_TOP__'
                '1532402927647951.pcd.bin')
    inputs = {'points': pcd_path}

    # Split the kwargs exactly as the inferencer's __call__ would.
    (
        preprocess_kwargs,
        forward_kwargs,
        visualize_kwargs,
        postprocess_kwargs,
    ) = inferencer._dispatch_kwargs(**kwargs)
    cam_type = preprocess_kwargs.pop('cam_type', 'CAM2')
    ori_inputs = inferencer._inputs_to_list(inputs, cam_type=cam_type)

    # Manually unrolled preprocessing: apply the four pipeline transforms
    # (loader, point loading, test-time aug, packing) one after another so
    # each intermediate `data` dict can be inspected.
    data = {'points': pcd_path}
    for transform in inferencer.pipeline.transforms[:4]:
        data = transform(data)

    # Emulate the collate step for a batch of one: forward() expects lists.
    data['data_samples'] = [data['data_samples']]
    data['inputs']['points'] = [data['inputs']['points']]

    preds = []
    preds.extend(inferencer.forward(data, **forward_kwargs))
    visualization = inferencer.visualize(ori_inputs, preds,
                                         **visualize_kwargs)
    # BUGFIX: `return_datasamples` was referenced without ever being defined
    # (NameError at runtime). False matches the inferencer's default, i.e.
    # predictions are converted to plain dicts.
    return_datasamples = False
    results = inferencer.postprocess(preds, visualization,
                                     return_datasamples,
                                     **postprocess_kwargs)

    results_dict = {'predictions': [], 'visualization': []}
    results_dict['predictions'].extend(results['predictions'])
    if results['visualization'] is not None:
        results_dict['visualization'].extend(results['visualization'])
    return results_dict


if __name__ == '__main__':
    results_dict = main()
    print(results_dict)