iT邦幫忙

0

MotionNet SourceCode 解讀[#2]-from_file_multisweep_bf_sample_data篇

  • 分享至 

  • xImage
  •  

from_file_multisweep_bf_sample_data()

# NOTE(review): `cls` first parameter suggests this is a @classmethod on a
# point-cloud class (decorator not shown in this excerpt) — confirm in source.
def from_file_multisweep_bf_sample_data(cls,
                                            nusc: 'NuScenes',
                                            ref_sd_rec: Dict,
                                            nsweeps_back: int = 5,
                                            nsweeps_forward: int = 5,
                                            return_trans_matrix: bool = False,
                                            min_distance: float = 1.0):
    """Aggregate LIDAR sweeps around a reference sample_data into one cloud.

    Up to ``nsweeps_back`` past sweeps (including the reference itself) and
    ``nsweeps_forward`` future sweeps are loaded, transformed into the
    reference sensor frame, filtered of near-range points, and concatenated.

    Args:
        nusc: NuScenes dataset handle used to look up poses / calibration.
        ref_sd_rec: reference 'sample_data' record (LIDAR top sensor sample).
        nsweeps_back: max number of past sweeps to merge (reference included).
        nsweeps_forward: max number of future sweeps to merge.
        return_trans_matrix: if True, also return the stacked per-sweep
            transformation matrices (shape (N, 4, 4), past sweeps only).
        min_distance: points within this radius of the sensor are removed.

    Returns:
        (all_pc, all_times) or (all_pc, all_times, trans_matrix_list) where
        all_times holds the per-point time lag relative to the reference
        (positive for past sweeps, negative for future sweeps).
    """

ref_sd_rec 此項為NuScenes光達Top的sensor sample

# Init: start from an empty (nbr_dims, 0) point array and an empty time vector;
# sweeps are hstack-ed onto these as they are processed.
points = np.zeros((cls.nbr_dims(), 0))
all_pc = cls(points)
all_times = np.zeros((1, 0))

# Get reference pose and timestamp for the reference sample_data.
ref_pose_rec = nusc.get('ego_pose', ref_sd_rec['ego_pose_token'])
ref_cs_rec = nusc.get('calibrated_sensor', ref_sd_rec['calibrated_sensor_token'])
ref_time = 1e-6 * ref_sd_rec['timestamp']  # timestamp is in microseconds -> seconds

ref_pose_rec : {'token': 'fdddd75ee1d94f14a09991988dab8b3e', 'timestamp': 1533151603547590, 'rotation': [-0.968669701688471, -0.004043399262151301, -0.007666594265959211, 0.24820129589817977], 'translation': [600.1202137947669, 1647.490776275174, 0.0]}
ref_cs_rec :{'token': 'd051cafdd9fe4d999b413462364d44a0', 'sensor_token': 'dc8b396651c05aedbb9cdaae573bb567', 'translation': [0.985793, 0.0, 1.84019], 'rotation': [0.706749235646644, -0.015300993788500868, 0.01739745181256607, -0.7070846669051719], 'camera_intrinsic': []}
ref_time :1533151603.54759

# Homogeneous transform from the ego-car frame to the reference sensor frame.
# inverse=True because the calibration translation/rotation describe the
# sensor -> car mapping, and we need its inverse here.
ref_from_car = transform_matrix(ref_cs_rec['translation'], Quaternion(ref_cs_rec['rotation']), inverse=True)

# Homogeneous transformation matrix from the global frame to the _current_
# (reference) ego-car frame. inverse=True because the stored ego-pose
# translation/rotation describe the car -> global mapping.
car_from_global = transform_matrix(ref_pose_rec['translation'], Quaternion(ref_pose_rec['rotation']),
                                   inverse=True)

# Aggregate current and previous sweeps.
current_sd_rec = ref_sd_rec    # start the backward walk from the reference "sample_data"
trans_matrix_list = list()
skip_frame = 0  # 0 => keep every frame: k % (skip_frame + 1) is always 0 below

ref_from_car : 4x4 homogeneous transformation matrix
car_from_global : 4x4 homogeneous transformation matrix

# Walk backwards in time from the reference sweep, merging each past sweep
# into the reference sensor frame.
for k in range(nsweeps_back):
    # Load up the pointcloud.
    current_pc = cls.from_file(osp.join(nusc.dataroot, current_sd_rec['filename']))

    # Get past pose.
    # ego_pose record of this frame: token, timestamp, rotation, translation.
    current_pose_rec = nusc.get('ego_pose', current_sd_rec['ego_pose_token'])
    # car -> global transform at this frame's timestamp.
    global_from_car = transform_matrix(current_pose_rec['translation'],
                                       Quaternion(current_pose_rec['rotation']), inverse=False)

    
    # Homogeneous transformation matrix from sensor coordinate frame to ego car frame.
    # Calibration record of this frame's sensor.
    current_cs_rec = nusc.get('calibrated_sensor', current_sd_rec['calibrated_sensor_token'])
    # current sensor frame -> car frame.
    car_from_current = transform_matrix(current_cs_rec['translation'], Quaternion(current_cs_rec['rotation']),inverse=False)
    

    # Fuse four transformation matrices into one and perform transform.
    # reduce folds np.dot left-to-right over the list,
    # e.g. reduce(add, [1,2,3,4,5]) = ((((1+2)+3)+4)+5.
    # Composition (applied right-to-left to points):
    # current sensor -> car -> global -> reference car -> reference sensor,
    # i.e. it maps this sweep's points into the reference sensor frame.
    trans_matrix = reduce(np.dot, [ref_from_car, car_from_global, global_from_car, car_from_current])
    current_pc.transform(trans_matrix)
    
    # Record this sweep's transformation matrix.
    # Collect the transformation matrix
    trans_matrix_list.append(trans_matrix)
    
    # Remove close points and add timevector.
    # Removes points within min_distance (a radius) of the ego vehicle.
    current_pc.remove_close(min_distance)
    # time_lag is the time difference from this frame to the reference frame.
    time_lag = ref_time - 1e-6 * current_sd_rec['timestamp']  # positive difference
    
    # skip_frame is 0 above, so k % (skip_frame + 1) == 0 for every k and the
    # real per-point time vector is always used here; the else branch would
    # only fire if frames were skipped (one dummy value per skipped frame).
    if k % (skip_frame + 1) == 0:
        times = time_lag * np.ones((1, current_pc.nbr_points()))
    else:
        times = time_lag * np.ones((1, 1))  # dummy value
    all_times = np.hstack((all_times, times))
    
    # Stack this past sweep's (already transformed) points onto the aggregate.
    # Merge with key pc.
    if k % (skip_frame + 1) == 0:
        all_pc.points = np.hstack((all_pc.points, current_pc.points))
    else:
        tmp_points = np.zeros((4, 1), dtype=np.float32)
        all_pc.points = np.hstack((all_pc.points, tmp_points))  # dummy value

    if current_sd_rec['prev'] == '':  # Abort if there are no previous sweeps.
        break
    else:
        current_sd_rec = nusc.get('sample_data', current_sd_rec['prev'])

# Stack the per-sweep matrices: shape becomes (N, 4, 4).
trans_matrix_list = np.stack(trans_matrix_list, axis=0)
# Aggregate the future sweeps

# Reset the walk back to the reference frame before going forward in time.
current_sd_rec = ref_sd_rec

# Abort if there are no future sweeps. Return.
# NOTE(review): this early return ignores return_trans_matrix — a caller that
# asked for the matrices gets only two values here; confirm this is intended.
if current_sd_rec['next'] == '':
    return all_pc, np.squeeze(all_times, axis=0)
else:
    current_sd_rec = nusc.get('sample_data', current_sd_rec['next'])

# Walk forwards in time from the reference sweep, merging each future sweep
# into the reference sensor frame (same composition as the backward loop).
for k in range(1, nsweeps_forward + 1):
    # Load up the pointcloud.
    current_pc = cls.from_file(osp.join(nusc.dataroot, current_sd_rec['filename']))

    # Get the pose for this future sweep.
    # ego_pose of the future sweep, and the car -> global transform.
    current_pose_rec = nusc.get('ego_pose', current_sd_rec['ego_pose_token'])
    global_from_car = transform_matrix(current_pose_rec['translation'],
                                       Quaternion(current_pose_rec['rotation']), inverse=False)
   
    # current sensor -> car transform from the calibration record.
    # Homogeneous transformation matrix from sensor coordinate frame to ego car frame.
    current_cs_rec = nusc.get('calibrated_sensor', current_sd_rec['calibrated_sensor_token'])
    car_from_current = transform_matrix(current_cs_rec['translation'], Quaternion(current_cs_rec['rotation']),
                                        inverse=False)
    
    # Fuse four transformation matrices into one and perform transform.
    # NOTE: unlike the backward loop, trans_matrix is NOT appended to
    # trans_matrix_list here.
    trans_matrix = reduce(np.dot, [ref_from_car, car_from_global, global_from_car, car_from_current])
    current_pc.transform(trans_matrix)

    # Remove close points and add timevector.
    current_pc.remove_close(min_distance)
    # The time lag is negative here because this is a future frame.
    time_lag = ref_time - 1e-6 * current_sd_rec['timestamp']  # negative difference

    # skip_frame is 0, so the real per-point time vector is always used.
    if k % (skip_frame + 1) == 0:
        times = time_lag * np.ones((1, current_pc.nbr_points()))
    else:
        times = time_lag * np.ones((1, 1))  # dummy value
    all_times = np.hstack((all_times, times))

    # Merge with key pc.
    if k % (skip_frame + 1) == 0:
        all_pc.points = np.hstack((all_pc.points, current_pc.points))
    else:
        tmp_points = np.zeros((4, 1), dtype=np.float32)
        all_pc.points = np.hstack((all_pc.points, tmp_points))  # dummy value

    if current_sd_rec['next'] == '':  # Abort if there are no future sweeps.
        break
    else:
        current_sd_rec = nusc.get('sample_data', current_sd_rec['next'])

# Final return: aggregated cloud and flattened time-lag vector; optionally the
# stacked (N, 4, 4) transforms (collected for past sweeps only — see the loops).
if return_trans_matrix:
    return all_pc, np.squeeze(all_times, 0), trans_matrix_list
else:
    return all_pc, np.squeeze(all_times, 0)

圖片
  直播研討會
圖片
{{ item.channelVendor }} {{ item.webinarstarted }} |
{{ formatDate(item.duration) }}
直播中

尚未有邦友留言

立即登入留言