fix issue: depth occasionally has negative numbers
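Unity's rendered depth images occasionally contain negative values; this commit clamps them to zero with cv::max in UnityBridge, adds an out-of-range check to the dataset loading script, and cleans up several configuration comments.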

This commit is contained in:
TJU_Lu 2024-12-23 11:05:45 +08:00
parent 43cf7bea6d
commit 09e832c829
6 changed files with 48 additions and 45 deletions

View File

@ -1,10 +1,10 @@
quadrotor_env:
collect_data: yes # yes in Data Collection (random init state); no in Imitation Learning and Testing
bounding_box: [60.0, 60.0, 2.0] # spawn quadrotor within this bounding box
bounding_box_origin: [-10, 20, 2.5]
sim_dt: 0.1 # sim_dt in imitation learning and testing
data_collection:
data_collection: # range of random attitude in data collection
roll_var: 0.01
pitch_var: 0.01

View File

@ -1,14 +1,14 @@
main_loop_freq: 30
unity_render: yes
scene_id: 4 # 0 wasteland, 1 japanese street, 4 emptyforest in standalone (SR)
odom_topic: /juliett/ground_truth/odom
scene_id: 4 # 0 wasteland, 1 japanese street, 4 empty forest in standalone (SR)
odom_topic: /juliett/ground_truth/odom # Topic of UAV odometry for render
quad_size: 0.1
ply_path: "/flightrender/RPG_Flightmare/pointcloud_data/"
ply_path: "/flightrender/RPG_Flightmare/pointcloud_data/" # temporary path for saving the point cloud (for visualization)
rgb_camera_left:
on: yes
t_BC: [0.0, 0.0, 0.1] # translational vector of the camera with repect to the body frame
r_BC: [0.0, -5.0, -90] # rotational angle (roll, pitch, yaw) of the camera in degree.
t_BC: [0.0, 0.0, 0.1] # translational vector of the camera with respect to the body frame
r_BC: [0.0, -5.0, -90] # rotation angles (roll, pitch, yaw) of the camera in degrees (see the pose sketch after this file's diff)
width: 160
height: 90
fov: 90.0 # Horizontal FOV
@ -16,7 +16,7 @@ rgb_camera_left:
enable_segmentation: no
enable_opticalflow: no
# enable stereo depth when rgb_camera_right on (If use, please use larger resolution (e.g., 640x360)).
# enable stereo depth when rgb_camera_right is on (if used, please use a larger resolution, e.g., 640x360)
rgb_camera_right:
on: no
t_BC: [0.0, -0.2, 0.1] # translational vector of the camera with respect to the body frame
@ -25,7 +25,7 @@ rgb_camera_right:
unity:
spawn_trees: true
save_pointcloud: true # set to 'false' to save startup time, if visualization is not needed.
avg_tree_spacing: 4.0
avg_tree_spacing: 4.0 # average spacing between trees
bounding_box: [80.0, 80.0, 11.0] # spawn objects within this bounding box
bounding_box_origin: [-10, 20, 2.5]
pointcloud_resolution: 0.2
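For reference, t_BC and r_BC above define the camera pose relative to the body frame. A minimal Python sketch of turning them into a homogeneous transform follows; the Euler-angle order and convention are assumptions here (check Flightmare's actual convention before relying on it), and scipy is used only for illustration.

```python
# Hedged sketch: build a 4x4 body-to-camera transform from t_BC / r_BC.
# ASSUMPTION: roll-pitch-yaw about x, y, z in degrees; verify against Flightmare.
import numpy as np
from scipy.spatial.transform import Rotation as R

t_BC = np.array([0.0, 0.0, 0.1])   # camera position in the body frame [m]
r_BC = [0.0, -5.0, -90.0]          # roll, pitch, yaw [deg]

T_BC = np.eye(4)
T_BC[:3, :3] = R.from_euler("xyz", r_BC, degrees=True).as_matrix()
T_BC[:3, 3] = t_BC
print(T_BC)
```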

View File

@ -1,39 +1,40 @@
vel_max: 6.0
# Maximum speed: applicable as long as the test speed is not much higher than the training speed
vel_max: 6.0 # the given weights perform smoothly at speeds between 0 and 6 m/s
# segment_time = 2 * radio / vel_max
# IMPORTANT PARAM: weight of penalties (6m/s)
ws: 0.00004
wc: 0.001
wl: 0.00
wg: 0.0002
# IMPORTANT TRAINING PARAMS: penalty weights (6 m/s)
ws: 0.00004 # smoothness cost (reduce ws when increasing vel_max in training)
wc: 0.001 # collision cost
wg: 0.0002 # goal cost
wl: 0.00 # trajectory length cost
#ws: 0.00004
#wc: 0.001
#wl: 0.02
#wg: 0.0001
# trajectory and primitive parma
horizon_num: 5
vertical_num: 3
horizon_camera_fov: 90.0
vertical_camera_fov: 60.0
horizon_anchor_fov: 30
vertical_anchor_fov: 30
goal_length: 10
radio_range: 4.0 # planning horizon: 2 * radio_range
vel_fov: 90.0 # not use currently
radio_num: 1 # 1 just ok
vel_num: 1 # 1 just ok
vel_prefile: 0.0 # 0 just ok
# trajectory and primitive parameters
horizon_num: 5 # number of grids in the horizontal direction
vertical_num: 3 # number of grids in the vertical direction
horizon_camera_fov: 90.0 # horizontal camera FOV
vertical_camera_fov: 60.0 # vertical camera FOV
horizon_anchor_fov: 30 # horizontal FOV of each grid
vertical_anchor_fov: 30 # vertical FOV of each grid
goal_length: 10 # used to normalize the goal penalty (should be >= 2 * radio_range)
radio_range: 4.0 # planning horizon: 2 * radio_range
vel_fov: 90.0 # not used currently
radio_num: 1 # 1 is fine (deprecated)
vel_num: 1 # 1 is fine (deprecated)
vel_prefile: 0.0 # 0 is fine (deprecated)
# For data efficiency, we randomly sample multiple vel and acc for each depth image with the following the distribution.
# For data efficiency, we randomly sample multiple vel and acc values for each depth image with the following distribution
# values at normalized speed (actual speed can be denormalized by multiplying v_multiple)
# unit data multiplier (see the worked example after this file's diff): v_multiple = 0.5 * v_max = radio / time
# mean of the v data: v_mean = v_multiple * v_mean_unit
# variance of the v data: v_var = v_multiple^2 * v_var_unit
# mean of the a data: a_mean = v_multiple^2 * a_mean_unit
# variance of the a data: a_var = v_multiple^4 * a_var_unit
vx_mean_unit: 1.5
vx_mean_unit: 1.5 # vel_x: skewed distribution
vy_mean_unit: 0.0
vz_mean_unit: 0.0
vx_var_unit: 0.15
@ -46,19 +47,19 @@ ax_var_unit: 0.0278
ay_var_unit: 0.05
az_var_unit: 0.0278
# penalties
# collision penalties
alpha: 10.0
d0: 1.2
r: 0.6
# vel penalties (deprecated)
alphav: 2.0
v0: 3.5
rv: 1.5
# acc penalties (deprecated)
alphaa: 2.0
a0: 3.5
ra: 1.5
# deprecated weight
# vel and acc weight (deprecated)
wv: 0.0
wa: 0.0
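The v_multiple comments above describe how the unit-normalized sampling statistics relate to actual velocities and accelerations. A small worked example using values from this file (vel_max, vx_mean_unit, vx_var_unit, ax_var_unit) is sketched below; it only illustrates the arithmetic and is not part of the training code.

```python
# Worked example of denormalizing the sampling statistics, following the
# comments above: v_multiple = 0.5 * v_max = radio / time.
vel_max = 6.0
vx_mean_unit, vx_var_unit = 1.5, 0.15
ax_var_unit = 0.0278

v_multiple = 0.5 * vel_max                 # = 3.0
vx_mean = v_multiple * vx_mean_unit        # = 4.5 m/s
vx_var = v_multiple ** 2 * vx_var_unit     # = 1.35 (m/s)^2
ax_var = v_multiple ** 4 * ax_var_unit     # ~= 2.25 (m/s^2)^2
print(vx_mean, vx_var, ax_var)
```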

View File

@ -1,14 +1,14 @@
env:
seed: 1
scene_id: 4 # 0 wasteland, 1 japanese street, 4 empty forest in SR standalone
num_envs: 16 # Important: must be the same as the batch size!
num_threads: 16
dataset_path: "/run/yopo_sim/"
ply_path: "/run/yopo_sim/"
dataset_path: "/run/yopo_sim/" # path of the training dataset
ply_path: "/run/yopo_sim/" # path of the point cloud of the training data
unity:
spawn_trees: true
avg_tree_spacing: 4
avg_tree_spacing: 4 # average spacing between trees
# larger than the region where the drone is spawned, to ensure the completeness of the point cloud at the edges.
bounding_box: [80.0, 80.0, 11] # spawn objects within this bounding box
bounding_box_origin: [-10, 20, 2.5]

View File

@ -279,7 +279,7 @@ bool UnityBridge::handleOutput(FrameID& frameID) {
// 2. additionally enabled image layers -------------------------------------------
for (size_t layer_idx = 0; layer_idx < cam.post_processing.size(); layer_idx++) {
if (cam.post_processing[layer_idx] == RGBCameraTypes::Depth) {
// depth
// depth (each float32 is stored as 4 uint8 bytes)
uint32_t image_len = cam.width * cam.height * 4;
// Get raw image bytes from ZMQ message.
// WARNING: This is a zero-copy operation that also casts the input to an array of uint8_t. When the message is deleted, this
@ -291,9 +291,9 @@ bool UnityBridge::handleOutput(FrameID& frameID) {
cv::Mat new_image = cv::Mat(cam.height, cam.width, CV_32FC1);
memcpy(new_image.data, image_data, image_len);
// Flip image since OpenCV origin is upper left, but Unity's is lower left.
new_image = new_image * (1.f);
new_image = new_image * (1.f); // default unit: km
cv::flip(new_image, new_image, 0);
new_image = cv::max(new_image, 0.0f); // the returned depth values are occasionally negative
unity_quadrotors_[idx]->getCameras()[cam.output_index]->feedImageQueue(CameraLayer::DepthMap, new_image);
}
}
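The change above is the core of this commit: the float32 depth buffer from Unity occasionally contains negative values, which are now clamped to zero. A rough Python equivalent of the handling (decode, flip, clamp) is sketched below; the buffer layout and the km unit are taken from the comments above, and the random input is only a stand-in for the ZMQ message.

```python
# Rough Python equivalent of the depth handling above (not the project's code).
import numpy as np

width, height = 160, 90
# Stand-in for the raw ZMQ bytes: width*height float32 depth values in km,
# with occasional negative readings.
raw = np.random.uniform(-0.1, 1.0, size=(height, width)).astype(np.float32).tobytes()

depth = np.frombuffer(raw, dtype=np.float32).reshape(height, width).copy()
depth = np.flipud(depth)          # Unity's origin is lower-left, OpenCV's is upper-left
depth = np.maximum(depth, 0.0)    # equivalent of cv::max(new_image, 0.0f)
assert depth.min() >= 0.0
```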

View File

@ -100,9 +100,11 @@ if __name__ == '__main__':
    start = time.time()
    for epoch in range(1):
        last = time.time()
        for i, (depth, pos, quat, obs, id) in enumerate(data_loader):
            pass
            # depth should stay within the normalized range [0, 1]
            max_val = depth.max()
            min_val = depth.min()
            if max_val > 1 or min_val < 0:
                print(f"Image {i} has values out of range: max={max_val}, min={min_val}")
    end = time.time()
    print("Total time:", end - start)