diff --git a/teleop/teleop_hand_and_arm.py b/teleop/teleop_hand_and_arm.py
index 5f0e9bb..e466018 100644
--- a/teleop/teleop_hand_and_arm.py
+++ b/teleop/teleop_hand_and_arm.py
@@ -534,27 +534,28 @@ if __name__ == '__main__':
                           f"Robot L={robot_ref_left[:3,3]}, R={robot_ref_right[:3,3]}")
             prev_start = START
 
+            # Webcam: always send regardless of tracking state
+            if webcam_cap is not None:
+                if current_time - webcam_last_send >= webcam_send_interval:
+                    ret, webcam_frame = webcam_cap.read()
+                    if ret:
+                        head_img = webcam_frame
+                        tv_wrapper.render_to_xr(webcam_frame)
+                    webcam_last_send = current_time
+
             # Skip arm tracking while paused (buttons + reset still work)
             if not START:
                 i_error_left = np.zeros(3)
                 i_error_right = np.zeros(3)
                 i_rot_error_left = np.zeros(3)
                 i_rot_error_right = np.zeros(3)
-                current_time = time.time()
                 time_elapsed = current_time - start_time
                 sleep_time = max(0, (1 / args.frequency) - time_elapsed)
                 time.sleep(sleep_time)
                 continue
 
-            # get image (webcam: non-blocking, rate-limited to save bandwidth)
-            if webcam_cap is not None:
-                if current_time - webcam_last_send >= webcam_send_interval:
-                    ret, webcam_frame = webcam_cap.read()
-                    if ret:
-                        head_img = webcam_frame
-                        tv_wrapper.render_to_xr(webcam_frame)
-                    webcam_last_send = current_time
-            else:
+            # get image (non-webcam sources, only during tracking)
+            if webcam_cap is None:
                 if camera_config['head_camera']['enable_zmq']:
                     if args.record or xr_need_local_img:
                         head_img = img_client.get_head_frame()