pub mod combinations;
pub mod database;
pub mod pattern;
pub mod solve;
pub mod track;
pub mod wcs_refine;
use rkyv::{Archive, Deserialize, Serialize};
use crate::camera_model::CameraModel;
use crate::distortion::Distortion;
use crate::rkyv_numeris::AsQuatArray;
use crate::{Quaternion, StarCatalog};
/// One slot in the on-disk pattern catalog: a 4-star pattern plus the
/// geometric key data needed to match it against an observed asterism.
///
/// `#[repr(C)]` with explicit padding keeps the layout stable so the
/// rkyv-archived form is portable across platforms.
#[derive(Debug, Clone, Copy, PartialEq, Archive, Serialize, Deserialize)]
#[repr(C)]
pub struct PatternEntry {
    /// Indices of the four pattern stars (presumably into
    /// `SolverDatabase::star_vectors` — confirm in database.rs).
    /// All-zero marks an unused slot; see `is_empty`.
    pub star_indices: [u32; 4],
    /// Size of the pattern's largest edge (units not shown here —
    /// NOTE(review): presumably an angle in radians; confirm at build site).
    pub largest_edge: f32,
    /// Truncated hash of the pattern key, used to reject bucket
    /// collisions cheaply before comparing full geometry.
    pub key_hash: u16,
    // Explicit padding word; always zero (see `PatternEntry::new`).
    _pad: u16,
}
impl PatternEntry {
pub const EMPTY: Self = Self {
star_indices: [0, 0, 0, 0],
largest_edge: 0.0,
key_hash: 0,
_pad: 0,
};
#[inline]
pub fn new(star_indices: [u32; 4], largest_edge: f32, key_hash: u16) -> Self {
Self {
star_indices,
largest_edge,
key_hash,
_pad: 0,
}
}
#[inline]
pub fn is_empty(&self) -> bool {
self.star_indices == [0, 0, 0, 0]
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)]
pub enum SolveStatus {
MatchFound,
NoMatch,
Timeout,
TooFew,
}
/// Build-time parameters and bookkeeping stored alongside a `SolverDatabase`,
/// recording how its pattern catalog was generated.
#[derive(Debug, Clone, Archive, Serialize, Deserialize)]
pub struct DatabaseProperties {
    /// Number of bins used when quantizing pattern geometry for hashing
    /// (NOTE(review): presumed — confirm against the pattern hash code).
    pub pattern_bins: u32,
    /// Maximum allowed pattern matching error used at generation time.
    pub pattern_max_error: f32,
    /// Largest field of view the database supports, in radians.
    pub max_fov_rad: f32,
    /// Smallest field of view the database supports, in radians.
    pub min_fov_rad: f32,
    /// Faintest star magnitude included in the catalog.
    pub star_max_magnitude: f32,
    /// Total number of patterns stored in `pattern_catalog`.
    pub num_patterns: u32,
    /// Catalog equinox epoch (e.g. 2000 — confirm convention at build site).
    pub epoch_equinox: u16,
    /// Year to which star proper motions were propagated.
    pub epoch_proper_motion_year: f32,
    /// Number of verification stars retained per field of view.
    pub verification_stars_per_fov: u32,
    /// Oversampling factor for lattice-field generation.
    pub lattice_field_oversampling: u32,
    /// Number of patterns generated per lattice field.
    pub patterns_per_lattice_field: u32,
}
/// The complete solver database: the star catalog, per-star unit vectors and
/// ids, the hash-indexed pattern catalog, and the properties it was built with.
#[derive(Debug, Clone, Archive, Serialize, Deserialize)]
pub struct SolverDatabase {
    /// Underlying star catalog used to build the database.
    pub star_catalog: StarCatalog,
    /// Unit direction vector for each database star
    /// (presumably ICRS frame — confirm against builder).
    pub star_vectors: Vec<[f32; 3]>,
    /// Catalog identifier for each star, parallel to `star_vectors`.
    pub star_catalog_ids: Vec<i64>,
    /// Flat pattern table; unused slots are `PatternEntry::EMPTY`.
    pub pattern_catalog: Vec<PatternEntry>,
    /// Generation parameters and counts for this database.
    pub props: DatabaseProperties,
}
/// Configuration for generating a `SolverDatabase` (see `Default` for the
/// stock values).
pub struct GenerateDatabaseConfig {
    /// Largest field of view to support, in degrees.
    pub max_fov_deg: f32,
    /// Smallest field of view to support, in degrees; `None` lets the
    /// builder choose.
    pub min_fov_deg: Option<f32>,
    /// Faintest star magnitude to include; `None` lets the builder choose.
    pub star_max_magnitude: Option<f32>,
    /// Maximum allowed pattern matching error.
    pub pattern_max_error: f32,
    /// Oversampling factor for lattice-field generation.
    pub lattice_field_oversampling: u32,
    /// Number of patterns to generate per lattice field.
    pub patterns_per_lattice_field: u32,
    /// Number of verification stars to retain per field of view.
    pub verification_stars_per_fov: u32,
    /// Ratio between successive FOV scales in the multiscale build.
    pub multiscale_step: f32,
    /// Year to propagate proper motions to; `None` presumably disables
    /// propagation — confirm in database.rs.
    pub epoch_proper_motion_year: Option<f64>,
    /// HEALPix nside used for catalog spatial binning
    /// (NOTE(review): presumed from the name — confirm).
    pub catalog_nside: u32,
}
impl Default for GenerateDatabaseConfig {
fn default() -> Self {
Self {
max_fov_deg: 30.0,
min_fov_deg: None,
star_max_magnitude: None,
pattern_max_error: 0.001,
lattice_field_oversampling: 100,
patterns_per_lattice_field: 50,
verification_stars_per_fov: 150,
multiscale_step: 1.5,
epoch_proper_motion_year: Some(2025.0),
catalog_nside: 16,
}
}
}
/// Per-solve configuration; construct via `SolveConfig::new` so the camera
/// model matches the image geometry.
pub struct SolveConfig {
    /// Estimated field of view of the image, in radians.
    pub fov_estimate_rad: f32,
    /// Image width in pixels.
    pub image_width: u32,
    /// Image height in pixels.
    pub image_height: u32,
    /// Allowed deviation from the FOV estimate, in radians; `None` for
    /// solver default.
    pub fov_max_error_rad: Option<f32>,
    /// Star-to-centroid match radius (units not shown here — presumably a
    /// fraction of the FOV or radians; confirm in solve.rs).
    pub match_radius: f32,
    /// False-positive probability threshold for accepting a match.
    pub match_threshold: f64,
    /// Wall-clock timeout for the solve in milliseconds; `None` disables it.
    pub solve_timeout_ms: Option<u64>,
    /// Maximum allowed match error; `None` for solver default.
    pub match_max_error: Option<f32>,
    /// Number of attitude/WCS refinement iterations to run.
    pub refine_iterations: u32,
    /// Camera model used for projection during the solve.
    pub camera_model: CameraModel,
    /// Observer velocity in km/s (presumably for aberration correction —
    /// confirm in solve.rs); `None` to skip.
    pub observer_velocity_km_s: Option<[f64; 3]>,
    /// Optional prior attitude to restrict the search.
    pub attitude_hint: Option<Quaternion>,
    /// 1-sigma-style uncertainty of the attitude hint, in radians.
    pub hint_uncertainty_rad: f32,
    /// When true, only accept solutions consistent with the hint.
    pub strict_hint: bool,
}
impl Default for SolveConfig {
fn default() -> Self {
Self {
fov_estimate_rad: 0.0,
image_width: 0,
image_height: 0,
fov_max_error_rad: None,
match_radius: 0.01,
match_threshold: 1e-5,
solve_timeout_ms: Some(5000),
match_max_error: None,
refine_iterations: 2,
camera_model: CameraModel {
focal_length_px: 1.0,
image_width: 0,
image_height: 0,
crpix: [0.0, 0.0],
parity_flip: false,
distortion: Distortion::None,
},
observer_velocity_km_s: None,
attitude_hint: None,
hint_uncertainty_rad: 1.0_f32.to_radians(),
strict_hint: false,
}
}
}
impl SolveConfig {
    /// Builds a config for an image of the given dimensions and estimated
    /// field of view, deriving a matching camera model; all other options
    /// take their [`Default`] values.
    pub fn new(fov_estimate_rad: f32, image_width: u32, image_height: u32) -> Self {
        let camera_model =
            CameraModel::from_fov(fov_estimate_rad as f64, image_width, image_height);
        Self {
            camera_model,
            fov_estimate_rad,
            image_width,
            image_height,
            ..Default::default()
        }
    }

    /// Approximate angular size of one pixel (1 / focal length in pixels),
    /// derived from the FOV estimate and image width; 0.0 when either is
    /// unset or invalid.
    pub fn pixel_scale(&self) -> f32 {
        // Guard written as the negation of the validity test so a NaN FOV
        // also falls through to the 0.0 sentinel.
        if !(self.image_width > 0 && self.fov_estimate_rad > 0.0) {
            return 0.0;
        }
        let half_width_px = self.image_width as f32 / 2.0;
        let focal_px = half_width_px / (self.fov_estimate_rad / 2.0).tan();
        focal_px.recip()
    }
}
/// Result of a plate-solve attempt. On failure only `status` and
/// `solve_time_ms` are meaningful (see `SolveResult::failure`).
#[derive(Debug, Clone, Archive, Serialize, Deserialize)]
pub struct SolveResult {
    /// Attitude quaternion; per its name, rotates ICRS into the camera
    /// frame (used that way by `pixel_to_world`/`world_to_pixel`).
    #[rkyv(with = rkyv::with::Map<AsQuatArray>)]
    pub qicrs2cam: Option<Quaternion>,
    /// Solved field of view, in radians.
    pub fov_rad: Option<f32>,
    /// Number of star/centroid matches in the accepted solution.
    pub num_matches: Option<u32>,
    /// Root-mean-square residual of the matches, in radians.
    pub rmse_rad: Option<f32>,
    /// 90th-percentile residual, in radians (presumed from the name).
    pub p90e_rad: Option<f32>,
    /// Largest single residual, in radians.
    pub max_err_rad: Option<f32>,
    /// False-positive probability of the solution.
    pub prob: Option<f64>,
    /// Wall-clock time spent solving, in milliseconds.
    pub solve_time_ms: f32,
    /// Outcome classification of the solve.
    pub status: SolveStatus,
    /// True when the image is mirror-flipped relative to the sky.
    pub parity_flip: bool,
    /// Catalog ids of the matched stars.
    pub matched_catalog_ids: Vec<i64>,
    /// Indices of the matched centroids in the caller's input list.
    pub matched_centroid_indices: Vec<usize>,
    /// Width of the solved image, in pixels.
    pub image_width: u32,
    /// Height of the solved image, in pixels.
    pub image_height: u32,
    /// Linear WCS CD matrix, if computed (used as a pixel->tangent-plane map).
    pub cd_matrix: Option<[[f64; 2]; 2]>,
    /// WCS reference point (RA, Dec) in radians, if computed.
    pub crval_rad: Option<[f64; 2]>,
    /// Refined camera model, if WCS refinement ran.
    pub camera_model: Option<CameraModel>,
    /// Rotation between the camera tangent plane and the WCS tangent
    /// plane, in radians (applied in `pixel_to_world`/`world_to_pixel`).
    pub theta_rad: Option<f64>,
}
impl SolveResult {
    /// Builds a failed result: carries only the status and elapsed time;
    /// every solution field is `None`/empty/zero.
    pub(crate) fn failure(status: SolveStatus, solve_time_ms: f32) -> Self {
        Self {
            qicrs2cam: None,
            fov_rad: None,
            num_matches: None,
            rmse_rad: None,
            p90e_rad: None,
            max_err_rad: None,
            prob: None,
            solve_time_ms,
            status,
            parity_flip: false,
            matched_catalog_ids: Vec::new(),
            matched_centroid_indices: Vec::new(),
            image_width: 0,
            image_height: 0,
            cd_matrix: None,
            crval_rad: None,
            camera_model: None,
            theta_rad: None,
        }
    }

    /// Maps a pixel coordinate to world coordinates, returning
    /// `(ra_deg in [0, 360), dec_deg)`, or `None` when no solution data is
    /// available. Three strategies are tried in decreasing fidelity:
    /// 1. refined camera model + tangent-plane rotation `theta_rad`,
    /// 2. linear WCS via the CD matrix,
    /// 3. pinhole projection reconstructed from the quaternion and FOV.
    pub fn pixel_to_world(&self, x: f64, y: f64) -> Option<(f64, f64)> {
        if let (Some(ref cam), Some(crval), Some(theta)) =
            (&self.camera_model, &self.crval_rad, self.theta_rad)
        {
            // Camera model handles distortion/crpix; then rotate camera
            // tangent-plane coords by +theta into the WCS tangent plane
            // (world_to_pixel applies the inverse rotation).
            let (xi_cam, eta_cam) = cam.pixel_to_tanplane(x, y);
            let cos_t = theta.cos();
            let sin_t = theta.sin();
            let xi = cos_t * xi_cam - sin_t * eta_cam;
            let eta = sin_t * xi_cam + cos_t * eta_cam;
            let (ra, dec) = wcs_refine::inverse_tan_project(xi, eta, crval[0], crval[1]);
            Some((ra.to_degrees().rem_euclid(360.0), dec.to_degrees()))
        } else if let (Some(cd), Some(crval)) = (&self.cd_matrix, &self.crval_rad) {
            // Linear WCS: CD matrix maps pixel offsets straight to the
            // tangent plane (no distortion terms).
            let xi = cd[0][0] * x + cd[0][1] * y;
            let eta = cd[1][0] * x + cd[1][1] * y;
            let (ra, dec) = wcs_refine::inverse_tan_project(xi, eta, crval[0], crval[1]);
            Some((ra.to_degrees().rem_euclid(360.0), dec.to_degrees()))
        } else {
            // Fallback pinhole: focal length from image width and FOV.
            // NOTE(review): no crpix offset here, so x/y appear to be
            // relative to the optical axis — confirm against callers.
            let q = self.qicrs2cam.as_ref()?;
            let fov = self.fov_rad? as f64;
            let f = (self.image_width.max(1) as f64 / 2.0) / (fov / 2.0).tan();
            let pixel_scale = 1.0 / f;
            // Parity flip mirrors the x axis.
            let ps = if self.parity_flip { -1.0 } else { 1.0 };
            let xr = ps * x * pixel_scale;
            let yr = y * pixel_scale;
            // Normalize (xr, yr, 1) to a unit camera-frame direction.
            let norm = (xr * xr + yr * yr + 1.0).sqrt();
            let cx = xr / norm;
            let cy = yr / norm;
            let cz = 1.0 / norm;
            // Dotting with matrix COLUMNS applies the transpose of m,
            // i.e. camera -> ICRS (assuming qicrs2cam maps ICRS -> camera
            // per its name).
            let m = q.to_rotation_matrix();
            let ix = m[(0, 0)] as f64 * cx + m[(1, 0)] as f64 * cy + m[(2, 0)] as f64 * cz;
            let iy = m[(0, 1)] as f64 * cx + m[(1, 1)] as f64 * cy + m[(2, 1)] as f64 * cz;
            let iz = m[(0, 2)] as f64 * cx + m[(1, 2)] as f64 * cy + m[(2, 2)] as f64 * cz;
            let dec = iz.asin();
            let ra = iy.atan2(ix);
            Some((ra.to_degrees().rem_euclid(360.0), dec.to_degrees()))
        }
    }

    /// Inverse of [`pixel_to_world`]: maps `(ra_deg, dec_deg)` to a pixel
    /// coordinate, using the same three strategies in the same priority
    /// order. Returns `None` when no solution data is available, when the
    /// tangent projection fails, or (fallback path) when the point lies
    /// behind the camera.
    pub fn world_to_pixel(&self, ra_deg: f64, dec_deg: f64) -> Option<(f64, f64)> {
        if let (Some(ref cam), Some(crval), Some(theta)) =
            (&self.camera_model, &self.crval_rad, self.theta_rad)
        {
            let ra = ra_deg.to_radians();
            let dec = dec_deg.to_radians();
            let (xi, eta) = wcs_refine::tan_project(ra, dec, crval[0], crval[1])?;
            // Rotate WCS tangent-plane coords by -theta back into the
            // camera tangent plane (inverse of pixel_to_world's rotation).
            let cos_t = theta.cos();
            let sin_t = theta.sin();
            let xi_cam = cos_t * xi + sin_t * eta;
            let eta_cam = -sin_t * xi + cos_t * eta;
            let (px, py) = cam.tanplane_to_pixel(xi_cam, eta_cam);
            Some((px, py))
        } else if let (Some(cd), Some(crval)) = (&self.cd_matrix, &self.crval_rad) {
            let ra = ra_deg.to_radians();
            let dec = dec_deg.to_radians();
            let (xi, eta) = wcs_refine::tan_project(ra, dec, crval[0], crval[1])?;
            // Invert the CD matrix to go tangent plane -> pixels.
            let cd_inv = wcs_refine::cd_inverse(cd)?;
            let px = cd_inv[0][0] * xi + cd_inv[0][1] * eta;
            let py = cd_inv[1][0] * xi + cd_inv[1][1] * eta;
            Some((px, py))
        } else {
            let q = self.qicrs2cam.as_ref()?;
            let fov = self.fov_rad? as f64;
            let f = (self.image_width.max(1) as f64 / 2.0) / (fov / 2.0).tan();
            let pixel_scale = 1.0 / f;
            // Unit ICRS direction from RA/Dec.
            let ra = ra_deg.to_radians();
            let dec = dec_deg.to_radians();
            let cos_dec = dec.cos();
            let ix = ra.cos() * cos_dec;
            let iy = ra.sin() * cos_dec;
            let iz = dec.sin();
            // Dotting with matrix ROWS applies m directly: ICRS -> camera
            // (the transpose of the product used in pixel_to_world).
            let m = q.to_rotation_matrix();
            let cx = m[(0, 0)] as f64 * ix + m[(0, 1)] as f64 * iy + m[(0, 2)] as f64 * iz;
            let cy = m[(1, 0)] as f64 * ix + m[(1, 1)] as f64 * iy + m[(1, 2)] as f64 * iz;
            let cz = m[(2, 0)] as f64 * ix + m[(2, 1)] as f64 * iy + m[(2, 2)] as f64 * iz;
            if cz <= 0.0 {
                // Point is behind (or exactly perpendicular to) the boresight.
                return None;
            }
            // Perspective divide, then undo the pixel scale and parity flip.
            let xr = cx / cz;
            let yr = cy / cz;
            let ps = if self.parity_flip { -1.0 } else { 1.0 };
            let ux = ps * xr / pixel_scale;
            let uy = yr / pixel_scale;
            Some((ux, uy))
        }
    }
}