// cdshealpix/nested/bmoc.rs
1//! Definition of a BMOC, i.e. a MOC storing an additional flag telling if a cell is fully
2//! or partially covered by the MOC.
3//!
4//! So far, all BMOC logical operations (not, and, or, xor) are made from the BMOC representation.
5//! It is probably simpler and faster to work on ranges (but we have to handle the flag).
6
7use std::{
8  cmp::{max, Ordering},
9  fs::File,
10  io::{BufRead, BufReader, Error as IoError, Write},
11  path::Path,
12  slice::Iter,
13  str,
14  time::SystemTime,
15  vec::IntoIter,
16};
17
18use base64::{engine::general_purpose::STANDARD, DecodeError, Engine};
19use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
20use chrono::{DateTime, SecondsFormat, Utc};
21use log::debug;
22
23use crate::{
24  nested::{
25    hash,
26    map::fits::{
27      error::FitsError,
28      read::{
29        check_keyword_and_parse_uint_val, check_keyword_and_str_val, check_keyword_and_val,
30        get_str_val_no_quote, next_36_chunks_of_80_bytes, parse_uint_val,
31      },
32      write::{
33        write_final_padding_ioerr, write_keyword_record, write_primary_hdu_ioerr,
34        write_str_keyword_record, write_uint_mandatory_keyword_record,
35      },
36    },
37    to_range,
38  },
39  nside_square_unsafe,
40};
41
/// A very basic and simple BMOC Builder: we push elements in it assuming that we provide them
/// in the right order, without duplicates and small cells included in larger cells.
#[derive(Debug)]
pub struct BMOCBuilderUnsafe {
  // (super) // removed because of external test
  /// Maximum depth of the BMOC being built.
  depth_max: u8,
  /// Raw encoded cells; `None` once the builder content has been consumed.
  entries: Option<Vec<u64>>,
}
50
51impl BMOCBuilderUnsafe {
52  pub fn new(depth_max: u8, capacity: usize) -> BMOCBuilderUnsafe {
53    BMOCBuilderUnsafe {
54      depth_max,
55      entries: Some(Vec::with_capacity(capacity)),
56    }
57  }
58
59  /* Commented because not used so far
60  /// Clear the content and start a fresh builder with the given initial capacity.
61  #[warn(dead_code)]
62  pub fn re_init(&mut self, capacity: usize) -> &mut BMOCBuilderUnsafe {
63    self.entries = Some(Vec::with_capacity(capacity));
64    self
65  }*/
66
67  pub fn push(&mut self, depth: u8, hash: u64, is_full: bool) -> &mut BMOCBuilderUnsafe {
68    if let Some(ref mut v) = self.entries {
69      v.push(build_raw_value(depth, hash, is_full, self.depth_max));
70    // println!("push {:?}", Cell::new(*v.last().unwrap(), self.depth_max));
71    } else {
72      panic!("Empty builder, you have to re-init it before re-using it!");
73    }
74    self
75  }
76
77  fn push_raw_unsafe(&mut self, raw_value: u64) -> &mut BMOCBuilderUnsafe {
78    // println!("push {:?}", Cell::new(raw_value, self.depth_max));
79    if let Some(ref mut v) = self.entries {
80      v.push(raw_value);
81    } else {
82      panic!("Empty builder, you have to re-init it before re-using it!");
83    }
84    self
85  }
86
87  pub fn push_all(
88    &mut self,
89    depth: u8,
90    from_hash: u64,
91    to_hash: u64,
92    are_full: bool,
93  ) -> &mut BMOCBuilderUnsafe {
94    if let Some(ref mut v) = self.entries {
95      for h in from_hash..to_hash {
96        v.push(build_raw_value(depth, h, are_full, self.depth_max));
97      }
98    } else {
99      panic!("Empty builder, you have to re-init it before re-using it!");
100    }
101    self
102  }
103
104  #[allow(clippy::wrong_self_convention)]
105  pub fn to_bmoc(mut self) -> BMOC {
106    BMOC::create_unsafe(
107      self.depth_max,
108      self
109        .entries
110        .take()
111        .expect("Empty builder!")
112        .into_boxed_slice(),
113    )
114  }
115
116  /// We consider that the pushed elements are not ordered, but they come from a valid BMOC (i.e.
117  /// no cell included in another cell)
118  #[allow(clippy::wrong_self_convention)]
119  pub fn to_bmoc_from_unordered(mut self) -> BMOC {
120    let mut res = self.entries.take().expect("Empty builder!");
121    res.sort_unstable();
122    BMOC::create_unsafe(self.depth_max, res.into_boxed_slice())
123  }
124
  /// Merges, in place and repeatedly, every group of 4 fully-covered sibling cells into their
  /// (fully-covered) parent cell, until a pass produces no change, then returns the packed
  /// entries. Consumes the builder content.
  fn pack(&mut self) -> Vec<u64> {
    let mut entries = self.entries.take().expect("Empty builder!");
    // On-place pack
    let mut prev_to_index = 0_usize;
    let mut curr_to_index = entries.len();
    while prev_to_index != curr_to_index {
      // changes occurs
      prev_to_index = curr_to_index;
      let mut i_prev_moc = 0_usize; // read index (result of the previous pass)
      let mut i_curr_moc = 0_usize; // write index (result of the current pass)
      while i_prev_moc < prev_to_index {
        let mut curr_cell = entries[i_prev_moc];
        i_prev_moc += 1;
        let mut curr_cell_depth = get_depth(curr_cell, self.depth_max);
        let mut curr_cell_hash =
          get_hash_from_delta_depth(curr_cell, self.depth_max - curr_cell_depth);
        // Look for the first cell of the larger cell (depth - 1)  (=> 2 last bits = 00), the cell must be FULL
        while i_prev_moc < prev_to_index
          && (curr_cell_depth == 0
            || is_partial(curr_cell)
            || is_not_first_cell_of_larger_cell(curr_cell_hash))
        {
          // Not a merge candidate: copy it through to the current pass result
          if i_curr_moc != i_prev_moc {
            entries[i_curr_moc] = curr_cell;
            i_curr_moc += 1;
          }
          curr_cell = entries[i_prev_moc];
          i_prev_moc += 1;
          curr_cell_depth = get_depth(curr_cell, self.depth_max);
          curr_cell_hash = get_hash_from_delta_depth(curr_cell, self.depth_max - curr_cell_depth);
        }
        // Look at the 3 siblings
        if i_prev_moc + 2 < prev_to_index
          && entries[i_prev_moc]
            == build_raw_value(curr_cell_depth, curr_cell_hash | 1, true, self.depth_max)
          && entries[i_prev_moc + 1]
            == build_raw_value(curr_cell_depth, curr_cell_hash | 2, true, self.depth_max)
          && entries[i_prev_moc + 2]
            == build_raw_value(curr_cell_depth, curr_cell_hash | 3, true, self.depth_max)
        {
          // All 4 siblings are full: replace them by their (full) parent cell
          entries[i_curr_moc] = build_raw_value(
            curr_cell_depth - 1,
            curr_cell_hash >> 2,
            true,
            self.depth_max,
          );
          i_curr_moc += 1;
          i_prev_moc += 3;
        } else if i_curr_moc != i_prev_moc {
          entries[i_curr_moc] = curr_cell;
          i_curr_moc += 1;
        }
      }
      curr_to_index = i_curr_moc;
    }
    // We may find a better algorithm doing a single pass on the input MOC
    // Here the number of passes max = mocDepth - smallestDepthOfACellInOutputMoc
    // YEP: new idea: do it like a buffer with a cursor on the last "unmergeable" element!!
    entries.truncate(curr_to_index);
    entries
  }
186
  /// Re-encodes a raw value, whose cell depth is already `<= new_depth`, for a BMOC of maximum
  /// depth `new_depth` (instead of `self.depth_max`).
  ///
  /// The right shift moves the hash and the sentinel bit to their new positions; the flag
  /// (bit 0), which is shifted away in the process, is restored by the final OR.
  fn low_depth_raw_val_at_lower_depth(&self, raw_value: u64, new_depth: u8) -> u64 {
    debug_assert!(self.get_depth(raw_value) <= new_depth);
    debug_assert!(new_depth <= self.depth_max);
    let twice_delta_depth = (self.depth_max - new_depth) << 1;
    (raw_value >> twice_delta_depth) | (raw_value & 1_u64)
  }
193
  // We assume the given entries form a valid BMOC (already packed, ordered, ...)
  /// Degrades the given entries to `new_depth`: cells already at a depth `<= new_depth` are
  /// simply re-encoded, while deeper cells are replaced by their `new_depth` ancestor flagged
  /// as partially covered (flag = 0), deduplicating consecutive cells sharing that ancestor.
  ///
  /// # Panics
  /// If `new_depth >= self.depth_max`.
  fn to_lower_depth(&self, new_depth: u8, mut entries: Vec<u64>) -> Vec<u64> {
    if new_depth >= self.depth_max {
      panic!("The given depth must be lower than the depth max of the BMOC");
    }
    let mut i_new = 0_usize; // write index of the degraded entries
    // Re-encode the leading low-depth cells in place, until the first deep cell (if any)
    let mut prev_hash_at_new_depth = loop {
      if i_new == entries.len() {
        // All cells have a depth <= new_depth
        break None;
      }
      let raw_value = entries[i_new];
      let depth = self.get_depth(raw_value);
      if depth <= new_depth {
        entries[i_new] = self.low_depth_raw_val_at_lower_depth(raw_value, new_depth);
        i_new += 1;
      } else {
        break Some(get_hash_from_delta_depth(
          raw_value,
          self.depth_max - new_depth,
        ));
      }
    };
    for i in (i_new + 1)..entries.len() {
      let raw_value = entries[i];
      let depth = self.get_depth(raw_value);
      if depth <= new_depth {
        // Flush the pending degraded (partial) cell, if any, before the low-depth cell
        if prev_hash_at_new_depth.is_some() {
          entries[i_new] = (prev_hash_at_new_depth.take().unwrap() << 2) | 2_u64;
          i_new += 1;
        }
        entries[i_new] = self.low_depth_raw_val_at_lower_depth(raw_value, new_depth);
        i_new += 1;
      } else {
        let curr_hash_at_new_depth =
          get_hash_from_delta_depth(raw_value, self.depth_max - new_depth);
        if let Some(prev_val_at_new_depth) = prev_hash_at_new_depth {
          if prev_val_at_new_depth != curr_hash_at_new_depth {
            entries[i_new] = (prev_val_at_new_depth << 2) | 2_u64; // sentinel bit + flag = 0
            i_new += 1;
            prev_hash_at_new_depth.replace(curr_hash_at_new_depth);
          }
        } else {
          prev_hash_at_new_depth.replace(curr_hash_at_new_depth);
        }
      }
    }
    // Flush the last pending degraded (partial) cell, if any
    if prev_hash_at_new_depth.is_some() {
      entries[i_new] = (prev_hash_at_new_depth.take().unwrap() << 2) | 2_u64;
      i_new += 1;
    }
    entries.truncate(i_new);
    entries
  }
248
249  #[allow(clippy::wrong_self_convention)]
250  pub fn to_bmoc_packing(&mut self) -> BMOC {
251    let entries = self.pack();
252    BMOC::create_unsafe(self.depth_max, entries.into_boxed_slice())
253  }
254
255  #[allow(clippy::wrong_self_convention)]
256  pub fn to_lower_depth_bmoc(&mut self, new_depth: u8) -> BMOC {
257    let entries = self.entries.take().expect("Empty builder!");
258    let entries = self.to_lower_depth(new_depth, entries);
259    BMOC::create_unsafe(new_depth, entries.into_boxed_slice())
260  }
261
262  #[allow(clippy::wrong_self_convention)]
263  pub fn to_lower_depth_bmoc_packing(&mut self, new_depth: u8) -> BMOC {
264    let entries = self.pack();
265    let entries = self.to_lower_depth(new_depth, entries);
266    BMOC::create_unsafe(new_depth, entries.into_boxed_slice())
267  }
268
  /// Depth of the cell encoded in the given raw value (flag bit included).
  #[inline]
  fn get_depth(&self, raw_value: u64) -> u8 {
    self.get_depth_no_flag(rm_flag(raw_value))
  }
  #[inline]
  /// Works both with no flag or with flag set to 0:
  /// the position of the sentinel bit (lowest bit set) encodes the delta depth.
  fn get_depth_no_flag(&self, raw_value_no_flag: u64) -> u8 {
    self.depth_max - (raw_value_no_flag.trailing_zeros() >> 1) as u8
  }
278}
279
/// Result of testing a point against a BMOC.
pub enum Status {
  /// The point is in the MOC
  IN,
  /// The point is out of the MOC
  OUT,
  /// The point may be in or out of the MOC
  /// (i.e. it falls in a cell that is only partially covered)
  UNKNOWN,
}
288
/// Builder taking cell at the MOC maximum depth.
pub struct BMOCBuilderFixedDepth {
  /// Depth of all the pushed cells (and of the resulting BMOC).
  depth: u8,
  /// BMOC under construction, merged (OR) with the buffer content each time it is drained.
  bmoc: Option<BMOC>,
  /// Flag assigned to every pushed cell.
  is_full: bool,
  /// Buffer of pushed cell hashes, drained when full.
  buffer: Vec<u64>,
  /// `true` while the buffered hashes are in non-decreasing order.
  sorted: bool,
}
297
298impl BMOCBuilderFixedDepth {
  /// # Inputs
  ///  - `depth`: BMOC depth.
  ///  - `is_full`: the flag to be set for each cell number (I expect `true` to be used for
  ///    example when building catalogues MOC).
  ///
  /// The results of logical operations between BMOC having the flag of each of their cells
  /// set to `true` must equal the results of regular MOC logical operations.
  pub fn new(depth: u8, is_full: bool) -> BMOCBuilderFixedDepth {
    // Default internal buffer capacity: 10 million hashes
    BMOCBuilderFixedDepth::with_capacity(depth, is_full, 10_000_000)
  }
309
310  pub fn with_capacity(depth: u8, is_full: bool, buff_capacity: usize) -> BMOCBuilderFixedDepth {
311    BMOCBuilderFixedDepth {
312      depth,
313      bmoc: None,
314      is_full,
315      buffer: Vec::with_capacity(buff_capacity),
316      sorted: true,
317    }
318  }
319
320  /// The hash must be at the builder depth
321  pub fn push(&mut self, hash: u64) {
322    if let Some(h) = self.buffer.last() {
323      if *h == hash {
324        return;
325      } else if self.sorted && *h > hash {
326        self.sorted = false;
327      }
328    }
329    self.buffer.push(hash);
330    if self.buffer.len() == self.buffer.capacity() {
331      self.drain_buffer();
332    }
333  }
334
335  #[allow(clippy::wrong_self_convention)]
336  pub fn to_bmoc(&mut self) -> Option<BMOC> {
337    // if self.buffer.len() > 0 {
338    self.drain_buffer();
339    // }
340    self.bmoc.take()
341  }
342
343  fn drain_buffer(&mut self) {
344    if !self.sorted {
345      // Sort and remove duplicates
346      self.buffer.sort_unstable();
347      self.buffer.dedup();
348    }
349    let new_bmoc = self.buff_to_bmoc();
350    self.clear_buff();
351    self.bmoc = Some(match self.bmoc.take() {
352      Some(prev_bmoc) => prev_bmoc.or(&new_bmoc),
353      None => new_bmoc,
354    })
355  }
356
  /// Packs the (sorted, deduplicated) buffer content into a BMOC: each run of consecutive
  /// hashes that fills an aligned lower-resolution cell is replaced by that single cell.
  fn buff_to_bmoc(&mut self) -> BMOC {
    let mut i = 0_usize; // read index in the buffer
    let mut k = 0_usize; // write index of the packed cells
    while i < self.buffer.len() {
      let h = self.buffer[i];
      let sequence_len = self.largest_lower_cell_sequence_len(h, &self.buffer[i..]);
      /*{
        // Look at the maximum number of cell that could be merge if the hash is the first of a cell
        let delta_depth = (h.trailing_zeros() >> 1).min(self.depth); // low_res_cell_depth = self.depth - delta_depth
        let num_cells = 1_usize << (dd << 1); // number of depth self.depth cells in the low_res_cell = (2^dd)^2 = 2^(2*dd)
        // Look for a sequence
        let mut j = i + 1;
        let mut expected_h = h + 1_u64;

        while j < self.buffer.len() && sequence_len < num_cells && self.buffer[j] == expected_h {
          j += 1;
          sequence_len = 1;
          expected_h += 1;
        }
      }*/
      // Look at the actual low_res_cell the sequence correspond to
      let delta_depth = sequence_len.next_power_of_two();
      let delta_depth = if delta_depth > sequence_len {
        // NOTE(review): the inline comment ("take previous value and divide by 2") suggests
        // (trailing_zeros - 1) >> 1, but the code computes trailing_zeros >> 2; for some
        // sequence lengths (e.g. 5..=7) this under-estimates delta_depth. The output is still
        // a valid (just less packed) BMOC -- TODO confirm intent.
        delta_depth.trailing_zeros() >> 2 // take previous value and divide by 2
      } else {
        debug_assert_eq!(delta_depth, sequence_len);
        delta_depth.trailing_zeros() >> 1 // divide by 2
      } as u8;
      let twice_dd = delta_depth << 1;
      let sequence_len = 1_usize << twice_dd;
      // Write the value
      self.buffer[k] = build_raw_value(
        self.depth - delta_depth,
        h >> twice_dd,
        self.is_full,
        self.depth,
      );
      k += 1;
      i += sequence_len;
    }
    // self.buffer.truncate(k);
    BMOC::create_unsafe_copying(self.depth, &self.buffer[0..k])
  }
400
401  #[inline]
402  fn largest_lower_cell_sequence_len(&self, mut h: u64, entries: &[u64]) -> usize {
403    // Look for the maximum number of cells that could be merged if the hash is the first of a cell
404    let dd = ((h.trailing_zeros() >> 1) as u8).min(self.depth); // low_res_cell_depth = self.depth - delta_depth
405    let n = 1_usize << (dd << 1); // number of depth self.depth cells in the low_res_cell = (2^dd)^2 = 2^(2*dd)
406                                  // Look for a sequence
407    let n = n.min(entries.len());
408    /*for i in 1..n {
409      h += 1;
410      if entries[i] != h {
411        return i;
412      }
413    }*/
414    for (i, e) in entries.iter().enumerate().take(n).skip(1) {
415      h += 1;
416      if *e != h {
417        return i;
418      }
419    }
420    n
421  }
422
423  fn clear_buff(&mut self) {
424    self.sorted = true;
425    self.buffer.clear();
426  }
427}
428
/// Structure defining a simple BMOC.
/// Four different iterators are available:
/// - `bmoc.iter() -> Iterator<u64>` : iterates on the raw value stored in the BMOC (the ordering
///   follow the z-order-curve order).
/// - `bmoc.into_iter() -> Iterator<Cell>`: same a `iter()` except that it returns Cells,
///   i.e. decoded raw value containing the `depth`, `order` and `flag`.
/// - `bmoc.flat_iter() -> Iterator<u64>`: iterates on all the cell number at the maximum depth, in
///   ascending order (flag information is lost).
/// - `bmoc.flat_iter_cell() -> Iterator<Cell>` same as `flat_iter()` but conserving the `flag`
///   information (and the depth which must always equals the BMOC depth).
#[derive(Debug, PartialEq, Eq)]
pub struct BMOC {
  /// Maximum depth of the BMOC (the depth of its deepest cells).
  depth_max: u8,
  /// Raw encoded cells, sorted in natural (z-order curve) order.
  pub entries: Box<[u64]>,
}
444
/// A BMOC cell, i.e. the decoded form of a raw value stored in a BMOC.
#[derive(Debug)]
pub struct Cell {
  /// The encoded value this cell was built from.
  pub raw_value: u64,
  /// Depth of the cell (`<=` the BMOC maximum depth).
  pub depth: u8,
  /// Cell hash value (cell number at `depth`).
  pub hash: u64,
  /// `true` if the cell is fully covered by the MOC, `false` if partially covered.
  pub is_full: bool,
}
452
impl Cell {
  /// Decodes the given raw value, assuming it belongs to a BMOC of maximum depth `depth_max`.
  fn new(raw_value: u64, depth_max: u8) -> Cell {
    // Extract the flag
    let is_full = (raw_value & 1_u64) == 1_u64;
    // Remove the flag bit, then divide by 2 (2 bits per level)
    let delta_depth = ((raw_value >> 1).trailing_zeros() >> 1) as u8;
    // Remove 2 bits per depth difference + 1 sentinel bit + 1 flag bit
    let hash = raw_value >> (2 + (delta_depth << 1));
    let depth = depth_max - delta_depth;
    Cell {
      raw_value,
      depth,
      hash,
      is_full,
    }
  }
}
470
471impl BMOC {
472  pub fn new_empty(depth: u8) -> Self {
473    let builder = BMOCBuilderUnsafe::new(depth, 0);
474    builder.to_bmoc()
475  }
476
  /// Returns a BMOC of the given depth covering the whole sky, i.e. containing the 12 base
  /// cells (depth 0), all flagged as fully covered.
  pub fn new_allsky(depth: u8) -> Self {
    let mut builder = BMOCBuilderUnsafe::new(depth, 12);
    // The upper bound of `push_all` is exclusive: this pushes base cells 0 to 11.
    builder.push_all(0, 0, 12, true);
    builder.to_bmoc()
  }
482
  /// Returns the number of entries (cells) stored in the BMOC.
  pub fn size(&self) -> usize {
    self.entries.len()
  }
486
  /// We suppose here that the entries are already sorted (ASC natural ordering) with
  /// no duplicates and no small cells included into larger one's.
  /// No check is performed on the given entries.
  /// # WARNING
  /// * method made public, use at your own risks!
  pub fn create_unsafe(depth_max: u8, entries: Box<[u64]>) -> BMOC {
    BMOC { depth_max, entries }
  }
494
495  pub(super) fn create_unsafe_copying(depth_max: u8, entries: &[u64]) -> BMOC {
496    let mut entries_copy = Vec::with_capacity(entries.len());
497    for e in entries {
498      entries_copy.push(*e);
499    }
500    BMOC {
501      depth_max,
502      entries: entries_copy.into_boxed_slice(),
503    }
504  }
505
  /// Returns the maximum depth of the BMOC.
  pub fn get_depth_max(&self) -> u8 {
    self.depth_max
  }
509
510  pub fn equals(&self, other: &BMOC) -> bool {
511    if self.depth_max == other.depth_max && self.entries.len() == other.entries.len() {
512      for (r1, r2) in self.iter().zip(other.iter()) {
513        if r1 != r2 {
514          return false;
515        }
516      }
517      return true;
518    }
519    false
520  }
521
522  pub fn assert_equals(&self, other: &BMOC) {
523    if self.depth_max == other.depth_max {
524      for (r1, r2) in self.iter().zip(other.iter()) {
525        if *r1 != *r2 {
526          panic!(
527            "Left: {:?}; Right: {:?}",
528            self.from_raw_value(*r1),
529            other.from_raw_value(*r2)
530          );
531        }
532      }
533      if self.entries.len() != other.entries.len() {
534        panic!("Lengths are different");
535      }
536    } else {
537      panic!("Depths are different");
538    }
539  }
540
  /// Test the given point and return its "Status": in, out of the MOC or maybe
  /// (maybe = the point falls in a cell that is only partially covered).
  pub fn test_coo(&self, lon: f64, lat: f64) -> Status {
    // Encode the cell containing the point, at the BMOC maximum depth, with flag = full
    let h_raw = build_raw_value(
      self.depth_max,
      hash(self.depth_max, lon, lat),
      true,
      self.depth_max,
    );
    match self.entries.binary_search(&h_raw) {
      // Exact match: a max-depth cell containing the point is in the BMOC
      Ok(i) => {
        if is_partial(self.entries[i]) {
          Status::UNKNOWN
        } else {
          Status::IN
        }
      }
      Err(i) => {
        let cell = Cell::new(h_raw, self.depth_max);
        // look in next or previous cells: a larger cell containing the point would be
        // located just before or just after the insertion point
        if i > 0 && is_in(&self.from_raw_value(self.entries[i - 1]), &cell) {
          if is_partial(self.entries[i - 1]) {
            Status::UNKNOWN
          } else {
            Status::IN
          }
        } else if i < self.entries.len() && is_in(&self.from_raw_value(self.entries[i]), &cell) {
          if is_partial(self.entries[i]) {
            Status::UNKNOWN
          } else {
            Status::IN
          }
        } else {
          Status::OUT
        }
      }
    }
  }
578
  /// Returns the BMOC complement:
  /// - cells with flag set to 1 (fully covered) are removed
  /// - cells with flag set to 0 (partially covered) are kept
  /// - empty cells are added with flag set to 1
  ///
  /// The method as been tested when all flags are `is_full` (i.e. regular MOC case).
  pub fn not(&self) -> BMOC {
    // Worst case: only 1 sub-cell by cell in the MOC (+11 for depth 0)
    let mut builder = BMOCBuilderUnsafe::new(self.depth_max, 3 * self.entries.len() + 12);
    // Empty MOC, easy
    if self.entries.is_empty() {
      for h in 0..12_u64 {
        builder.push(0_u8, h, true);
      }
      return builder.to_bmoc();
    }
    // Real case
    let mut d = 0_u8; // current depth of the cursor
    let mut h = 0_u64; // current hash of the cursor, at depth `d`
    // Go down to first cell, pushing the (full) cells located before it
    let mut cell = self.from_raw_value(self.entries[0]);
    go_down(&mut d, &mut h, cell.depth, cell.hash, true, &mut builder);
    if !cell.is_full {
      // Partially covered cells are kept in the complement
      builder.push_raw_unsafe(cell.raw_value);
    }
    // Between first and last: push the (full) cells located between consecutive entries
    for i in 1..self.entries.len() {
      cell = self.from_raw_value(self.entries[i]);
      let dd = dd_4_go_up(d, h, cell.depth, cell.hash);
      go_up(&mut d, &mut h, dd, true, &mut builder);
      go_down(&mut d, &mut h, cell.depth, cell.hash, true, &mut builder);
      if !cell.is_full {
        builder.push_raw_unsafe(cell.raw_value);
      }
    }
    // After last
    let delta_depth = d;
    go_up(&mut d, &mut h, delta_depth, true, &mut builder); // go up to depth 0
    for h in h..12 {
      // Complete with base cells if needed
      builder.push(0_u8, h, true);
    }
    builder.to_bmoc()
  }
623
624  /*
625  /// Go to the next hash value:
626  /// - if the input hash is not the last one of the super-cell
627  ///   (the cell of depth deph - 1 the hash belongs to), the result is simply
628  ///   - output_depth = input_depth
629  ///   - output_hash = input_hash + 1
630  /// - else, the depth is changed (we go up) until the hash is not the last of the super-cell
631  ///   and the result is:
632  ///   - output_depth < input_depth
633  ///   - output_hash = input_hash_at_outpu_depth + 1
634  fn go_next(&self, start_depth: &mut u8, start_hash: &mut u64) {
635    while *start_depth > 0 && ((*start_hash & 3_u64) == 3_u64) {
636      *start_depth -= 1;
637      *start_hash >>= 2;
638    }
639    *start_hash += 1;
640  }*/
641
  /// Returns the intersection of this BMOC with the given BMOC:
  /// - all non overlapping cells are removed
  /// - when two cells are overlapping, the overlapping part is kept
  ///   - the value of the flag is the result of a logical AND between the flags of the merged cells.
  ///
  /// The method as been tested when all flags are `is_full` (i.e. regular MOC case).
  pub fn and(&self, other: &BMOC) -> BMOC {
    let mut builder = BMOCBuilderUnsafe::new(
      max(self.depth_max, other.depth_max),
      max(self.entries.len(), other.entries.len()),
    );
    let mut it_left = self.into_iter();
    let mut it_right = other.into_iter();
    let mut left = it_left.next();
    let mut right = it_right.next();
    // We have 9 cases to take into account:
    // -  3: dL == dR, dL < dR and dR < dL
    // - x3: hL == hR, hL < hR and hR < hL
    while let (Some(l), Some(r)) = (&left, &right) {
      match l.depth.cmp(&r.depth) {
        Ordering::Less => {
          // Left cell is larger: compare it with the right hash degraded at the left depth
          let hr_at_dl = r.hash >> ((r.depth - l.depth) << 1);
          match l.hash.cmp(&hr_at_dl) {
            Ordering::Less => left = it_left.next(),
            Ordering::Greater => right = it_right.next(),
            Ordering::Equal => {
              debug_assert_eq!(l.hash, hr_at_dl);
              // Overlap: keep the smaller (right) cell with the AND of both flags
              builder.push(r.depth, r.hash, r.is_full && l.is_full);
              right = it_right.next()
            }
          }
        }
        Ordering::Greater => {
          // Right cell is larger: compare the left hash degraded at the right depth
          let hl_at_dr = l.hash >> ((l.depth - r.depth) << 1);
          match hl_at_dr.cmp(&r.hash) {
            Ordering::Less => left = it_left.next(),
            Ordering::Greater => right = it_right.next(),
            Ordering::Equal => {
              debug_assert_eq!(hl_at_dr, r.hash);
              // Overlap: keep the smaller (left) cell with the AND of both flags
              builder.push(l.depth, l.hash, r.is_full && l.is_full);
              left = it_left.next()
            }
          }
        }
        Ordering::Equal => {
          debug_assert_eq!(l.depth, r.depth);
          match l.hash.cmp(&r.hash) {
            Ordering::Less => left = it_left.next(),
            Ordering::Greater => right = it_right.next(),
            Ordering::Equal => {
              debug_assert_eq!(l.hash, r.hash);
              // Same cell on both sides
              builder.push(l.depth, l.hash, r.is_full && l.is_full);
              left = it_left.next();
              right = it_right.next()
            }
          }
        }
      }
    }
    builder.to_bmoc()
  }
703
704  /* Try making operations with as few if as possible, playing on indices
705  fn and_v2(&self, other: &BMOC) -> BMOC {
706    let mut builder = BMOCBuilderUnsafe::new(
707      max(self.depth_max, other.depth_max),
708      max(self.entries.len(), other.entries.len())
709    );
710    let mut left = self.entries;
711    let mut right = other.entries;
712    let mut ileft = 0_usize;
713    let mut iright = 0_usize;
714
715  }
716  */
717
  /// Returns the union of this BMOC with the given BMOC:
  /// - all non overlapping cells in both BMOCs are kept;
  /// - overlapping cells are merged, the value of the flag is the result of a logical OR
  ///   between the flags of the merged cells.
  ///
  /// The method as been tested when all flags are `is_full` (i.e. regular MOC case).
  pub fn or(&self, other: &BMOC) -> BMOC {
    let mut builder = BMOCBuilderUnsafe::new(
      max(self.depth_max, other.depth_max),
      max(self.entries.len(), other.entries.len()),
    );
    let mut it_left = self.into_iter();
    let mut it_right = other.into_iter();
    let mut left = it_left.next();
    let mut right = it_right.next();
    // We have 9 cases to take into account:
    // -  3: dL == dR, dL < dR and dR < dL
    // - x3: hL == hR, hL < hR and hR < hL
    while let (Some(l), Some(r)) = (&left, &right) {
      match l.depth.cmp(&r.depth) {
        Ordering::Less => {
          // Left cell is larger: compare it with the right hash degraded at the left depth
          let hr_at_dl = r.hash >> ((r.depth - l.depth) << 1);
          if l.hash < hr_at_dl {
            builder.push(l.depth, l.hash, l.is_full);
            left = it_left.next();
          } else if l.hash > hr_at_dl {
            builder.push(r.depth, r.hash, r.is_full);
            right = it_right.next();
          } else if l.is_full {
            // The large left cell is full: it absorbs all the right cells it contains
            debug_assert_eq!(l.hash, hr_at_dl);
            builder.push(l.depth, l.hash, l.is_full);
            right = consume_while_overlapped(l, &mut it_right);
            left = it_left.next();
          } else {
            debug_assert_eq!(l.hash, hr_at_dl);
            debug_assert!(!l.is_full);
            let mut is_overlapped = false;
            right = consume_while_overlapped_and_partial(l, &mut it_right, &mut is_overlapped);
            if is_overlapped {
              // At least one contained right cell is full: split the left cell around it
              right = self.not_in_cell_4_or(l, right.unwrap(), &mut it_right, &mut builder);
            } else {
              // all flags set to 0 => put large cell with flag  = 0
              builder.push(l.depth, l.hash, false);
            }
            left = it_left.next();
          }
        }
        Ordering::Greater => {
          // Right cell is larger: symmetric of the previous case
          let hl_at_dr = l.hash >> ((l.depth - r.depth) << 1);
          if hl_at_dr < r.hash {
            builder.push(l.depth, l.hash, l.is_full);
            left = it_left.next();
          } else if hl_at_dr > r.hash {
            builder.push(r.depth, r.hash, r.is_full);
            right = it_right.next();
          } else if r.is_full {
            debug_assert_eq!(hl_at_dr, r.hash);
            builder.push(r.depth, r.hash, r.is_full);
            left = consume_while_overlapped(r, &mut it_left);
            right = it_right.next();
          } else {
            debug_assert_eq!(hl_at_dr, r.hash);
            debug_assert!(!r.is_full);
            let mut is_overlapped = false;
            left = consume_while_overlapped_and_partial(r, &mut it_left, &mut is_overlapped);
            if is_overlapped {
              left = self.not_in_cell_4_or(r, left.unwrap(), &mut it_left, &mut builder);
            } else {
              // all flags set to 0 => put large cell with flag  = 0
              builder.push(r.depth, r.hash, false);
            }
            right = it_right.next();
          }
        }
        Ordering::Equal => {
          debug_assert_eq!(l.depth, r.depth);
          match l.hash.cmp(&r.hash) {
            Ordering::Less => {
              builder.push(l.depth, l.hash, l.is_full);
              left = it_left.next();
            }
            Ordering::Greater => {
              builder.push(r.depth, r.hash, r.is_full);
              right = it_right.next();
            }
            Ordering::Equal => {
              debug_assert_eq!(l.hash, r.hash);
              // Same cell on both sides: OR of the flags
              builder.push(l.depth, l.hash, r.is_full || l.is_full);
              left = it_left.next();
              right = it_right.next();
            }
          }
        }
      }
    }
    // Flush the remaining cells of the non-exhausted iterator (if any)
    while let Some(l) = &left {
      debug_assert!(right.is_none());
      builder.push(l.depth, l.hash, l.is_full);
      left = it_left.next();
    }
    while let Some(r) = &right {
      debug_assert!(left.is_none());
      builder.push(r.depth, r.hash, r.is_full);
      right = it_right.next();
    }
    builder.to_bmoc_packing()
  }
825
  /// Helper for [`or`](Self::or): the partial (flag = 0) `low_resolution` cell contains a run
  /// of higher-resolution full cells from `iter` (starting with `c`). Pushes those full cells
  /// (flag = 1) and fills the rest of `low_resolution` with partial (flag = 0) cells.
  /// Returns the first cell of `iter` not contained in `low_resolution` (if any).
  fn not_in_cell_4_or(
    &self,
    low_resolution: &Cell,
    mut c: Cell,
    iter: &mut BMOCIter,
    builder: &mut BMOCBuilderUnsafe,
  ) -> Option<Cell> {
    let mut d = low_resolution.depth; // current depth of the cursor
    let mut h = low_resolution.hash; // current hash of the cursor, at depth `d`
    debug_assert!(c.is_full);
    go_down(&mut d, &mut h, c.depth, c.hash, false, builder);
    builder.push(c.depth, c.hash, true);
    let mut is_overlapped = false;
    let mut cell;
    // do-while loop: consume while overlapped full cells remain
    while {
      cell = consume_while_overlapped_and_partial(low_resolution, iter, &mut is_overlapped);
      is_overlapped
    } {
      c = cell.unwrap(); // if flag => right is not None
      let dd = dd_4_go_up(d, h, c.depth, c.hash);
      go_up(&mut d, &mut h, dd, false, builder);
      go_down(&mut d, &mut h, c.depth, c.hash, false, builder);
      builder.push(c.depth, c.hash, true);
    }
    // Fill up to the end of the `low_resolution` cell with partial cells
    let dd = d - low_resolution.depth;
    go_up(&mut d, &mut h, dd, false, builder);
    go_down(
      &mut d,
      &mut h,
      low_resolution.depth,
      low_resolution.hash + 1,
      false,
      builder,
    );
    cell
  }
862
  /// Returns the symmetric difference of this BMOC with the given BMOC:
  /// - all non overlapping cells in both BMOCs are kept
  /// - when two cells are overlapping, the overlapping part is:
  ///   - removed if both flags = 1
  ///   - kept if one of the flags = 0 (since 0 means partially covered but we don't know which part)
  ///
  /// The method has been tested when all flags are `is_full` (i.e. regular MOC case).
  pub fn xor(&self, other: &BMOC) -> BMOC {
    let mut builder = BMOCBuilderUnsafe::new(
      max(self.depth_max, other.depth_max),
      max(self.entries.len(), other.entries.len()),
    );
    let mut it_left = self.into_iter();
    let mut it_right = other.into_iter();
    let mut left = it_left.next();
    let mut right = it_right.next();
    // We have 9 cases to take into account:
    // -  3: dL == dR, dL < dR and dR < dL
    // - x3: hL == hR, hL < hR and hR < hL
    while let (Some(l), Some(r)) = (&left, &right) {
      match l.depth.cmp(&r.depth) {
        Ordering::Less => {
          // The l cell is larger than the r cell: degrade r hash at the l cell depth
          let hr_at_dl = r.hash >> ((r.depth - l.depth) << 1);
          if l.hash < hr_at_dl {
            // Disjoint, l comes first: keep it
            builder.push(l.depth, l.hash, l.is_full);
            left = it_left.next();
          } else if l.hash > hr_at_dl {
            // Disjoint, r comes first: keep it
            builder.push(r.depth, r.hash, r.is_full);
            right = it_right.next();
          } else if l.is_full {
            debug_assert_eq!(l.hash, hr_at_dl);
            // r (and possibly its successors) inside the fully covered l:
            // keep only the parts of l not in common with the r cells
            right = self.not_in_cell_4_xor(l, r, &mut it_right, &mut builder);
            left = it_left.next();
          } else {
            debug_assert_eq!(l.hash, hr_at_dl);
            debug_assert!(!l.is_full);
            // l only partially covered: keep it as-is, skip all r cells inside it
            builder.push(l.depth, l.hash, l.is_full);
            right = consume_while_overlapped(l, &mut it_right);
            left = it_left.next();
          }
        }
        Ordering::Greater => {
          // The r cell is larger than the l cell: symmetric of the case above
          let hl_at_dr = l.hash >> ((l.depth - r.depth) << 1);
          if hl_at_dr < r.hash {
            builder.push(l.depth, l.hash, l.is_full);
            left = it_left.next();
          } else if hl_at_dr > r.hash {
            builder.push(r.depth, r.hash, r.is_full);
            right = it_right.next();
          } else if r.is_full {
            debug_assert_eq!(hl_at_dr, r.hash);
            left = self.not_in_cell_4_xor(r, l, &mut it_left, &mut builder);
            right = it_right.next();
          } else {
            debug_assert_eq!(hl_at_dr, r.hash);
            debug_assert!(!r.is_full);
            builder.push(r.depth, r.hash, r.is_full);
            left = consume_while_overlapped(r, &mut it_left);
            right = it_right.next();
          }
        }
        Ordering::Equal => {
          debug_assert_eq!(l.depth, r.depth);
          match l.hash.cmp(&r.hash) {
            Ordering::Less => {
              builder.push(l.depth, l.hash, l.is_full);
              left = it_left.next();
            }
            Ordering::Greater => {
              builder.push(r.depth, r.hash, r.is_full);
              right = it_right.next();
            }
            Ordering::Equal => {
              debug_assert_eq!(l.hash, r.hash);
              // Exact same cell on both sides: removed only if both fully covered,
              // else kept with flag = 0
              let both_fully_covered = r.is_full && l.is_full;
              if !both_fully_covered {
                builder.push(l.depth, l.hash, both_fully_covered);
              }
              left = it_left.next();
              right = it_right.next();
            }
          }
        }
      }
    }
    // Copy the tail of whichever iterator is not yet exhausted
    while let Some(l) = &left {
      debug_assert!(right.is_none());
      builder.push(l.depth, l.hash, l.is_full);
      left = it_left.next();
    }
    while let Some(r) = &right {
      debug_assert!(left.is_none());
      builder.push(r.depth, r.hash, r.is_full);
      right = it_right.next();
    }
    builder.to_bmoc_packing()
  }
960
  // add elements of the low resolution cell which are not in the c cell
  /// Used by `xor` (and `minus`): writes into `builder` the parts of the `low_resolution`
  /// cell which are not in common with the cell `c` nor with the following cells of `iter`
  /// located inside `low_resolution`. Filling cells are pushed with flag = 1; a cell `c`
  /// which is not fully covered is itself re-pushed with flag = 0.
  /// Returns the first cell of `iter` not inside `low_resolution` (or `None` if exhausted).
  fn not_in_cell_4_xor(
    &self,
    low_resolution: &Cell,
    c: &Cell,
    iter: &mut BMOCIter,
    builder: &mut BMOCBuilderUnsafe,
  ) -> Option<Cell> {
    // (d, h) is the current position in the implicit tree traversal
    let mut d = low_resolution.depth;
    let mut h = low_resolution.hash;
    // Fill (flag = 1) from the start of low_resolution down to c
    go_down(&mut d, &mut h, c.depth, c.hash, true, builder);
    if !c.is_full {
      builder.push(c.depth, c.hash, false);
    }
    let mut cell = iter.next();
    while let Some(c) = &cell {
      if !is_in(low_resolution, c) {
        break;
      }
      // Fill the gap between the previous position and the current cell
      let dd = dd_4_go_up(d, h, c.depth, c.hash);
      go_up(&mut d, &mut h, dd, true, builder);
      go_down(&mut d, &mut h, c.depth, c.hash, true, builder);
      if !c.is_full {
        builder.push(c.depth, c.hash, false);
      }
      cell = iter.next()
    }
    // Fill (flag = 1) up to the end of the low_resolution cell
    let dd = d - low_resolution.depth;
    go_up(&mut d, &mut h, dd, true, builder);
    go_down(
      &mut d,
      &mut h,
      low_resolution.depth,
      low_resolution.hash + 1,
      true,
      builder,
    );
    cell
  }
1000
1001  /// Returns the difference of this BMOC (left) with the given BMOC (right):
1002  /// - all non overlapping cells of this (left) BMOC are kept
1003  /// - non overlapping cells of the other (right) BMOC are removed
1004  ///   if full, and kept if partially covered (since A MINUS B = A AND (NOT(B))
1005  /// - when two cells are overlapping, the overlapping part is:
1006  ///   - removed if both flags = 1
1007  ///   - kept if one of the flags = 0 (since 0 meas partially covered but O don't know which part)
1008  ///
1009  /// Poor's man implementation: A MINUS B = A AND NOT(B).
1010  pub fn minus(&self, other: &BMOC) -> BMOC {
1011    let mut builder = BMOCBuilderUnsafe::new(
1012      max(self.depth_max, other.depth_max),
1013      max(self.entries.len(), other.entries.len()),
1014    );
1015    let mut it_left = self.into_iter();
1016    let mut it_right = other.into_iter();
1017    let mut left = it_left.next();
1018    let mut right = it_right.next();
1019    // We have 9 cases to take into account:
1020    // -  3: dL == dR, dL < dR and dR < dL
1021    // - x3: hL == hR, hL < hR and hR < hL
1022    while let (Some(l), Some(r)) = (&left, &right) {
1023      match l.depth.cmp(&r.depth) {
1024        Ordering::Less => {
1025          // The l cell is larger than the r cell
1026          // - degrade r cell at the l cell depth
1027          let hr_at_dl = r.hash >> ((r.depth - l.depth) << 1);
1028          if l.hash < hr_at_dl {
1029            builder.push(l.depth, l.hash, l.is_full);
1030            left = it_left.next();
1031          } else if l.hash > hr_at_dl {
1032            right = it_right.next();
1033          } else if l.is_full {
1034            debug_assert_eq!(l.hash, hr_at_dl);
1035            // add elements of the l cell which are not in common with the r cell
1036            right = self.not_in_cell_4_xor(l, r, &mut it_right, &mut builder);
1037            left = it_left.next();
1038          } else {
1039            debug_assert_eq!(l.hash, hr_at_dl);
1040            debug_assert!(!l.is_full);
1041            builder.push(l.depth, l.hash, false);
1042            right = consume_while_overlapped(l, &mut it_right);
1043            left = it_left.next();
1044          }
1045        }
1046        Ordering::Greater => {
1047          // The r cell is larger than the l cell
1048          // - degrade l cell at the r cell depth
1049          let hl_at_dr = l.hash >> ((l.depth - r.depth) << 1);
1050          if hl_at_dr < r.hash {
1051            builder.push(l.depth, l.hash, l.is_full);
1052            left = it_left.next();
1053          } else if hl_at_dr > r.hash {
1054            // remove the r cell
1055            right = it_right.next();
1056          } else if !r.is_full || !l.is_full {
1057            debug_assert_eq!(hl_at_dr, r.hash);
1058            builder.push(l.depth, l.hash, false);
1059            left = it_left.next();
1060          }
1061        }
1062        Ordering::Equal => {
1063          debug_assert_eq!(l.depth, r.depth);
1064          match l.hash.cmp(&r.hash) {
1065            Ordering::Less => {
1066              builder.push(l.depth, l.hash, l.is_full);
1067              left = it_left.next();
1068            }
1069            Ordering::Greater => {
1070              right = it_right.next();
1071            }
1072            Ordering::Equal => {
1073              debug_assert_eq!(l.hash, r.hash);
1074              let both_fully_covered = r.is_full && l.is_full;
1075              if !both_fully_covered {
1076                builder.push(l.depth, l.hash, both_fully_covered);
1077              }
1078              left = it_left.next();
1079              right = it_right.next();
1080            }
1081          }
1082        }
1083      }
1084    }
1085    while let Some(l) = &left {
1086      debug_assert!(right.is_none());
1087      builder.push(l.depth, l.hash, l.is_full);
1088      left = it_left.next();
1089    }
1090    builder.to_bmoc_packing()
1091  }
1092
1093  pub fn from_raw_value(&self, raw_value: u64) -> Cell {
1094    Cell::new(raw_value, self.depth_max)
1095  }
1096
1097  /// Returns the number of cells at depth `depth_max` the moc contains, i.e.
1098  /// the sum for each cell of the number of cells at depth `depth_max`.
1099  pub fn deep_size(&self) -> usize {
1100    let mut sum = 0_usize;
1101    for &raw_value in self.entries.iter() {
1102      let depth = self.get_depth(raw_value);
1103      sum += nside_square_unsafe(self.depth_max - depth) as usize;
1104    }
1105    sum
1106  }
1107
1108  /// Iterator on the BMOC **raw values**. Raw values contains:
1109  /// * the depth if the cell;
1110  /// * the cell number;
1111  /// * a flag telling if the cell is fully covered or not.
1112  /// WARNING: this is probably not the method you are interested in as a user,
1113  /// see [flat_iter](#method.flat_iter) instead.
1114  /// TODO: make a public method to extract information from the raw value
1115  pub fn iter(&self) -> Iter<'_, u64> {
1116    self.entries.iter()
1117  }
1118
1119  /// Returns an iterator iterating over all cells at the BMOC maximum depth
1120  /// (the iteration is made in the natural cell order).
1121  pub fn flat_iter(&self) -> BMOCFlatIter<'_> {
1122    BMOCFlatIter::new(self.depth_max, self.deep_size(), self.entries.iter())
1123  }
1124
1125  /// Returns an iterator iterating over all cells at the BMOC maximum depth
1126  /// (the iteration is made in the natural cell order).
1127  pub fn into_flat_iter(self) -> BMOCIntoFlatIter {
1128    BMOCIntoFlatIter::new(
1129      self.depth_max,
1130      self.deep_size(),
1131      self.entries.into_vec().into_iter(),
1132    )
1133  }
1134
1135  /// Returns an iterator iterating over all cells at the BMOC maximum depth
1136  /// (the iteration is made in the natural cell order).  
1137  /// Contrary to [flat_iter](fn.flat_iter.html), the full cell information (the raw BMOC value
1138  /// it belongs to, its flag) is kept.
1139  pub fn flat_iter_cell(&self) -> BMOCFlatIterCell<'_> {
1140    BMOCFlatIterCell::new(self.depth_max, self.deep_size(), self.entries.iter())
1141  }
1142
1143  /// Returns an array containing all the BMOC cells flattened at the maximum depth.
1144  /// This is an utility methods basically calling `deep_size` to initialize an array
1145  /// and `flat_iter` to retrieve all cells.
1146  pub fn to_flat_array(&self) -> Box<[u64]> {
1147    let mut res: Vec<u64> = Vec::with_capacity(self.deep_size());
1148    for cell in self.flat_iter() {
1149      res.push(cell);
1150    }
1151    res.into_boxed_slice()
1152  }
1153
1154  fn get_depth(&self, raw_value: u64) -> u8 {
1155    self.get_depth_no_flag(rm_flag(raw_value))
1156  }
1157
1158  /// Works both with no flag or with flag set to 0
1159  fn get_depth_no_flag(&self, raw_value_no_flag: u64) -> u8 {
1160    self.depth_max - (raw_value_no_flag.trailing_zeros() >> 1) as u8
1161  }
1162
1163  fn get_depth_icell(&self, raw_value: u64) -> (u8, u64) {
1164    // Remove the flag bit, then divide by 2 (2 bits per level)
1165    let delta_depth = ((raw_value >> 1).trailing_zeros() >> 1) as u8;
1166    // Remove 2 bits per depth difference + 1 sentinel bit + 1 flag bit
1167    let hash = raw_value >> (2 + (delta_depth << 1));
1168    let depth = self.depth_max - delta_depth;
1169    (depth, hash)
1170  }
1171
  /// Transform this (B)MOC as a simple (sorted) array of ranges
  /// (WARNING: the ranges are at the MOC depth, not at the depth 29).
  /// During the operation, we loose the `flag` information attached to each BMOC cell.
  pub fn to_ranges(&self) -> Box<[std::ops::Range<u64>]> {
    let mut ranges: Vec<std::ops::Range<u64>> = Vec::with_capacity(self.entries.len());
    // Range currently being built: [prev_min, prev_max).
    // prev_min == prev_max (== 0) means no range has been started yet.
    let mut prev_min = 0_u64;
    let mut prev_max = 0_u64;
    for cell in self.into_iter() {
      if cell.depth < self.depth_max {
        // Low resolution cell: expand it into a range of depth_max cells
        let range = to_range(cell.hash, self.depth_max - cell.depth);
        if range.start != prev_max {
          // Not contiguous with the current range: flush it and start a new one
          if prev_min != prev_max {
            // false only at first call, then always true
            ranges.push(prev_min..prev_max);
          }
          prev_min = range.start;
        }
        prev_max = range.end;
      } else if cell.hash == prev_max {
        // Single depth_max cell contiguous with the current range: extend it
        prev_max += 1;
      } else {
        // Non contiguous single cell: flush the current range and start a new one
        if prev_min != prev_max {
          // false only at first call, then always true
          ranges.push(prev_min..prev_max);
        }
        prev_min = cell.hash;
        prev_max = cell.hash + 1;
      }
    }
    // Flush the last pending range (if any)
    if prev_min != prev_max {
      // false only at first call, then always true
      ranges.push(prev_min..prev_max);
    }
    ranges.into_boxed_slice()
  }
1207
  /// Transform this (B)MOC as a simple (sorted) array of ranges.
  /// Ranges containing different flag values are split in sub-ranges, each range
  /// carrying the common flag of the cells it covers.
  pub fn to_flagged_ranges(&self) -> Vec<(std::ops::Range<u64>, bool)> {
    let mut ranges: Vec<(std::ops::Range<u64>, bool)> = Vec::with_capacity(self.entries.len());
    // Range currently being built: [prev_min, prev_max) with flag prev_flag.
    let mut prev_min = 0_u64;
    let mut prev_max = 0_u64;
    // Initialize the flag from the first entry (LSB is the flag bit) so that the
    // first cell always merges flag-wise.
    let mut prev_flag = self
      .entries
      .first()
      .map(|v| (v & 1_u64) == 1_u64)
      .unwrap_or(false);
    for cell in self.into_iter() {
      if cell.depth < self.depth_max {
        // Low resolution cell: expand it into a range of depth_max cells
        let range = to_range(cell.hash, self.depth_max - cell.depth);
        // Merge when contiguous and same flag (prev_max == 0: nothing accumulated yet)
        if range.start == prev_max && (prev_max == 0 || cell.is_full == prev_flag) {
          prev_max = range.end;
        } else {
          if prev_min != prev_max {
            // false only at first call, then always true
            ranges.push((prev_min..prev_max, prev_flag));
          }
          prev_min = range.start;
          prev_max = range.end;
          prev_flag = cell.is_full;
        }
      } else if cell.hash == prev_max && cell.is_full == prev_flag {
        // Contiguous single cell with the same flag: extend the current range
        prev_max += 1;
      } else {
        // Flag change or gap: flush the current range and start a new one
        if prev_min != prev_max {
          // false only at first call, then always true
          ranges.push((prev_min..prev_max, prev_flag));
        }
        prev_min = cell.hash;
        prev_max = cell.hash + 1;
        prev_flag = cell.is_full;
      }
    }
    // Flush the last pending range (if any)
    if prev_min != prev_max {
      // false only at first call, then always true
      ranges.push((prev_min..prev_max, prev_flag));
    }
    ranges.shrink_to_fit();
    ranges
  }
1252
  /// Transform this (B)MOC in a very compressed version. We call it `lossy` because
  /// during the operation, we loose the `flag` information attached to each BMOC cell.
  /// # Remark
  /// * If needed we could store the flag information!
  ///
  /// # Info
  /// * Original idea by F.-X. Pineau (see Java library), improved by M. Reinecke (through
  ///   private communication) leading to an even better compression factor.
  /// * Although it seems (c.f. M. Reinecke) that this is quite similar to `Interpolative coding`,
  ///   M. Reinecke tests show a slightly better compression factor. M. Reinecke raised the following
  ///   question: was it worth implementing this specific case instead of using an
  ///   `Interpolative coding` library?
  ///
  /// # Idea
  /// * The basic idea consists in a depth-first traversal of the implicit cell tree,
  ///   emitting one symbol (empty/full leaf, empty/partial/full node) per visited cell.
  #[allow(clippy::many_single_char_names)]
  pub fn compress_lossy(&self) -> CompressedMOC {
    let n = self.entries.len();
    let dm = self.depth_max;
    // 4 + 3n is a capacity heuristic for the builder's byte buffer
    let mut b = CompressedMOCBuilder::new(dm, 4 + 3 * n);
    if n == 0 {
      // Special case of empty MOC: 12 empty base cells
      if dm == 0 {
        for _ in 0..12 {
          b.push_leaf_empty();
        }
      } else {
        for _ in 0..12 {
          b.push_node_empty();
        }
      }
      return b.to_compressed_moc();
    } else if dm == 0 {
      // Special case of other MOC at depth max = 0: only the 12 base cells exist
      let (curr_d, _) = self.get_depth_icell(self.entries[0]);
      assert_eq!(curr_d, 0);
      let mut h = 0_u64;
      for (_, curr_h) in self.entries.iter().map(|e| self.get_depth_icell(*e)) {
        for _ in h..curr_h {
          b.push_leaf_empty();
        }
        b.push_leaf_full();
        h = curr_h + 1;
      }
      for _ in h..12 {
        b.push_leaf_empty();
      }
      return b.to_compressed_moc();
    }
    // Let's start serious things.
    // (d, h) is the current position in the depth-first traversal.
    let mut d;
    let mut h = 0;
    let (curr_d, curr_h) = self.get_depth_icell(self.entries[0]);
    // go down to curr hash, emitting empty siblings on the way
    for dd in (0..=curr_d).rev() {
      let target_h = curr_h >> (dd << 1);
      if curr_d == dm && dd == 0 {
        for _ in h..target_h {
          b.push_leaf_empty();
        }
        b.push_leaf_full();
      } else {
        for _ in h..target_h {
          b.push_node_empty();
        }
        if dd == 0 {
          b.push_node_full()
        } else {
          b.push_node_partial()
        };
      }
      h = target_h << 2;
    }
    d = curr_d;
    h = curr_h;
    // middle, go up and down between consecutive entries
    let mut i = 1_usize;
    while i < n {
      let (curr_d, curr_h) = self.get_depth_icell(self.entries[i]);
      // go up (if needed)!
      let target_h = if d > curr_d {
        // case previous hash deeper that current hash
        let dd = d - curr_d;
        curr_h << (dd << 1)
      } else {
        // case current hash deeper that previous hash, need to go up?
        let dd = curr_d - d;
        curr_h >> (dd << 1)
      };
      // dd = depth of the deepest common ancestor, clamped to the current depth
      let mut dd = ((63 - (h ^ target_h).leading_zeros()) >> 1) as u8;
      if dd > d {
        dd = d;
      }
      // - go up to common depth, completing each level with empty siblings
      if dd > 0 && d == dm {
        for _ in h & 3..3 {
          // <=> (h + 1) & 3 < 4
          b.push_leaf_empty();
        }
        h >>= 2;
        dd -= 1;
        d -= 1;
      }
      for _ in 0..dd {
        for _ in h & 3..3 {
          // <=> (h + 1) & 3 < 4
          b.push_node_empty();
        }
        h >>= 2;
      }
      d -= dd;
      h += 1;
      // - go down to the current entry
      let dd = curr_d - d;
      for rdd in (0..=dd).rev() {
        let target_h = curr_h >> (rdd << 1);
        if curr_d == dm && rdd == 0 {
          for _ in h..target_h {
            b.push_leaf_empty();
          }
          b.push_leaf_full();
        } else {
          for _ in h..target_h {
            b.push_node_empty();
          }
          if rdd == 0 {
            b.push_node_full()
          } else {
            b.push_node_partial()
          };
        }
        h = target_h << 2;
      }
      d = curr_d;
      h = curr_h;
      i += 1;
    }
    // - go up to depth 0, completing each level with empty siblings
    if d == dm {
      for _ in h & 3..3 {
        b.push_leaf_empty();
      }
      h >>= 2;
      d -= 1;
    }
    for _ in 0..d {
      for _ in h & 3..3 {
        b.push_node_empty();
      }
      h >>= 2;
    }
    // - complete till base cell 11
    if dm == 0 {
      for _ in h + 1..12 {
        b.push_leaf_empty();
      }
    } else {
      for _ in h + 1..12 {
        b.push_node_empty();
      }
    }
    b.to_compressed_moc()
  }
1416
1417  /// FITS serialization aiming at been the most efficient (in space and read speed) and made first
1418  /// of all for internal usage.
1419  pub fn to_fits<W: Write>(&self, mut writer: W) -> Result<(), IoError> {
1420    self
1421      .write_fits_header(&mut writer)
1422      .and_then(|()| self.write_fits_data(&mut writer))
1423      .and_then(|n_bytes_written| write_final_padding_ioerr(writer, n_bytes_written))
1424  }
1425
  /// Writes the single 2880-byte FITS primary header block describing the data layout:
  /// `NAXIS2` rows of `NAXIS1` bytes (4 for u32 values up to depth 13, else 8 for u64),
  /// plus the BMOC-specific cards (PRODTYPE, HPXORDER, VAL_NAME, DTENDIAN).
  fn write_fits_header<W: Write>(&self, mut writer: W) -> Result<(), IoError> {
    // Prepare the header: 36 cards of 80 space-padded bytes each
    let mut header_block = [b' '; 2880];
    let mut it = header_block.chunks_mut(80);
    it.next().unwrap()[0..30].copy_from_slice(b"SIMPLE  =                    T"); // Conform to FITS standard
    it.next().unwrap()[0..30].copy_from_slice(b"BITPIX  =                    8"); // We work on bytes, i.e. 8 bits
    it.next().unwrap()[0..30].copy_from_slice(b"NAXIS   =                    2"); // Number of data axis
    if self.depth_max <= 13 {
      // Number of bytes in a u32 value
      it.next().unwrap()[0..30].copy_from_slice(b"NAXIS1  =                    4");
    } else {
      // Number of bytes in a u64 value
      it.next().unwrap()[0..30].copy_from_slice(b"NAXIS1  =                    8");
    }
    write_uint_mandatory_keyword_record(it.next().unwrap(), b"NAXIS2  ", self.entries.len() as u64); // Len of data axis 2 = n_hash(depth)+1
    it.next().unwrap()[0..30].copy_from_slice(b"EXTEND  =                    F"); // No extension allowed
    it.next().unwrap()[0..16].copy_from_slice(b"PRODTYPE= 'BMOC'"); // Product type
    write_uint_mandatory_keyword_record(it.next().unwrap(), b"HPXORDER", self.depth_max as u64);
    write_str_keyword_record(it.next().unwrap(), b"VAL_NAME", "FLAGGED_ZUNIQ");
    it.next().unwrap()[0..20].copy_from_slice(b"DTENDIAN= 'LITTLE  '");
    write_keyword_record(
      it.next().unwrap(),
      b"DATE    ",
      format!(
        "'{}'",
        Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true)
      )
      .as_str(),
    );
    write_keyword_record(
      it.next().unwrap(),
      b"CREATOR ",
      format!(
        "'Rust crate {} {}'",
        env!("CARGO_PKG_NAME"),
        env!("CARGO_PKG_VERSION")
      )
      .as_str(),
    );
    it.next().unwrap()[0..3].copy_from_slice(b"END");
    // Do write the header
    writer.write_all(&header_block[..])
  }
1469  /// Returns the number of bytes written.
1470  fn write_fits_data<W: Write>(&self, mut writer: W) -> Result<usize, IoError> {
1471    let elem_byte_size = if self.depth_max <= 13 {
1472      for fzuniq in &self.entries {
1473        writer.write_all(((*fzuniq) as u32).to_le_bytes().as_ref())?;
1474      }
1475      size_of::<u32>()
1476    } else {
1477      for fzuniq in &self.entries {
1478        writer.write_all(fzuniq.to_le_bytes().as_ref())?;
1479      }
1480      size_of::<u64>()
1481    };
1482    Ok(self.entries.len() * elem_byte_size)
1483  }
1484
1485  pub fn from_fits_file<P: AsRef<Path>>(path: P) -> Result<Self, FitsError> {
1486    File::open(path)
1487      .map_err(FitsError::Io)
1488      .and_then(|file| Self::from_fits(BufReader::new(file)))
1489  }
1490
1491  pub fn from_fits<R: BufRead>(mut reader: R) -> Result<Self, FitsError> {
1492    let mut raw_header = [b' '; 2880];
1493    // Parse header
1494    let mut raw_cards_it = next_36_chunks_of_80_bytes(&mut reader, &mut raw_header)?;
1495    // Parse mandatory, well ordered keywords
1496    let (zuniq_n_bytes, n_rows) =
1497      check_keyword_and_val(raw_cards_it.next().unwrap(), b"SIMPLE ", b"T")
1498        .and_then(|()| check_keyword_and_val(raw_cards_it.next().unwrap(), b"BITPIX ", b"8"))
1499        .and_then(|()| check_keyword_and_val(raw_cards_it.next().unwrap(), b"NAXIS  ", b"2"))
1500        .and_then(|()| {
1501          check_keyword_and_parse_uint_val::<u64>(raw_cards_it.next().unwrap(), b"NAXIS1  ")
1502        })
1503        .and_then(|n_bytes| {
1504          check_keyword_and_parse_uint_val::<u64>(raw_cards_it.next().unwrap(), b"NAXIS2  ")
1505            .map(move |n_rows| (n_bytes, n_rows))
1506        })?;
1507    // Parse other keywords
1508    let mut end_found = false;
1509    let mut prodtype_found = false;
1510    let mut dtendian_found = false;
1511    let mut hpxoder: Option<u8> = None;
1512    let mut date: Option<SystemTime> = None;
1513    for kw_record in &mut raw_cards_it {
1514      match &kw_record[0..8] {
1515        b"EXTEND  " => check_keyword_and_val(kw_record, b"EXTEND  ", b"F"),
1516        b"PRODTYPE" => {
1517          check_keyword_and_str_val(kw_record, b"PRODTYPE", b"BMOC").map(|()| prodtype_found = true)
1518        }
1519        b"HPXORDER" => parse_uint_val::<u8>(kw_record).map(|v| hpxoder = Some(v)),
1520        b"DTENDIAN" => check_keyword_and_str_val(kw_record, b"DTENDIAN", b"LITTLE")
1521          .map(|()| dtendian_found = true),
1522        b"DATE    " => get_str_val_no_quote(kw_record).map(|v| {
1523          date = unsafe { str::from_utf8_unchecked(v) }
1524            .parse::<DateTime<Utc>>()
1525            .ok()
1526            .map(|dt| dt.into())
1527        }),
1528        b"CREATOR " => continue,
1529        b"END     " => {
1530          end_found = true;
1531          break;
1532        }
1533        _ => {
1534          debug!("Ignored FITS card: {}", unsafe {
1535            str::from_utf8_unchecked(kw_record)
1536          });
1537          continue;
1538        }
1539      }?;
1540    }
1541    // Check keywords
1542    if !end_found {
1543      return Err(FitsError::new_custom(String::from(
1544        "'END' keyword not found in the first 36 primary header cards.",
1545      )));
1546    }
1547    if !(prodtype_found & dtendian_found) {
1548      return Err(FitsError::new_custom(String::from(
1549        "One of the BMOC mandatory cards is missing in the FITS header!",
1550      )));
1551    }
1552    let depth = hpxoder.ok_or_else(|| {
1553      FitsError::new_custom(String::from(
1554        "'HPXORDER' keyword not found in the primary header cards.",
1555      ))
1556    })?;
1557    match zuniq_n_bytes {
1558      4 => (0..n_rows)
1559        .into_iter()
1560        .map(|_| reader.read_u32::<LittleEndian>().map(|v| v as u64))
1561        .collect::<Result<Vec<u64>, IoError>>()
1562        .map_err(FitsError::Io),
1563      8 => {
1564        // We could have mmapped the data to handle large BMOCs... let's do it another day
1565        // (or make an iterator and perform operation from iterators, like in MOCs...).
1566        (0..n_rows)
1567          .into_iter()
1568          .map(|_| reader.read_u64::<LittleEndian>())
1569          .collect::<Result<Vec<u64>, IoError>>()
1570          .map_err(FitsError::Io)
1571      }
1572      _ => Err(FitsError::new_custom(format!(
1573        "Wrong 'NAXIS1'. Expected: 4 or 8. Actual: {}",
1574        zuniq_n_bytes
1575      ))),
1576    }
1577    .map(|entries| BMOC::create_unsafe(depth, entries.into_boxed_slice()))
1578  }
1579
1580  /// FITS serialization aiming at been more inter-operable than its alternative.
1581  /// # Remark about generalization
1582  /// * could be the same as a ZUNIQ MOC with a extra boolean column
1583  /// * MOM could be the same with either:
1584  ///     + any primitive type column
1585  ///     + a column of pointer pointing to the FITS HEAP for variable size content (e.g CSV, gzipped CSV, VOTable, ...)
1586  /// * Very similar to EXPLICIT skymaps by at various orders?!!
1587  pub fn to_bintable_fits<W: Write>(&self, mut writer: W) -> Result<(), IoError> {
1588    self
1589      .write_bintable_fits_header(&mut writer)
1590      .and_then(|()| self.write_bintable_fits_data(&mut writer))
1591      .and_then(|n_bytes_written| write_final_padding_ioerr(writer, n_bytes_written))
1592  }
  /// Writes the primary HDU followed by one 2880-byte BINTABLE extension header:
  /// two columns per row, a 8-byte 'K' (i64) ZUNIQ value and a 1-byte 'L' (logical)
  /// FULLY_COVERED flag, hence NAXIS1 = 9.
  fn write_bintable_fits_header<W: Write>(&self, mut writer: W) -> Result<(), IoError> {
    write_primary_hdu_ioerr(&mut writer)?;
    // 36 cards of 80 space-padded bytes each
    let mut header_block = [b' '; 2880];
    let mut it = header_block.chunks_mut(80);
    // Write BINTABLE specific keywords in the buffer
    it.next().unwrap()[0..20].copy_from_slice(b"XTENSION= 'BINTABLE'");
    it.next().unwrap()[0..30].copy_from_slice(b"BITPIX  =                    8");
    it.next().unwrap()[0..30].copy_from_slice(b"NAXIS   =                    2");
    it.next().unwrap()[0..30].copy_from_slice(b"NAXIS1  =                    9"); // 8 bytes for u64 + 1 byte for boolean
    write_uint_mandatory_keyword_record(it.next().unwrap(), b"NAXIS2  ", self.entries.len() as u64);
    it.next().unwrap()[0..30].copy_from_slice(b"PCOUNT  =                    0");
    it.next().unwrap()[0..30].copy_from_slice(b"GCOUNT  =                    1");
    it.next().unwrap()[0..30].copy_from_slice(b"TFIELDS =                    2");
    it.next().unwrap()[0..20].copy_from_slice(b"TTYPE1  = 'ZUNIQ   '");
    it.next().unwrap()[0..20].copy_from_slice(b"TFORM1  = 'K       '");
    it.next().unwrap()[0..25].copy_from_slice(b"TTYPE2  = 'FULLY_COVERED'");
    it.next().unwrap()[0..20].copy_from_slice(b"TFORM2  = 'L       '");
    it.next().unwrap()[0..16].copy_from_slice(b"PRODTYPE= 'BMOC'");
    it.next().unwrap()[0..20].copy_from_slice(b"COORDSYS= 'C       '");
    it.next().unwrap()[0..20].copy_from_slice(b"EXTNAME = 'xtension'");
    write_uint_mandatory_keyword_record(it.next().unwrap(), b"MAXORDER", self.depth_max as u64);
    write_keyword_record(
      it.next().unwrap(),
      b"DATE    ",
      format!(
        "'{}'",
        Utc::now()
          .to_rfc3339_opts(SecondsFormat::Secs, true)
          .trim_end_matches('Z')
      )
      .as_str(),
    );
    write_keyword_record(
      it.next().unwrap(),
      b"CREATOR ",
      format!(
        "'Rust crate {} {}'",
        env!("CARGO_PKG_NAME"),
        env!("CARGO_PKG_VERSION")
      )
      .as_str(),
    );
    it.next().unwrap()[0..3].copy_from_slice(b"END");
    // Do write the header
    writer.write_all(&header_block[..])
  }
1639  /// Returns the number of bytes written.
1640  fn write_bintable_fits_data<W: Write>(&self, mut writer: W) -> Result<usize, IoError> {
1641    let mut n = 0_usize;
1642    for fzuniq in self.entries.iter().cloned() {
1643      let is_full = if (fzuniq as u8 & 1_u8) == 1_u8 {
1644        b'T'
1645      } else {
1646        b'F'
1647      };
1648      let zuniq = fzuniq >> 1;
1649      writer
1650        .write_all(zuniq.to_be_bytes().as_ref())
1651        .and_then(|()| writer.write_u8(is_full))?;
1652      n += 1;
1653    }
1654    Ok(n * (size_of::<u64>() + size_of::<u8>()))
1655  }
1656
1657  pub fn to_csv<W: Write>(&self, mut writer: W) -> Result<(), IoError> {
1658    write!(
1659      &mut writer,
1660      "# BMOC order max: {}; File sorted by ZUNIQ(order, ipix).\n",
1661      self.depth_max
1662    )?;
1663    write!(&mut writer, "order,ipix,is_fully_covered_flag\n")?;
1664    for fzuniq in self.entries.iter().cloned() {
1665      let is_full = fzuniq as u8 & 1_u8;
1666      let zuniq = fzuniq >> 1; // Remove flag bit
1667      let n_trailing_zero = zuniq.trailing_zeros(); // must be even
1668      let hash = zuniq >> (1 | n_trailing_zero); // remove trailing zero plus sentinel bit
1669      let depth = self.depth_max - (n_trailing_zero as u8 >> 1);
1670      write!(&mut writer, "{},{},{}\n", depth, hash, is_full)?;
1671    }
1672    Ok(())
1673  }
1674
1675  // to_json
1676  // to_ascii => pack by depth and Full/Partial ?
1677}
1678
1679#[inline]
1680fn consume_while_overlapped(low_resolution: &Cell, iter: &mut BMOCIter) -> Option<Cell> {
1681  let mut cell = iter.next();
1682  while {
1683    match &cell {
1684      Some(c) => is_in(low_resolution, c),
1685      None => false,
1686    }
1687  } {
1688    cell = iter.next();
1689  }
1690  cell
1691}
1692
1693/// Returns boolean:
1694/// - false = returned cell do not overlap any more
1695/// - true =  returned cell overlap and its flag is 'full'
1696#[inline]
1697fn consume_while_overlapped_and_partial(
1698  low_resolution: &Cell,
1699  iter: &mut BMOCIter,
1700  res_is_overlapped: &mut bool,
1701) -> Option<Cell> {
1702  let mut cell = iter.next();
1703  while {
1704    match &cell {
1705      Some(c) => {
1706        if is_in(low_resolution, c) {
1707          if c.is_full {
1708            *res_is_overlapped = true;
1709            false
1710          } else {
1711            true
1712          }
1713        } else {
1714          false
1715        }
1716      }
1717      None => false,
1718    }
1719  } {
1720    cell = iter.next();
1721  }
1722  cell
1723  /*let mut cell = iter.next();
1724  while {
1725    match &cell {
1726      Some(c) => is_in(low_res_depth, low_res_hash,  c.depth, c.hash),
1727      None => false,
1728    }
1729  } {
1730    if cell.is_full {
1731      *res_is_overlapped = true;
1732      return cell;
1733    }
1734    cell = iter.next();
1735  }
1736  *res_is_overlapped = false;
1737  cell*/
1738}
1739
/// Number of levels to go up from depth `d` so that the resulting cell contains
/// both `h` (at depth `d`) and `next_h` (at depth `next_d`), capped at `d`.
#[inline]
fn dd_4_go_up(d: u8, h: u64, next_d: u8, next_h: u64) -> u8 {
  // Express `next_h` at depth `d` (2 bits per level difference).
  let next_h_at_d = if next_d < d {
    // `next_h` is at a shallower depth: pad it down to depth `d`.
    next_h << ((d - next_d) << 1)
  } else {
    // `next_h` is at a deeper (or equal) depth: truncate it up to depth `d`.
    next_h >> ((next_d - d) << 1)
  };
  // The highest differing bit of the two hashes tells the depth of their
  // deepest common ancestor (2 bits per level; the base cell uses 4 bits,
  // hence the final `.min(d)`). Identical hashes need no up-move at all.
  let diff = h ^ next_h_at_d;
  if diff == 0 {
    0
  } else {
    (((63 - diff.leading_zeros()) >> 1) as u8).min(d)
  }
}
1764
1765/// Returns `true` if the given high resolution cell is in the low resolution cell
1766#[inline]
1767fn is_in(low_resolution: &Cell, high_resolution: &Cell) -> bool {
1768  low_resolution.depth <= high_resolution.depth
1769    && low_resolution.hash
1770      == (high_resolution.hash >> ((high_resolution.depth - low_resolution.depth) << 1))
1771}
1772/*
1773fn is_in(low_res_depth: u8, low_res_hash: u64, high_res_depth: u8, high_res_hash: u64) -> bool {
1774  low_res_depth < high_res_depth
1775    && low_res_hash == (high_res_hash >> (high_res_depth - low_res_depth) << 1)
1776}*/
1777
/// Drops the least-significant (the 'is_full' flag) bit of a raw BMOC value.
#[inline]
fn rm_flag(raw_value: u64) -> u64 {
  raw_value / 2
}
1782
/// Tells whether the raw value codes a partially covered cell (flag bit unset).
#[inline]
fn is_partial(raw_value: u64) -> bool {
  raw_value % 2 == 0
}
1787
/// Tells whether `hash` is not the first of the 4 children of its parent cell.
#[inline]
fn is_not_first_cell_of_larger_cell(hash: u64) -> bool {
  hash % 4 != 0
}
1792
1793#[inline]
1794fn get_depth(raw_value: u64, depth_max: u8) -> u8 {
1795  get_depth_no_flag(rm_flag(raw_value), depth_max)
1796}
1797
/// Depth of the cell coded in a raw value whose flag bit has been removed:
/// the (even) number of trailing zeros below the sentinel bit counts 2 bits
/// per level below `depth_max`.
#[inline]
fn get_depth_no_flag(raw_value_no_flag: u64, depth_max: u8) -> u8 {
  let twice_dd = raw_value_no_flag.trailing_zeros() as u8;
  depth_max - (twice_dd >> 1)
}
1802
/// Hash value coded in the raw value: drops 2 bits per level of depth
/// difference, plus the sentinel bit and the flag bit.
#[inline]
fn get_hash_from_delta_depth(raw_value: u64, delta_depth: u8) -> u64 {
  let shift = 2 + (delta_depth << 1);
  raw_value >> shift
}
1807
/// Owning iterator expanding each BMOC entry into the individual cell hashes
/// it contains at `depth_max`, yielded in ascending order.
pub struct BMOCIntoFlatIter {
  depth_max: u8,
  deep_size: usize,
  raw_val_iter: IntoIter<u64>,
  curr_val: Option<u64>,
  curr_val_max: u64,
  n_returned: usize,
}

impl BMOCIntoFlatIter {
  fn new(depth_max: u8, deep_size: usize, raw_val_iter: IntoIter<u64>) -> BMOCIntoFlatIter {
    let mut it = BMOCIntoFlatIter {
      depth_max,
      deep_size,
      raw_val_iter,
      curr_val: None,
      curr_val_max: 0_u64,
      n_returned: 0_usize,
    };
    // Prime `curr_val`/`curr_val_max` with the first entry.
    it.next_cell();
    it
  }

  /// Total number of depth-max cells this iterator yields.
  pub fn deep_size(&self) -> usize {
    self.deep_size
  }

  /// Depth of the yielded cells.
  pub fn depth(&self) -> u8 {
    self.depth_max
  }

  /// Decodes the next raw entry into its range of depth-max hashes, storing
  /// the first one in `curr_val` and the last one in `curr_val_max`, and
  /// returns the previously stored `curr_val`.
  fn next_cell(&mut self) -> Option<u64> {
    match self.raw_val_iter.next() {
      None => self.curr_val.take(),
      Some(raw_value) => {
        // Drop the flag bit; the (even) number of trailing zeros up to the
        // sentinel bit is twice the depth difference (2 bits per level).
        let twice_delta_depth = ((((raw_value >> 1).trailing_zeros()) >> 1) << 1) as u8;
        // Drop 2 bits per level difference + 1 sentinel bit + 1 flag bit.
        let first = (raw_value >> (2 + twice_delta_depth)) << twice_delta_depth;
        self.curr_val_max = first | ((1_u64 << twice_delta_depth) - 1_u64);
        self.curr_val.replace(first)
      }
    }
  }
}

impl Iterator for BMOCIntoFlatIter {
  type Item = u64;

  fn next(&mut self) -> Option<u64> {
    let val = self.curr_val?;
    self.n_returned += 1;
    if val < self.curr_val_max {
      self.curr_val.replace(val + 1)
    } else {
      self.next_cell()
    }
  }

  fn size_hint(&self) -> (usize, Option<usize>) {
    let remaining = self.deep_size - self.n_returned;
    (remaining, Some(remaining))
  }
}
1877
/// Borrowing iterator expanding each BMOC entry into the individual cell
/// hashes it contains at `depth_max`, yielded in ascending order.
pub struct BMOCFlatIter<'a> {
  depth_max: u8,
  deep_size: usize,
  raw_val_iter: Iter<'a, u64>,
  curr_val: Option<u64>,
  curr_val_max: u64,
  n_returned: usize,
}

impl<'a> BMOCFlatIter<'a> {
  fn new(depth_max: u8, deep_size: usize, raw_val_iter: Iter<'a, u64>) -> BMOCFlatIter<'a> {
    let mut it = BMOCFlatIter {
      depth_max,
      deep_size,
      raw_val_iter,
      curr_val: None,
      curr_val_max: 0_u64,
      n_returned: 0_usize,
    };
    // Prime `curr_val`/`curr_val_max` with the first entry.
    it.next_cell();
    it
  }

  /// Total number of depth-max cells this iterator yields.
  pub fn deep_size(&self) -> usize {
    self.deep_size
  }

  /// Depth of the yielded cells.
  pub fn depth(&self) -> u8 {
    self.depth_max
  }

  /// Decodes the next raw entry into its range of depth-max hashes, storing
  /// the first one in `curr_val` and the last one in `curr_val_max`, and
  /// returns the previously stored `curr_val`.
  fn next_cell(&mut self) -> Option<u64> {
    match self.raw_val_iter.next() {
      None => self.curr_val.take(),
      Some(&raw_value) => {
        // Drop the flag bit; the (even) number of trailing zeros up to the
        // sentinel bit is twice the depth difference (2 bits per level).
        let twice_delta_depth = ((((raw_value >> 1).trailing_zeros()) >> 1) << 1) as u8;
        // Drop 2 bits per level difference + 1 sentinel bit + 1 flag bit.
        let first = (raw_value >> (2 + twice_delta_depth)) << twice_delta_depth;
        self.curr_val_max = first | ((1_u64 << twice_delta_depth) - 1_u64);
        self.curr_val.replace(first)
      }
    }
  }
}

impl Iterator for BMOCFlatIter<'_> {
  type Item = u64;

  fn next(&mut self) -> Option<u64> {
    let val = self.curr_val?;
    self.n_returned += 1;
    if val < self.curr_val_max {
      self.curr_val.replace(val + 1)
    } else {
      self.next_cell()
    }
  }

  fn size_hint(&self) -> (usize, Option<usize>) {
    let remaining = self.deep_size - self.n_returned;
    (remaining, Some(remaining))
  }
}
1954
1955pub struct BMOCFlatIterCell<'a> {
1956  depth_max: u8,
1957  deep_size: usize,
1958  raw_val_iter: Iter<'a, u64>,
1959
1960  //curr_raw_val: u64,
1961  //curr_flag: bool,
1962  curr_val: Option<Cell>,
1963  curr_val_max: u64,
1964
1965  n_returned: usize,
1966}
1967
1968impl<'a> BMOCFlatIterCell<'a> {
1969  fn new(depth_max: u8, deep_size: usize, raw_val_iter: Iter<'a, u64>) -> BMOCFlatIterCell<'a> {
1970    let mut flat_iter = BMOCFlatIterCell {
1971      depth_max,
1972      deep_size,
1973      raw_val_iter,
1974      curr_val: None,
1975      curr_val_max: 0_u64,
1976      n_returned: 0_usize,
1977    };
1978    flat_iter.next_cell();
1979    flat_iter
1980  }
1981
1982  pub fn deep_size(&self) -> usize {
1983    self.deep_size
1984  }
1985
1986  pub fn depth(&self) -> u8 {
1987    self.depth_max
1988  }
1989
1990  fn next_cell(&mut self) -> Option<Cell> {
1991    match self.raw_val_iter.next() {
1992      None => self.curr_val.take(),
1993      Some(&raw_value) => {
1994        // Remove the flag bit, then divide by 2 (2 bits per level)
1995        let delta_depth = ((raw_value >> 1).trailing_zeros() >> 1) as u8;
1996        let twice_delta_depth = delta_depth << 1;
1997        // Remove 2 bits per depth difference + 1 sentinel bit + 1 flag bit
1998        let hash = raw_value >> (2 + twice_delta_depth);
1999        let val = hash << twice_delta_depth;
2000        self.curr_val_max = val | ((1_u64 << twice_delta_depth) - 1_u64);
2001        self.curr_val.replace(Cell {
2002          raw_value,
2003          depth: self.depth_max,
2004          hash: val,
2005          is_full: (raw_value & 1_u64) == 1_u64,
2006        })
2007      }
2008    }
2009  }
2010}
2011
2012impl Iterator for BMOCFlatIterCell<'_> {
2013  type Item = Cell;
2014
2015  fn next(&mut self) -> Option<Cell> {
2016    if let Some(cell) = &self.curr_val {
2017      self.n_returned += 1;
2018      if cell.hash < self.curr_val_max {
2019        let new_cell = Cell {
2020          raw_value: cell.raw_value,
2021          depth: self.depth_max,
2022          hash: cell.hash + 1,
2023          is_full: cell.is_full,
2024        };
2025        self.curr_val.replace(new_cell)
2026      } else {
2027        self.next_cell()
2028      }
2029    } else {
2030      None
2031    }
2032  }
2033
2034  fn size_hint(&self) -> (usize, Option<usize>) {
2035    let n = self.deep_size - self.n_returned;
2036    (n, Some(n))
2037  }
2038}
2039
2040pub struct BMOCIter<'a> {
2041  depth_max: u8,
2042  iter: Iter<'a, u64>,
2043}
2044
2045impl Iterator for BMOCIter<'_> {
2046  type Item = Cell;
2047
2048  fn next(&mut self) -> Option<Cell> {
2049    match self.iter.next() {
2050      None => None,
2051      Some(&raw_value) => Some(Cell::new(raw_value, self.depth_max)),
2052    }
2053  }
2054
2055  fn size_hint(&self) -> (usize, Option<usize>) {
2056    self.iter.size_hint()
2057  }
2058}
2059
2060impl<'a> IntoIterator for &'a BMOC {
2061  type Item = Cell;
2062  type IntoIter = BMOCIter<'a>;
2063
2064  fn into_iter(self) -> Self::IntoIter {
2065    BMOCIter {
2066      depth_max: self.depth_max,
2067      iter: self.entries.iter(),
2068    }
2069  }
2070}
2071
/// Create a BMOC raw value coding the depth, the hash and a flag in a way such that
/// the natural ordering follow a z-order curve.
///
/// # Inputs
/// - `depth`: depth of the hash value
/// - `hash`: hash value
/// - `is_full`: must be `false` (not full) or `true` (full)
/// - `depth_max`: the depth of the BMOC (we can use 29 for a unique raw value, but it will work
///   only with languages supporting unsigned 64 bit integers)
///
/// # Outputs
/// - the value coded like this:
///   - BBBBxx...xxS00...00F if depth < depth_max
///   - BBBBxx...xxxx...xxSF if depth = depth_max
///   - with in both cases:
///     -  B: the 4 bits coding the base hash [0-11]
///     - xx: the 2 bits of level x
///     -  S: the sentinel bit coding the depth
///     - 00: if (depth != depth_max) those bits are unused bits
///     -  F: the flag bit (0: partial, 1: full)
#[inline]
fn build_raw_value(depth: u8, hash: u64, is_full: bool, depth_max: u8) -> u64 {
  // Append the sentinel bit below the hash.
  let with_sentinel = (hash << 1) | 1_u64;
  // Left-align: 2 unused bits per level below `depth_max`, plus room for the flag bit.
  let aligned = with_sentinel << (1 + ((depth_max - depth) << 1));
  // The LSB carries the 'is_full' flag.
  aligned | u64::from(is_full)
}
2101
2102/// Fill with all cells from `start_hash` at `start_depth` to `start_hash_at_target_depth + 1`.
2103/// with `target_depth` = `start_depth - delta_depth`.
2104/// - `flag`: value of the is_full flag to be set in cells while going up
2105///
2106/// The output depth is the input depth minus delta_depth
2107/// The output hash value is the input hash at the output depth, plus one
2108fn go_up(
2109  start_depth: &mut u8,
2110  start_hash: &mut u64,
2111  delta_depth: u8,
2112  flag: bool,
2113  builder: &mut BMOCBuilderUnsafe,
2114) {
2115  // let output_depth = *start_depth - delta_depth;       // For debug only
2116  // let output_hash = (*start_hash >> (delta_depth << 1)) + 1; // For debug only
2117  for _ in 0_u8..delta_depth {
2118    let target_hash = *start_hash | 3_u64;
2119    for h in (*start_hash + 1)..=target_hash {
2120      builder.push(*start_depth, h, flag);
2121    }
2122    *start_hash >>= 2;
2123    *start_depth -= 1;
2124  }
2125  *start_hash += 1;
2126  // debug_assert_eq!(*start_depth, output_depth);
2127  // debug_assert_eq!(*start_hash, output_hash);
2128}
2129
2130fn go_down(
2131  start_depth: &mut u8,
2132  start_hash: &mut u64,
2133  target_depth: u8,
2134  target_hash: u64,
2135  flag: bool,
2136  builder: &mut BMOCBuilderUnsafe,
2137) {
2138  debug_assert!(target_depth >= *start_depth);
2139  let mut twice_dd = (target_depth - *start_depth) << 1;
2140  for d in *start_depth..=target_depth {
2141    //range(0, target_depth - start_depth).rev() {
2142    let target_h_at_d = target_hash >> twice_dd;
2143    for h in *start_hash..target_h_at_d {
2144      builder.push(d, h, flag);
2145    }
2146    if d != target_depth {
2147      *start_hash = target_h_at_d << 2;
2148      twice_dd -= 2;
2149    }
2150  }
2151  *start_depth = target_depth;
2152  *start_hash = target_hash;
2153}
2154
2155pub struct CompressedMOCBuilder {
2156  moc: Vec<u8>,
2157  depth_max: u8,
2158  ibyte: usize,
2159  ibit: u8,
2160}
2161
2162impl CompressedMOCBuilder {
2163  /// Capacity = number of bytes.
2164  fn new(depth_max: u8, capacity: usize) -> CompressedMOCBuilder {
2165    let mut moc = vec![0_u8; capacity + 1];
2166    moc[0] = depth_max;
2167    CompressedMOCBuilder {
2168      moc,
2169      depth_max,
2170      ibyte: 1,
2171      ibit: 0,
2172    }
2173  }
2174
2175  #[allow(clippy::wrong_self_convention)]
2176  fn to_compressed_moc(mut self) -> CompressedMOC {
2177    self.moc.resize(
2178      if self.ibit == 0 {
2179        self.ibyte
2180      } else {
2181        self.ibyte + 1
2182      },
2183      0,
2184    );
2185    CompressedMOC {
2186      moc: self.moc.into_boxed_slice(),
2187      depth_max: self.depth_max,
2188    }
2189  }
2190
2191  fn push_0(&mut self) {
2192    self.ibyte += (self.ibit == 7) as usize;
2193    self.ibit += 1;
2194    self.ibit &= 7;
2195  }
2196  fn push_1(&mut self) {
2197    self.moc[self.ibyte] |= 1_u8 << self.ibit;
2198    self.push_0();
2199  }
2200  fn push_node_empty(&mut self) {
2201    self.push_1();
2202    self.push_0();
2203  }
2204  fn push_node_full(&mut self) {
2205    self.push_1();
2206    self.push_1();
2207  }
2208  fn push_node_partial(&mut self) {
2209    self.push_0();
2210  }
2211  fn push_leaf_empty(&mut self) {
2212    self.push_0();
2213  }
2214  fn push_leaf_full(&mut self) {
2215    self.push_1();
2216  }
2217}
2218
/// Bit-level reader used to decode a compressed MOC
/// (byte 0 stores the depth, so reading starts at byte 1).
pub struct CompressedMOCDecompHelper<'a> {
  moc: &'a [u8],
  // Index of the byte currently being read.
  ibyte: usize,
  // Index (0..8) of the next bit to read inside `moc[ibyte]`.
  ibit: u8,
}

impl<'a> CompressedMOCDecompHelper<'a> {
  fn new(moc: &'a [u8]) -> CompressedMOCDecompHelper<'a> {
    CompressedMOCDecompHelper {
      moc,
      ibyte: 1,
      ibit: 0,
    }
  }

  /// Reads the next bit and advances the cursor.
  fn get(&mut self) -> bool {
    let bit = (self.moc[self.ibyte] >> self.ibit) & 1_u8 != 0;
    if self.ibit == 7 {
      self.ibit = 0;
      self.ibyte += 1;
    } else {
      self.ibit += 1;
    }
    bit
  }
}
2242
/// First elements contains the maximum depth
pub struct CompressedMOC {
  // Bit-packed MOC content; byte 0 stores the maximum depth.
  moc: Box<[u8]>,
  // Maximum depth of the MOC (copy of `moc[0]`).
  depth_max: u8,
}
2248
impl CompressedMOC {
  /// Returns the maximum depth of this compressed MOC.
  pub fn depth(&self) -> u8 {
    self.depth_max
  }

  /// Returns the size, in bytes, of the compressed representation
  /// (the leading depth byte included).
  pub fn byte_size(&self) -> usize {
    self.moc.len()
  }

  /// Encodes the compressed MOC bytes in base64 (standard alphabet).
  pub fn to_b64(&self) -> String {
    STANDARD.encode(&self.moc)
  }

  /// Decodes a base64 string such as the one produced by `to_b64`.
  /// NOTE(review): panics (`decoded[0]`) if the decoded payload is empty —
  /// TODO confirm callers never pass an empty string.
  pub fn from_b64(b64_encoded: String) -> Result<CompressedMOC, DecodeError> {
    let decoded = STANDARD.decode(b64_encoded)?;
    let depth_max = decoded[0];
    Ok(CompressedMOC {
      moc: decoded.into_boxed_slice(),
      depth_max,
    })
  }

  /// Decompresses this compressed MOC into a `BMOC`.
  pub fn self_decompress(&self) -> BMOC {
    CompressedMOC::decompress(&self.moc)
  }

  // TODO: create an iterator (to iterate on cells while decompressing)
  /// Decompresses the given compressed MOC bytes into a `BMOC`, walking the
  /// implicit cell tree depth-first (z-order) and pushing the covered cells.
  /// The traversal ends once the walk leaves the last base cell
  /// (i.e. is back at `depth == 0` with `hash == 12`).
  pub fn decompress(cmoc: &[u8]) -> BMOC {
    let depth_max = cmoc[0];
    let mut moc_builder = BMOCBuilderUnsafe::new(depth_max, 8 * (cmoc.len() - 1));
    let mut bits = CompressedMOCDecompHelper::new(cmoc);
    let mut depth = 0_u8;
    let mut hash = 0_u64;
    while depth != 0 || hash != 12 {
      if bits.get() {
        // bit = 1: leaf at depth_max, or a node whose next bit tells full/empty.
        if depth == depth_max || bits.get() {
          moc_builder.push(depth, hash, true);
        }
        // go up if needed (current cell was the last child of its parent)
        while hash & 3 == 3 && depth > 0 {
          hash >>= 2;
          depth -= 1;
        }
        // take next hash
        hash += 1;
      } else {
        // bit = 0: empty leaf at depth_max, or a partial node to descend into.
        if depth == depth_max {
          // go up if needed
          while hash & 3 == 3 && depth > 0 {
            hash >>= 2;
            depth -= 1;
          }
          // take next hash
          hash += 1;
        } else {
          debug_assert!(depth < depth_max);
          // go down of 1 level
          hash <<= 2;
          depth += 1;
        }
      }
    }
    moc_builder.to_bmoc()
  }
}
2316
#[cfg(test)]
mod tests {
  use super::*;
  use crate::nested::{cone_coverage_approx_custom, polygon_coverage};

  // Builds the lossy-compressed form of an empty BMOC at the given depth.
  fn build_compressed_moc_empty(depth: u8) -> CompressedMOC {
    let mut builder = BMOCBuilderFixedDepth::new(depth, true);
    builder.to_bmoc().unwrap().compress_lossy()
  }

  // Builds the lossy-compressed form of an all-sky BMOC (all 12 * 4^depth cells).
  fn build_compressed_moc_full(depth: u8) -> CompressedMOC {
    let mut builder = BMOCBuilderFixedDepth::new(depth, true);
    for icell in 0..12 * (1 << (depth << 1)) {
      builder.push(icell)
    }
    let bmoc = builder.to_bmoc().unwrap();
    eprintln!("Entries: {}", bmoc.entries.len());
    bmoc.compress_lossy()
  }

  // Debug helper: prints the BMOC as an Aladin 'draw moc' command.
  fn to_aladin_moc(bmoc: &BMOC) {
    print!("draw moc {}/", bmoc.get_depth_max());
    for cell in bmoc.flat_iter() {
      print!("{}, ", cell);
    }
  }

  #[test]
  fn testok_compressed_moc_empty_d0() {
    let compressed = build_compressed_moc_empty(0);
    assert_eq!(compressed.byte_size(), 1 + 2);
    assert_eq!(compressed.moc, vec![0_u8, 0_u8, 0_u8].into_boxed_slice());
    let b64 = compressed.to_b64();
    assert_eq!(b64, "AAAA");
    // Round-trip: decompress then re-compress must give back the same bytes.
    assert_eq!(
      CompressedMOC::decompress(&compressed.moc)
        .compress_lossy()
        .to_b64(),
      b64
    );
  }

  #[test]
  fn testok_compressed_moc_empty_d1() {
    let compressed = build_compressed_moc_empty(1);
    assert_eq!(compressed.byte_size(), 1 + 24 / 8);
    assert_eq!(
      compressed.moc,
      vec![1_u8, 85_u8, 85_u8, 85_u8].into_boxed_slice()
    );
    let b64 = compressed.to_b64();
    assert_eq!(b64, "AVVVVQ==");
    // Round-trip: decompress then re-compress must give back the same bytes.
    assert_eq!(
      CompressedMOC::decompress(&compressed.moc)
        .compress_lossy()
        .to_b64(),
      b64
    );
  }

  #[test]
  fn testok_compressed_moc_full_d0() {
    let compressed = build_compressed_moc_full(0);
    assert_eq!(compressed.byte_size(), 1 + 2);
    assert_eq!(compressed.moc, vec![0_u8, 255_u8, 15_u8].into_boxed_slice());
    // eprintln!("{}", compressed.to_b64());
    let b64 = compressed.to_b64();
    assert_eq!(b64, "AP8P");
    // Round-trip: decompress then re-compress must give back the same bytes.
    assert_eq!(
      CompressedMOC::decompress(&compressed.moc)
        .compress_lossy()
        .to_b64(),
      b64
    );
  }

  #[test]
  fn testok_compressed_moc_full_d1() {
    let compressed = build_compressed_moc_full(1);
    assert_eq!(compressed.byte_size(), 1 + 24 / 8);
    eprintln!("{:?}", compressed.moc);
    eprintln!("{}", compressed.to_b64());
    let b64 = compressed.to_b64();
    assert_eq!(b64, "Af///w==");
    // Round-trip: decompress then re-compress must give back the same bytes.
    assert_eq!(
      CompressedMOC::decompress(&compressed.moc)
        .compress_lossy()
        .to_b64(),
      b64
    );
  }

  #[test]
  fn test_ok_allsky_and_empty_bmoc() {
    // The all-sky BMOC has exactly the 12 base cells; NOT must swap it with the empty one.
    let bmoc_allsky = BMOC::new_allsky(18);
    assert_eq!(bmoc_allsky.entries.len(), 12);
    let bmoc_empty = BMOC::new_empty(18);
    assert_eq!(bmoc_empty.entries.len(), 0);
    assert_eq!(bmoc_allsky.not(), bmoc_empty);
    assert_eq!(bmoc_allsky, bmoc_empty.not());
  }

  #[test]
  fn test_ok_bmoc_not_round_trip() {
    // not(not(moc)) must be the identity.
    let mut poly_vertices_deg: [f64; 14] = [
      272.511081, -19.487278, 272.515300, -19.486595, 272.517029, -19.471442, 272.511714,
      -19.458837, 272.506430, -19.459001, 272.496401, -19.474322, 272.504821, -19.484924,
    ];
    for v in poly_vertices_deg.iter_mut() {
      *v = (*v).to_radians();
    }
    // Interleaved (lon, lat) pairs -> Vec of tuples.
    let vertices: Vec<(f64, f64)> = poly_vertices_deg
      .iter()
      .copied()
      .step_by(2)
      .zip(poly_vertices_deg.iter().copied().skip(1).step_by(2))
      .collect();
    let moc_org = polygon_coverage(14, vertices.as_slice(), true);
    let moc_not = moc_org.not();
    let moc_out = moc_not.not();
    println!("len: {}", moc_org.size());
    println!("len: {}", moc_not.size());
    println!("len: {}", moc_out.size());
    assert_eq!(moc_org, moc_out);
  }

  #[test]
  fn test_ok_bmoc_union_and_not() {
    // De Morgan: or(a, b) == not(and(not(a), not(b))).
    let mut poly1_vertices_deg: [f64; 14] = [
      272.511081, -19.487278, 272.515300, -19.486595, 272.517029, -19.471442, 272.511714,
      -19.458837, 272.506430, -19.459001, 272.496401, -19.474322, 272.504821, -19.484924,
    ];
    for v in poly1_vertices_deg.iter_mut() {
      *v = (*v).to_radians();
    }
    let mut poly2_vertices_deg: [f64; 8] = [
      272.630446, -19.234210, 272.637274, -19.248542, 272.638942, -19.231476, 272.630868,
      -19.226364,
    ];
    for v in poly2_vertices_deg.iter_mut() {
      *v = (*v).to_radians();
    }
    let v1: Vec<(f64, f64)> = poly1_vertices_deg
      .iter()
      .copied()
      .step_by(2)
      .zip(poly1_vertices_deg.iter().copied().skip(1).step_by(2))
      .collect();
    let v2: Vec<(f64, f64)> = poly2_vertices_deg
      .iter()
      .copied()
      .step_by(2)
      .zip(poly2_vertices_deg.iter().copied().skip(1).step_by(2))
      .collect();
    let moc1 = polygon_coverage(14, v1.as_slice(), true);
    let moc2 = polygon_coverage(14, v2.as_slice(), true);
    let not_moc1 = moc1.not();
    let not_moc2 = moc2.not();
    let union = moc1.or(&moc2);
    let not_inter = not_moc1.and(&not_moc2);
    let union_bis = not_inter.not();
    //to_aladin_moc(&moc1);
    //println!("\n");
    //to_aladin_moc(&moc2);
    //println!("\n");
    // to_aladin_moc(&union);
    //println!("\n");
    // to_aladin_moc(&union_bis);
    //println!("\n");
    assert_eq!(union, union_bis);
  }

  #[test]
  fn test_ok_bmoc_xor_minus_coherency() {
    // No overlapping parts, so we do no test thoroughly XOR and MINUS!!
    let mut poly1_vertices_deg: [f64; 14] = [
      272.511081, -19.487278, 272.515300, -19.486595, 272.517029, -19.471442, 272.511714,
      -19.458837, 272.506430, -19.459001, 272.496401, -19.474322, 272.504821, -19.484924,
    ];
    for v in poly1_vertices_deg.iter_mut() {
      *v = (*v).to_radians();
    }
    let mut poly2_vertices_deg: [f64; 8] = [
      272.630446, -19.234210, 272.637274, -19.248542, 272.638942, -19.231476, 272.630868,
      -19.226364,
    ];
    for v in poly2_vertices_deg.iter_mut() {
      *v = (*v).to_radians();
    }
    let mut poly3_vertices_deg: [f64; 178] = [
      272.536719, -19.461249, 272.542612, -19.476380, 272.537389, -19.491509, 272.540192,
      -19.499823, 272.535455, -19.505218, 272.528024, -19.505216, 272.523437, -19.500298,
      272.514082, -19.503376, 272.502271, -19.500966, 272.488647, -19.490390, 272.481932,
      -19.490913, 272.476737, -19.486589, 272.487633, -19.455645, 272.500386, -19.444996,
      272.503003, -19.437557, 272.512303, -19.432436, 272.514132, -19.423973, 272.522103,
      -19.421523, 272.524511, -19.413250, 272.541021, -19.400024, 272.566264, -19.397500,
      272.564202, -19.389111, 272.569055, -19.383210, 272.588186, -19.386539, 272.593376,
      -19.381832, 272.596327, -19.370541, 272.624911, -19.358915, 272.629256, -19.347842,
      272.642277, -19.341020, 272.651322, -19.330424, 272.653174, -19.325079, 272.648903,
      -19.313708, 272.639616, -19.311098, 272.638128, -19.303083, 272.632705, -19.299839,
      272.627971, -19.289408, 272.628226, -19.276293, 272.633750, -19.270590, 272.615109,
      -19.241810, 272.614704, -19.221196, 272.618224, -19.215572, 272.630809, -19.209945,
      272.633540, -19.198681, 272.640711, -19.195292, 272.643028, -19.186751, 272.651477,
      -19.182729, 272.649821, -19.174859, 272.656782, -19.169272, 272.658933, -19.161883,
      272.678012, -19.159481, 272.689173, -19.176982, 272.689395, -19.183512, 272.678006,
      -19.204016, 272.671112, -19.206598, 272.664854, -19.203523, 272.662760, -19.211156,
      272.654435, -19.214434, 272.652969, -19.222085, 272.656724, -19.242136, 272.650071,
      -19.265092, 272.652868, -19.274296, 272.660871, -19.249462, 272.670041, -19.247807,
      272.675533, -19.254935, 272.673291, -19.273917, 272.668710, -19.279245, 272.671460,
      -19.287043, 272.667507, -19.293933, 272.669261, -19.300601, 272.663969, -19.307130,
      272.672626, -19.308954, 272.675225, -19.316490, 272.657188, -19.349105, 272.657638,
      -19.367455, 272.662447, -19.372035, 272.662232, -19.378566, 272.652479, -19.386871,
      272.645819, -19.387933, 272.642279, -19.398277, 272.629282, -19.402739, 272.621487,
      -19.398197, 272.611782, -19.405716, 272.603367, -19.404667, 272.586162, -19.422703,
      272.561792, -19.420008, 272.555815, -19.413012, 272.546500, -19.415611, 272.537427,
      -19.424213, 272.533081, -19.441402,
    ];
    for v in poly3_vertices_deg.iter_mut() {
      *v = (*v).to_radians();
    }
    let v1: Vec<(f64, f64)> = poly1_vertices_deg
      .iter()
      .copied()
      .step_by(2)
      .zip(poly1_vertices_deg.iter().copied().skip(1).step_by(2))
      .collect();
    let v2: Vec<(f64, f64)> = poly2_vertices_deg
      .iter()
      .copied()
      .step_by(2)
      .zip(poly2_vertices_deg.iter().copied().skip(1).step_by(2))
      .collect();
    let v3: Vec<(f64, f64)> = poly3_vertices_deg
      .iter()
      .copied()
      .step_by(2)
      .zip(poly3_vertices_deg.iter().copied().skip(1).step_by(2))
      .collect();
    let moc1 = polygon_coverage(18, v1.as_slice(), true);
    let moc2 = polygon_coverage(18, v2.as_slice(), true);
    let moc3 = polygon_coverage(18, v3.as_slice(), true);

    // xor, minus and and-with-not must agree on disjoint operands.
    let union12 = moc1.or(&moc2);
    let res1 = moc3.xor(&union12);
    let res2 = moc3.minus(&union12);
    let res3 = moc3.and(&union12.not());

    assert_eq!(res1, res2);
    assert_eq!(res1, res3);
  }

  #[test]
  fn test_ok_bmoc_not() {
    let lon = 13.158329_f64.to_radians();
    let lat = -72.80028_f64.to_radians();
    let radius = 5.64323_f64.to_radians();
    let depth = 6;
    let delta_depth = 5;

    let bmoc = cone_coverage_approx_custom(depth, delta_depth, lon, lat, radius);

    let not_bmoc = bmoc.not();

    println!("BMOC");
    for (range, flag) in bmoc.to_flagged_ranges() {
      println!("flag: {}; range: {:?}", flag, range);
    }
    println!("NOT BMOC");
    for (range, flag) in not_bmoc.to_flagged_ranges() {
      println!("flag: {}; range: {:?}", flag, range);
    }
    // Assert that the first range has the flag 'true'.
    assert!(not_bmoc.to_flagged_ranges().first().unwrap().1)
  }
}
2591}