krill/upgrades/
mod.rs

1//! Support Krill upgrades, e.g.:
2//! - Updating the format of commands or events
3//! - Export / Import data
4
5use std::collections::HashMap;
6use std::fmt;
7use std::str::FromStr;
8
9use clap::crate_version;
10use log::{debug, error, info, trace, warn};
11use rpki::{
12    ca::idexchange::{CaHandle, MyHandle},
13    repository::x509::Time,
14};
15use serde::{Deserialize, Serialize};
16use serde::de::DeserializeOwned;
17
18use crate::{
19    commons::{
20        error::KrillIoError,
21        eventsourcing::{
22            Aggregate, AggregateStore, AggregateStoreError,
23            Storable, StoredCommand, WalStoreError,
24            WithStorableDetails,
25        },
26        storage::{Key, KeyValueError, KeyValueStore, Scope, Segment},
27        version::KrillVersion,
28        KrillResult,
29    },
30    constants::{
31        ACTOR_DEF_KRILL, CASERVER_NS, CA_OBJECTS_NS, KEYS_NS,
32        PROPERTIES_NS, PUBSERVER_CONTENT_NS, PUBSERVER_NS, SIGNERS_NS,
33        STATUS_NS, TA_PROXY_SERVER_NS, TA_SIGNER_SERVER_NS, TASK_QUEUE_NS,
34    },
35    config::Config,
36    server::{
37        manager::KrillManager,
38        properties::PropertiesManager,
39    },
40    upgrades::pre_0_14_0::{
41        OldStoredCommand, OldStoredEffect, OldStoredEvent,
42    },
43};
44use crate::api::aspa::{
45    AspaDefinition, AspaDefinitionUpdates, CustomerAsn, ProviderAsn,
46};
47use crate::server::ca::upgrades as ca;
48use crate::server::pubd::upgrades as pubd;
49
50#[cfg(feature = "hsm")]
51use rpki::crypto::KeyIdentifier;
52
53#[cfg(feature = "hsm")]
54use crate::commons::crypto::SignerHandle;
55
56use self::pre_0_14_0::OldCommandKey;
57
// Sub-modules with upgrade support code. `pre_0_14_0` holds the old data
// structures (commands/events/keys) that the code in this module converts
// from.
pub mod data_migration;

pub mod pre_0_14_0;

//------------ UpgradeResult -------------------------------------------------

/// Result type used by the upgrade related functions in this module.
pub type UpgradeResult<T> = Result<T, UpgradeError>;
65
//------------ CommandMigrationEffect ----------------------------------------

/// The outcome of migrating a single old command.
pub enum CommandMigrationEffect<A: Aggregate> {
    /// The old command was converted into a new style [`StoredCommand`]
    /// that should be saved.
    StoredCommand(StoredCommand<A>),
    /// The old command carried ASPA configuration changes; these are
    /// tracked separately rather than saved as a command.
    AspaObjectsUpdates(AspaMigrationConfigUpdates),
    /// The old command is dropped altogether.
    Nothing,
}
73
//------------ AspaMigrationConfigUpdates ------------------------------------

/// ASPA configuration changes for one CA found while migrating commands.
pub struct AspaMigrationConfigUpdates {
    /// The CA these updates apply to.
    pub ca: CaHandle,
    /// Customer ASNs with their replacement set of provider ASNs.
    pub added_or_updated: HashMap<CustomerAsn, Vec<ProviderAsn>>,
    /// Customer ASNs whose ASPA configuration is removed.
    pub removed: Vec<CustomerAsn>,
}
81
//------------ AspaMigrationConfigs ------------------------------------------

/// The ASPA definitions per CA collected during a migration.
#[derive(Debug, Default)]
pub struct AspaMigrationConfigs(HashMap<CaHandle, Vec<AspaDefinition>>);

impl AspaMigrationConfigs {
    /// Returns true if there are no ASPA definitions for any CA.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}
92
impl IntoIterator for AspaMigrationConfigs {
    type Item = (CaHandle, Vec<AspaDefinition>);
    type IntoIter =
        std::collections::hash_map::IntoIter<CaHandle, Vec<AspaDefinition>>;

    // Iterate over (CA, ASPA definitions) pairs, consuming self.
    fn into_iter(self) -> Self::IntoIter {
        self.0.into_iter()
    }
}
102
//------------ KrillUpgradeReport --------------------------------------------

/// Reports the outcome of an upgrade.
#[derive(Debug)]
pub struct UpgradeReport {
    // ASPA configurations that need to be re-applied after the migration.
    aspa_migration_configs: AspaMigrationConfigs,
    // Whether any data migration took place.
    data_migration: bool,
    // The Krill versions migrated from and to.
    versions: UpgradeVersions,
}
111
112impl UpgradeReport {
113    pub fn new(
114        aspa_migration_configs: AspaMigrationConfigs,
115        data_migration: bool,
116        versions: UpgradeVersions,
117    ) -> Self {
118        UpgradeReport {
119            aspa_migration_configs,
120            data_migration,
121            versions,
122        }
123    }
124
125    pub fn into_aspa_configs(self) -> AspaMigrationConfigs {
126        self.aspa_migration_configs
127    }
128
129    pub fn data_migration(&self) -> bool {
130        self.data_migration
131    }
132
133    pub fn versions(&self) -> &UpgradeVersions {
134        &self.versions
135    }
136}
137
//------------ KrillUpgradeVersions ------------------------------------------

/// The Krill version migrated from and the version migrated to.
#[derive(Debug, Eq, PartialEq)]
pub struct UpgradeVersions {
    pub from: KrillVersion,
    pub to: KrillVersion,
}
145
146impl UpgradeVersions {
147    /// Returns a KrillUpgradeVersions if the krill code version is newer
148    /// than the provided current version.
149    pub fn for_current(
150        current: KrillVersion,
151    ) -> Result<Option<Self>, UpgradeError> {
152        let code_version = KrillVersion::code_version();
153        match code_version.cmp(&current) {
154            std::cmp::Ordering::Greater => Ok(Some(UpgradeVersions {
155                from: current,
156                to: code_version,
157            })),
158            std::cmp::Ordering::Equal => Ok(None),
159            std::cmp::Ordering::Less => {
160                Err(UpgradeError::CodeOlderThanData(code_version, current))
161            }
162        }
163    }
164
165    pub fn from(&self) -> &KrillVersion {
166        &self.from
167    }
168
169    pub fn to(&self) -> &KrillVersion {
170        &self.to
171    }
172}
173
//------------ UpgradeError --------------------------------------------------

/// Errors that can happen during upgrade preparation.
#[derive(Debug)]
#[allow(clippy::large_enum_variant)]
pub enum UpgradeError {
    /// An error from the aggregate (event sourcing) store.
    AggregateStoreError(AggregateStoreError),
    /// An error from the write-ahead-log store.
    WalStoreError(WalStoreError),
    /// An error from the key value store.
    KeyStoreError(KeyValueError),
    /// An I/O error.
    IoError(KrillIoError),
    /// Something unexpected was found in the stored data.
    Unrecognised(String),
    /// An aggregate could not be loaded from its store.
    CannotLoadAggregate(MyHandle),
    /// Exchanged id information could not be used.
    IdExchange(String),
    /// The data contains a pre-0.13.0 testbed trust anchor CA ("ta")
    /// which cannot be migrated.
    OldTaMigration,
    /// The Krill binary is older than the stored data (code, data).
    CodeOlderThanData(KrillVersion, KrillVersion),
    /// A free-form error message.
    Custom(String),
}
190
191impl fmt::Display for UpgradeError {
192    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
193        let cause = match &self {
194            UpgradeError::AggregateStoreError(e) => format!("Aggregate Error: {e}"),
195            UpgradeError::WalStoreError(e) => format!("Write-Ahead-Log Store Error: {e}"),
196            UpgradeError::KeyStoreError(e) => format!("Keystore Error: {e}"),
197            UpgradeError::IoError(e) => format!("I/O Error: {e}"),
198            UpgradeError::Unrecognised(s) => format!("Unrecognised: {s}"),
199            UpgradeError::CannotLoadAggregate(h) => format!("Cannot load: {h}"),
200            UpgradeError::IdExchange(s) => format!("Could not use exchanged id info: {s}"),
201            UpgradeError::OldTaMigration => "Your installation cannot be upgraded to Krill 0.13.0 or later because it includes a CA called \"ta\". These CAs were used for the preliminary Trust Anchor support needed by testbed and benchmark setups. They cannot be migrated to the production grade Trust Anchor support that was introduced in Krill 0.13.0. If you want to continue to use your existing installation we recommend that you downgrade to Krill 0.12.1 or earlier. If you want to operate a testbed using Krill 0.13.0 or later, then you can create a fresh testbed instead of migrating your existing testbed. If you believe that you should not have a CA called \"ta\" - i.e. it may have been left over from an abandoned testbed set up - then you can delete the \"ta\" directory under your krill data \"cas\" directory and restart Krill.".to_string(),
202            UpgradeError::CodeOlderThanData(code, data) => format!("Krill version {code} is older than data version {data}. You either need to upgrade krill, or restore the data from version {code}."),
203            UpgradeError::Custom(s) => s.clone(),
204        };
205
206        write!(f, "Upgrade preparation failed because of: {cause}")
207    }
208}
209impl UpgradeError {
210    pub fn custom(msg: impl fmt::Display) -> Self {
211        UpgradeError::Custom(msg.to_string())
212    }
213
214    pub fn unrecognised(msg: impl fmt::Display) -> Self {
215        UpgradeError::Unrecognised(msg.to_string())
216    }
217}
218
// Conversions from underlying error types, so that `?` can be used in the
// migration code.

impl From<AggregateStoreError> for UpgradeError {
    fn from(e: AggregateStoreError) -> Self {
        UpgradeError::AggregateStoreError(e)
    }
}

impl From<WalStoreError> for UpgradeError {
    fn from(e: WalStoreError) -> Self {
        UpgradeError::WalStoreError(e)
    }
}

impl From<KeyValueError> for UpgradeError {
    fn from(e: KeyValueError) -> Self {
        UpgradeError::KeyStoreError(e)
    }
}

impl From<KrillIoError> for UpgradeError {
    fn from(e: KrillIoError) -> Self {
        UpgradeError::IoError(e)
    }
}

// General Krill errors are flattened into a string message.
impl From<crate::commons::error::Error> for UpgradeError {
    fn from(e: crate::commons::error::Error) -> Self {
        UpgradeError::Custom(e.to_string())
    }
}

impl From<rpki::ca::idexchange::Error> for UpgradeError {
    fn from(e: rpki::ca::idexchange::Error) -> Self {
        UpgradeError::IdExchange(e.to_string())
    }
}

impl std::error::Error for UpgradeError {}
256
//------------ DataUpgradeInfo -----------------------------------------------

/// Tracks the progress of a data migration so that it can be resumed.
#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)]
pub struct DataUpgradeInfo {
    // version of the source command; None if nothing was migrated yet
    pub last_migrated_command: Option<u64>,

    // Version of migrated aggregate. Note certain source commands may be
    // dropped.
    pub migration_version: u64,

    // ASPA configurations collected from old commands during migration.
    pub aspa_configs: HashMap<CustomerAsn, Vec<ProviderAsn>>,
}
269
270impl DataUpgradeInfo {
271    fn increment_last_migrated_command(&mut self) {
272        if let Some(last_command) = self.last_migrated_command {
273            self.last_migrated_command = Some(last_command + 1);
274        } else {
275            self.last_migrated_command = Some(0)
276        }
277    }
278
279    fn update_aspa_configs(&mut self, updates: AspaMigrationConfigUpdates) {
280        for removed in updates.removed {
281            self.aspa_configs.remove(&removed);
282        }
283        for (customer, providers) in updates.added_or_updated {
284            self.aspa_configs.insert(customer, providers);
285        }
286    }
287}
288
/// Whether an upgrade run should only prepare data, or also finalise it.
#[derive(Clone, Copy, Debug)]
pub enum UpgradeMode {
    PrepareOnly,
    PrepareToFinalise,
}

impl UpgradeMode {
    /// Returns true for a prepare-only run.
    pub fn is_prepare_only(&self) -> bool {
        match self {
            UpgradeMode::PrepareOnly => true,
            UpgradeMode::PrepareToFinalise => false,
        }
    }

    /// Returns true if the prepared data will be finalised.
    pub fn is_finalise(&self) -> bool {
        match self {
            UpgradeMode::PrepareOnly => false,
            UpgradeMode::PrepareToFinalise => true,
        }
    }
}
304
//------------ UnconvertedEffect ---------------------------------------------

/// The effect of an old command before its events have been converted.
pub enum UnconvertedEffect<T> {
    /// The old command failed with the given message.
    Error { msg: String },
    /// The old command succeeded, resulting in these (old format) events.
    Success { events: Vec<T> },
}

impl<T> UnconvertedEffect<T> {
    /// Returns the events of a successful effect, or None for an error.
    pub fn into_events(self) -> Option<Vec<T>> {
        if let UnconvertedEffect::Success { events } = self {
            Some(events)
        } else {
            None
        }
    }
}
320
//------------ UpgradeStore --------------------------------------------------

/// Implement this for automatic upgrades to key stores
pub trait UpgradeAggregateStorePre0_14 {
    /// The (current format) aggregate type being migrated to.
    type Aggregate: Aggregate;

    /// The old (pre 0.14.0) initialisation event type.
    type OldInitEvent: fmt::Display + Eq + PartialEq + Storable + 'static;
    /// The old (pre 0.14.0) event type.
    type OldEvent: fmt::Display + Eq + PartialEq + Storable + 'static;
    /// The old (pre 0.14.0) storable command details type.
    type OldStorableDetails: WithStorableDetails;

    //--- Mandatory functions to implement

    /// The name of the store being upgraded, used for logging.
    fn store_name(&self) -> &str;

    /// The key value store holding the deployed (old format) data.
    fn deployed_store(&self) -> &KeyValueStore;

    /// The key value store in which migrated data is prepared.
    fn preparation_key_value_store(&self) -> &KeyValueStore;

    /// The aggregate store over the preparation data; used to verify the
    /// migration by rebuilding state from the migrated commands.
    fn preparation_aggregate_store(&self)
        -> &AggregateStore<Self::Aggregate>;

    /// Implement this to convert the old init event to a new
    /// StoredCommand for the init.
    fn convert_init_event(
        &self,
        old_init: Self::OldInitEvent,
        handle: MyHandle,
        actor: String,
        time: Time,
    ) -> UpgradeResult<StoredCommand<Self::Aggregate>>;

    /// Implement this to convert an old command and convert the
    /// included old events.
    ///
    /// The version for the new command is given, as it might differ
    /// from the old command sequence.
    fn convert_old_command(
        &self,
        old_command: OldStoredCommand<Self::OldStorableDetails>,
        old_effect: UnconvertedEffect<Self::OldEvent>,
        version: u64,
    ) -> UpgradeResult<CommandMigrationEffect<Self::Aggregate>>;

    /// Override this to get a call when the migration of commands for
    /// an aggregate is done.
    fn post_command_migration(&self, handle: &MyHandle) -> UpgradeResult<()> {
        trace!("default post migration hook called for '{handle}'");
        Ok(())
    }

    /// Upgrades pre 0.14.x AggregateStore.
    ///
    /// Expects implementers of this trait to provide function for converting
    /// old command/event/init types to the current types.
    fn upgrade(
        &self,
        mode: UpgradeMode,
    ) -> UpgradeResult<AspaMigrationConfigs> {
        // check existing version, wipe it if there is an unfinished upgrade
        // in progress for another Krill version.
        self.preparation_store_prepare()?;

        info!(
            "Prepare upgrading {} to Krill version {}",
            self.store_name(),
            crate_version!(),
        );

        // Migrate the event sourced data for each scope and create new
        // snapshots
        for scope in self.deployed_store().scopes()? {
            // We only need top-level scopes, not sub-scopes such as 'surplus'
            // archive dirs
            if scope.len() != 1 {
                trace!("Skipping migration for sub-scope: {scope}");
                continue;
            }

            // Skip a scope [`.locks`]. This is the old locks directory.
            if let Some(segment) = scope.first_segment() {
                if segment.as_str() == ".locks" {
                    continue
                }
            }

            // Getting the Handle should never fail, but if it does then we
            // should bail out asap.
            let handle =
                MyHandle::from_str(&scope.to_string()).map_err(|_| {
                    UpgradeError::Custom(format!(
                        "Found invalid handle '{scope}'"
                    ))
                })?;

            // Get the upgrade info to see where we got to.
            // We may be continuing from an earlier migration, e.g. by
            // krillup.

            let mut data_upgrade_info = self.data_upgrade_info(&scope)?;

            // Get the list of commands to prepare, starting with the
            // last_command we got to (may be 0)
            let old_cmd_keys = self.command_keys(
                &scope,
                data_upgrade_info.last_migrated_command.unwrap_or(0),
            )?;

            // Migrate the initialisation event, if not done in a previous
            // run. This is a special event that has no command,
            // so we need to do this separately.
            if data_upgrade_info.last_migrated_command.is_none() {
                let old_init_key = Self::event_key(scope.clone(), 0);

                let old_init: OldStoredEvent<Self::OldInitEvent> =
                    self.get(&old_init_key)?;
                let old_init = old_init.into_details();

                // From 0.14.x and up we will have command '0' for the init,
                // where beforehand we only had an event. We
                // will have to make up some values for the actor and time.
                let actor = ACTOR_DEF_KRILL;

                // The time is tricky.. our best guess is to set this to the
                // same value as the first command, if there
                // is any. In the very unlikely
                // case that there is no first command, then we might as well
                // set it to now.
                let time = if let Some(first_command) = old_cmd_keys.first() {
                    let cmd: OldStoredCommand<Self::OldStorableDetails> =
                        self.get(first_command)?;
                    cmd.time()
                } else {
                    Time::now()
                };

                // We need to ask the implementer of this trait to convert the
                // init event we found to a StoredCommand that we can save.
                let command = self.convert_init_event(
                    old_init,
                    handle.clone(),
                    actor.audit_name(),
                    time,
                )?;

                self.store_new_command(&scope, &command)?;
                data_upgrade_info.increment_last_migrated_command();
            }

            // Track commands migrated and time spent so we can report
            // progress
            let mut total_migrated = 0;
            let total_commands = old_cmd_keys.len(); // excludes migrated commands
            let time_started = Time::now();

            // Report the amount of (remaining) work (old)
            Self::report_remaining_work(
                total_commands,
                &handle,
                &data_upgrade_info,
            )?;

            // Process remaining commands
            for old_cmd_key in old_cmd_keys {
                // Read and parse the command.
                trace!("  +- command: {old_cmd_key}");
                let old_command: OldStoredCommand<Self::OldStorableDetails> =
                    self.get(&old_cmd_key)?;

                // And the unconverted effects
                let old_effect = match old_command.effect() {
                    OldStoredEffect::Success { events } => {
                        // The old effect only stored event numbers; we need
                        // to read the full events from the deployed store.
                        let mut full_events: Vec<Self::OldEvent> = vec![];
                        for v in events {
                            let event_key =
                                Self::event_key(scope.clone(), *v);
                            trace!("    +- event: {event_key}");
                            let evt: OldStoredEvent<Self::OldEvent> = self
                                .deployed_store()
                                .get(&event_key)?
                                .ok_or_else(|| {
                                    UpgradeError::Custom(format!(
                                        "Cannot parse old event: {event_key}"
                                    ))
                                })?;
                            full_events.push(evt.into_details());
                        }
                        UnconvertedEffect::Success {
                            events: full_events,
                        }
                    }
                    OldStoredEffect::Error { msg } => {
                        UnconvertedEffect::Error { msg: msg.clone() }
                    }
                };

                // The migration version matches the version of the resulting
                // aggregate when commands are applied. It starts with 0 for
                // the init command, in which case the version in the
                // data_upgrade_info is not updated.
                //
                // For commands we set the target version of the migrated
                // command to the current version of the aggregate, plus 1.
                // If there is an actual resulting command (with events or
                // even an error) to be saved, then we save this command and
                // increment the migration_version.
                //
                // Unfortunately, we do need this double bookkeeping of
                // versions of source commands that are migrated vs the
                // version of the aggregate, because some commands - such as
                // pre 0.14.0 ASPA update commands - may be dropped.
                match self.convert_old_command(
                    old_command,
                    old_effect,
                    data_upgrade_info.migration_version + 1,
                )? {
                    CommandMigrationEffect::StoredCommand(command) => {
                        self.store_new_command(&scope, &command)?;
                        // we only increment this when a command is saved
                        data_upgrade_info.migration_version += 1;
                    }
                    CommandMigrationEffect::AspaObjectsUpdates(updates) => {
                        data_upgrade_info.update_aspa_configs(updates);
                    }
                    CommandMigrationEffect::Nothing => {
                        // nothing to do
                    }
                }

                total_migrated += 1;
                data_upgrade_info.increment_last_migrated_command();

                // Report progress and expected time to finish on every 100
                // commands evaluated.
                if total_migrated % 100 == 0 {
                    // expected time: (total_migrated / (now - started)) *
                    // total

                    let mut time_passed = (Time::now().timestamp()
                        - time_started.timestamp())
                        as usize;
                    if time_passed == 0 {
                        time_passed = 1; // avoid divide by zero.. we are
                                         // doing approximate estimates here
                    }
                    let migrated_per_second: f64 =
                        total_migrated as f64 / time_passed as f64;
                    let expected_seconds =
                        (total_commands as f64 / migrated_per_second) as i64;
                    let eta = time_started
                        + chrono::Duration::seconds(expected_seconds);
                    info!(
                        "  migrated {} commands, expect to finish: {}",
                        total_migrated,
                        eta.to_rfc3339()
                    );
                }
            }

            info!("Finished migrating commands for '{scope}'");

            // Verify migration
            info!(
                "Will verify the migration by rebuilding '{}' from migrated commands",
                &scope
            );
            let _latest = self.preparation_aggregate_store().save_snapshot(&handle).map_err(|e| {
                UpgradeError::Custom(format!(
                    "Could not rebuild state after migrating CA '{handle}'! Error was: {e}."
                ))
            })?;

            // Call the post command migration hook, this will do nothing
            // unless the implementer of this trait overrode it.
            self.post_command_migration(&handle)?;

            // Update the upgrade info as this could be a prepare only
            // run, and this migration could be resumed later after more
            // changes were applied.
            self.update_data_upgrade_info(&scope, &data_upgrade_info)?;

            info!("Verified migration of '{handle}'");
        }

        match mode {
            UpgradeMode::PrepareOnly => {
                info!(
                    "Prepared migrating data to Krill version {}. Will save progress for final upgrade when Krill restarts.",
                    crate_version!(),
                );
                Ok(AspaMigrationConfigs::default())
            }
            UpgradeMode::PrepareToFinalise => {
                // Collect the ASPA configurations tracked per scope during
                // the migration so that they can be re-applied by the
                // caller.
                let mut aspa_configs = AspaMigrationConfigs::default();
                for scope in self.deployed_store().scopes()? {
                    if scope.len() != 1 {
                        continue;
                    }

                    // Getting the Handle should never fail, but if it does
                    // then we should bail out asap.
                    let ca = MyHandle::from_str(&scope.to_string()).map_err(
                        |_| {
                            UpgradeError::Custom(format!(
                                "Found invalid handle '{scope}'"
                            ))
                        },
                    )?;
                    let info = self.data_upgrade_info(&scope)?;
                    let aspa_configs_for_ca: Vec<AspaDefinition> = info
                        .aspa_configs
                        .into_iter()
                        .map(|(customer, providers)| {
                            AspaDefinition { customer, providers }
                        })
                        .collect();

                    if !aspa_configs_for_ca.is_empty() {
                        aspa_configs.0.insert(ca, aspa_configs_for_ca);
                    }
                }
                self.clean_migration_help_files()?;
                info!(
                    "Prepared migrating data to Krill version {}.",
                    crate_version!(),
                );

                Ok(aspa_configs)
            }
        }
    }

    //-- Internal helper functions for this trait. Should not be used or
    //   overridden.

    /// Saves the version of the target upgrade. Wipes the store if there is
    /// another version set as the target.
    fn preparation_store_prepare(&self) -> UpgradeResult<()> {
        let code_version = KrillVersion::code_version();
        let version_key = Key::new_global(
            const { Segment::make("version") }
        );

        if let Ok(Some(existing_migration_version)) = self
            .preparation_key_value_store()
            .get::<KrillVersion>(&version_key)
        {
            if existing_migration_version != code_version {
                warn!("Found prepared data for Krill version {existing_migration_version}, will remove it and start from scratch for {code_version}");
                self.preparation_key_value_store().wipe()?;
            }
        }

        self.preparation_key_value_store()
            .store(&version_key, &code_version)?;

        Ok(())
    }

    /// Logs how many commands still need to be migrated for this handle.
    fn report_remaining_work(
        total_remaining: usize,
        handle: &MyHandle,
        data_upgrade_info: &DataUpgradeInfo,
    ) -> UpgradeResult<()> {
        // Unwrap is safe here, because if there was no last_command
        // then we would have converted the init event above, and would
        // have set this.
        let last_command =
            data_upgrade_info
                .last_migrated_command
                .ok_or(UpgradeError::custom(
                "called report_remaining_work before converting init event",
            ))?;

        if last_command == 0 {
            info!(
                "Will migrate {total_remaining} commands for '{handle}'"
            );
        } else {
            info!(
                "Will resume migration of {total_remaining} remaining commands for '{handle}'"
            );
        }

        Ok(())
    }

    /// Saves a migrated command in the preparation store, keyed by its
    /// (new) version.
    fn store_new_command(
        &self,
        scope: &Scope,
        command: &StoredCommand<Self::Aggregate>,
    ) -> UpgradeResult<()> {
        let key =
            Self::new_stored_command_key(scope.clone(), command.version());
        self.preparation_key_value_store()
            .store_new(&key, command)
            .map_err(UpgradeError::KeyStoreError)
    }

    /// The key under which migration progress is tracked for a scope.
    fn data_upgrade_info_key(scope: Scope) -> Key {
        Key::new_scoped(scope, const { Segment::make("upgrade_info.json") })
    }

    /// Return the DataUpgradeInfo telling us to where we got to with this
    /// migration.
    fn data_upgrade_info(
        &self,
        scope: &Scope,
    ) -> UpgradeResult<DataUpgradeInfo> {
        self.preparation_key_value_store()
            .get(&Self::data_upgrade_info_key(scope.clone()))
            .map(|opt| opt.unwrap_or_default())
            .map_err(UpgradeError::KeyStoreError)
    }

    /// Update the DataUpgradeInfo
    fn update_data_upgrade_info(
        &self,
        scope: &Scope,
        info: &DataUpgradeInfo,
    ) -> UpgradeResult<()> {
        self.preparation_key_value_store()
            .store(&Self::data_upgrade_info_key(scope.clone()), info)
            .map_err(UpgradeError::KeyStoreError)
    }

    /// Clean up keys used for tracking migration progress
    fn clean_migration_help_files(&self) -> UpgradeResult<()> {
        let version_key = Key::new_global(
            const { Segment::make("version")
        });
        self.preparation_key_value_store()
            .drop_key(&version_key)
            .map_err(UpgradeError::KeyStoreError)?;

        for scope in self.preparation_key_value_store().scopes()? {
            self.preparation_key_value_store()
                .drop_key(&Self::data_upgrade_info_key(scope))
                .map_err(UpgradeError::KeyStoreError)?;
        }
        Ok(())
    }

    /// Find all command keys for the scope, starting from the provided
    /// sequence. Then sort them by sequence and turn them back into key
    /// store keys for further processing.
    fn command_keys(
        &self,
        scope: &Scope,
        from: u64,
    ) -> Result<Vec<Key>, UpgradeError> {
        let keys = self.deployed_store().keys(scope, "command--")?;
        let mut cmd_keys: Vec<OldCommandKey> = vec![];
        for key in keys {
            let cmd_key = OldCommandKey::from_str(key.name().as_str())
                .map_err(|_| {
                    UpgradeError::Custom(format!(
                        "Found invalid command key: {} for ca: {}",
                        key.name(),
                        scope
                    ))
                })?;
            if cmd_key.sequence > from {
                cmd_keys.push(cmd_key);
            }
        }
        cmd_keys.sort_by_key(|k| k.sequence);
        let cmd_keys = cmd_keys
            .into_iter()
            .map(|ck| {
                Key::new_scoped(
                    scope.clone(),
                    Segment::parse_lossy(&format!("{ck}.json")),
                )
            }) // ck should always be a valid Segment
            .collect();

        Ok(cmd_keys)
    }

    /// Reads a required value from the deployed store; a missing key is
    /// reported as an error.
    fn get<V: DeserializeOwned>(&self, key: &Key) -> Result<V, UpgradeError> {
        self.deployed_store().get(key)?.ok_or_else(|| {
            UpgradeError::Custom(format!("Cannot read key: {key}"))
        })
    }

    /// The key for an old (pre 0.14.0) event in a scope.
    fn event_key(scope: Scope, nr: u64) -> Key {
        // cannot panic as a u64 cannot contain a Scope::SEPARATOR
        Key::new_scoped(
            scope,
            Segment::parse(&format!("delta-{nr}.json")).unwrap(),
        )
    }

    /// The key for a migrated (new format) command in a scope.
    fn new_stored_command_key(scope: Scope, version: u64) -> Key {
        Key::new_scoped(
            scope,
            Segment::parse(&format!("command-{version}.json")).unwrap(),
        )
    }
}
825
826/// Prepares a Krill upgrade related data migration. If no data migration is
827/// needed then this will simply be a no-op. Returns the
828/// `KrillUpgradeVersions` if the currently deployed Krill version differs
829/// from the code version. Note that the version may have increased even if
830/// there is no data migration needed.
831///
832/// In case data needs to be migrated, then new data will be prepared under
833/// the directory returned by `config.storage_uri()`. By design, this
834/// migration can be executed while Krill is running as it does not affect any
835/// current state. It can be called multiple times and it will resume the
836/// migration from the point it got to earlier. The idea is that this will
837/// allow operators to prepare the work for a migration and (a) verify that
838/// the migration worked, and (b) minimize the downtime when Krill is
839/// restarted into a new version. When a new version Krill daemon is
840/// started, it will call this again - to do the final preparation for a
841/// migration - knowing that no changes are added to the event history at this
842/// time. After this, the migration will be finalised.
pub fn prepare_upgrade_data_migrations(
    mode: UpgradeMode,
    config: &Config,
    properties_manager: &PropertiesManager,
) -> UpgradeResult<Option<UpgradeReport>> {
    // First of all ALWAYS check the existing keys if the hsm feature is
    // enabled. Remember that this feature - although enabled by default
    // from 0.10.x - may be enabled by installing a new krill binary of
    // the same Krill version as the previous binary. In other words, we
    // cannot rely on the KrillVersion to decide whether this is needed.
    // On the other hand.. this is a fairly cheap operation that we can
    // just do at startup. It is done here, because in effect it *is* a data
    // migration.
    #[cfg(feature = "hsm")]
    record_preexisting_openssl_keys_in_signer_mapper(config)?;

    match upgrade_versions(config, properties_manager)? {
        // Deployed data is at the code version: nothing to do, no report.
        None => Ok(None),
        Some(versions) => {
            info!(
                "Preparing upgrade from {} to {}",
                versions.from(),
                versions.to()
            );

            // Check if there is any CA named "ta". If so, then we are trying
            // to upgrade a Krill testbed or benchmark set up that
            // uses the old deprecated trust anchor set up. These TAs cannot
            // easily be migrated to the new setup in 0.13.0.
            // Well.. it could be done, if there would be a strong use
            // case to put in the effort, but there really isn't.
            let ca_kv_store =
                KeyValueStore::create(&config.storage_uri, CASERVER_NS)?;
            if ca_kv_store.has_scope(&Scope::from_segment(
                    const { Segment::make("ta") }
                ))? {
                return Err(UpgradeError::OldTaMigration);
            }

            // The branches below are ordered from oldest to newest source
            // version. Exactly one branch runs, and each branch performs
            // every migration needed to bring that version range up to
            // the current data model.
            if versions.from < KrillVersion::release(0, 6, 0) {
                let msg = "Cannot upgrade Krill installations from before version 0.6.0. Please upgrade to 0.8.1 first, then upgrade to 0.12.3, and then upgrade to this version.";
                error!("{msg}");
                Err(UpgradeError::custom(msg))
            } else if versions.from < KrillVersion::release(0, 9, 0) {
                let msg = "Cannot upgrade Krill installations from before version 0.9.0. Please upgrade to 0.12.3 first, and then upgrade to this version.";
                error!("{msg}");
                Err(UpgradeError::custom(msg))
            } else if versions.from < KrillVersion::candidate(0, 10, 0, 1) {
                // Complex migrations involving command / event conversions
                pubd::pre_0_10_0::PublicationServerRepositoryAccessMigration::upgrade(mode, config, &versions)?;
                let aspa_configs =
                    ca::pre_0_10_0::CasMigration::upgrade(mode, config)?;

                // The way that pubd objects were stored was changed as well
                // (since 0.13.0)
                pubd::migrate_pre_0_12_pubd_objects(config)?;

                // Migrate remaining aggregate stores used in < 0.10.0 to the
                // new format in 0.14.0 where we combine
                // commands and events into a single key-value pair.
                pre_0_14_0::UpgradeAggregateStoreSignerInfo::upgrade(
                    SIGNERS_NS, mode, config,
                )?;

                // `true` indicates that data was migrated; the ASPA
                // configurations are re-applied in post_start_upgrade.
                Ok(Some(UpgradeReport::new(aspa_configs, true, versions)))
            } else if versions.from < KrillVersion::candidate(0, 10, 0, 3) {
                Err(UpgradeError::custom(
                    "Cannot upgrade from 0.10.0 RC1 or RC2. Please contact rpki-team@nlnetlabs.nl",
                ))
            } else if versions.from < KrillVersion::candidate(0, 12, 0, 2) {
                info!(
                    "Krill upgrade from {} to {}. Check if publication server objects need migration.",
                    versions.from(),
                    versions.to()
                );

                // The pubd objects storage changed in 0.13.0
                pubd::migrate_pre_0_12_pubd_objects(config)?;

                // Migrate aggregate stores used in < 0.12.0 to the new format
                // in 0.14.0 where we combine commands and
                // events into a single key-value pair.
                pre_0_14_0::UpgradeAggregateStoreSignerInfo::upgrade(
                    SIGNERS_NS, mode, config,
                )?;
                let aspa_configs =
                    ca::pre_0_14_0::CasMigration::upgrade(mode, config)?;
                pubd::pre_0_14_0::UpgradeAggregateStoreRepositoryAccess::upgrade(
                    PUBSERVER_NS,
                    mode,
                    config,
                )?;

                Ok(Some(UpgradeReport::new(aspa_configs, true, versions)))
            } else if versions.from < KrillVersion::candidate(0, 13, 0, 0) {
                // 0.12.x used a different pubd objects layout than 0.13.0.
                pubd::migrate_0_12_pubd_objects(config)?;

                // Migrate aggregate stores used in < 0.13.0 to the new format
                // in 0.14.0 where we combine commands and
                // events into a single key-value pair.
                pre_0_14_0::UpgradeAggregateStoreSignerInfo::upgrade(
                    SIGNERS_NS, mode, config,
                )?;
                let aspa_configs =
                    ca::pre_0_14_0::CasMigration::upgrade(mode, config)?;
                pubd::pre_0_14_0::UpgradeAggregateStoreRepositoryAccess::upgrade(
                    PUBSERVER_NS,
                    mode,
                    config,
                )?;

                Ok(Some(UpgradeReport::new(aspa_configs, true, versions)))
            } else if versions.from < KrillVersion::candidate(0, 14, 0, 0) {
                // Migrate aggregate stores used in < 0.14.0 to the new format
                // in 0.14.0 where we combine commands and
                // events into a single key-value pair. This branch also
                // covers the TA proxy and TA signer stores introduced in
                // 0.13.0.
                let aspa_configs =
                    ca::pre_0_14_0::CasMigration::upgrade(mode, config)?;
                pubd::pre_0_14_0::UpgradeAggregateStoreRepositoryAccess::upgrade(
                    PUBSERVER_NS,
                    mode,
                    config,
                )?;
                pre_0_14_0::UpgradeAggregateStoreSignerInfo::upgrade(
                    SIGNERS_NS, mode, config,
                )?;
                pre_0_14_0::UpgradeAggregateStoreTrustAnchorSigner::upgrade(
                    TA_SIGNER_SERVER_NS,
                    mode,
                    config,
                )?;
                pre_0_14_0::UpgradeAggregateStoreTrustAnchorProxy::upgrade(
                    TA_PROXY_SERVER_NS,
                    mode,
                    config,
                )?;

                Ok(Some(UpgradeReport::new(aspa_configs, true, versions)))
            } else {
                // Version increased, but no data migration is needed.
                // Report `false` so callers know nothing was migrated.
                Ok(Some(UpgradeReport::new(
                    AspaMigrationConfigs::default(),
                    false,
                    versions,
                )))
            }
        }
    }
}
991
992/// Finalise the data migration for an upgrade.
993///
994/// If there is any prepared data, then:
995/// - archive the current data
996/// - make the prepared data current
pub fn finalise_data_migration(
    upgrade: &UpgradeVersions,
    config: &Config,
    properties_manager: &PropertiesManager,
) -> KrillResult<()> {
    // For each NS
    //
    // Check if upgrade store to this version exists.
    // If so:
    //  -- drop archive store if it exists
    //  -- archive current store (rename ns)
    //  -- move upgrade to current
    info!(
        "Finish data migrations for upgrade from {} to {}",
        upgrade.from(),
        upgrade.to()
    );

    for ns in [
        CASERVER_NS,
        CA_OBJECTS_NS,
        KEYS_NS,
        PROPERTIES_NS,
        PUBSERVER_CONTENT_NS,
        PUBSERVER_NS,
        SIGNERS_NS,
        STATUS_NS,
        TA_PROXY_SERVER_NS,
        TA_SIGNER_SERVER_NS,
        TASK_QUEUE_NS,
    ] {
        // Check if there is a non-empty upgrade store for this namespace
        // that would need to be migrated.
        let mut upgrade_store =
            KeyValueStore::create_upgrade_store(&config.storage_uri, ns)?;
        if !upgrade_store.is_empty()? {
            info!("Migrate new data for {ns} and archive old");
            // Archive the current data (if any) before moving the
            // prepared upgrade data into its place.
            let mut current_store =
                KeyValueStore::create(&config.storage_uri, ns)?;
            if !current_store.is_empty()? {
                current_store.migrate_to_archive(&config.storage_uri, ns)?;
            }

            upgrade_store.migrate_to_current(&config.storage_uri, ns)?;
        } else {
            // No migration needed, but check if we have a current store
            // for this namespace that still includes a version file. If
            // so, remove it.
            let current_store =
                KeyValueStore::create(&config.storage_uri, ns)?;
            let version_key = Key::new_global(
                const { Segment::make("version")
            });
            if current_store.has(&version_key)? {
                debug!("Removing excess version key in ns: {ns}");
                current_store.drop_key(&version_key)?;
            }

            // If we migrate from before 0.15.0, delete the .locks scope.
            // Best-effort: any error from dropping the scope is ignored.
            if upgrade.from() < &KrillVersion::release(0, 15, 0) {
                let _ = current_store.drop_scope(
                    &Scope::from_segment(
                        const { Segment::make(".locks") }
                    )
                );
            }
        }
    }

    // Set the current version of the store to that of the running code
    let code_version = KrillVersion::code_version();
    info!("Finished upgrading Krill to version: {code_version}");
    if properties_manager.is_initialized() {
        properties_manager.upgrade_krill_version(code_version)?;
    } else {
        // Pre-0.14.0 installations have no properties yet; initialise
        // them with the current code version.
        properties_manager.init(code_version)?;
    }

    Ok(())
}
1077
1078/// Prior to Krill having HSM support there was no signer mapper as it wasn't
1079/// needed, keys were just created by OpenSSL and stored in files on disk in
1080/// KEYS_NS named by the string form of their Krill KeyIdentifier. If Krill
1081/// had created such keys and then the operator upgrades to a version of Krill
1082/// with HSM support, the keys will become unusable because Krill will not be
1083/// able to find a mapping from KeyIdentifier to signer as the mappings for
1084/// the keys were never created. So we detect the case that the signer store
1085/// SIGNERS_DIR directory has not yet been created, i.e. no signers have been
1086/// registered and no key mappings have been recorded, and then walk KEYS_NS
1087/// adding the keys one by one to the mapping in the signer store, if any.
#[cfg(feature = "hsm")]
fn record_preexisting_openssl_keys_in_signer_mapper(
    config: &Config,
) -> Result<(), UpgradeError> {
    let signers_key_store =
        KeyValueStore::create(&config.storage_uri, SIGNERS_NS)?;
    if signers_key_store.is_empty()? {
        let mut num_recorded_keys = 0;
        // If the key value store for the "signers" namespace is empty, then
        // it was not yet initialised and we may need to import keys
        // from a previous krill installation (earlier version, or a custom
        // build that has the hsm feature disabled.)

        let keys_key_store =
            KeyValueStore::create(&config.storage_uri, KEYS_NS)?;
        info!(
            "Mapping OpenSSL signer keys, using uri: {}",
            config.storage_uri
        );

        // Build a signer from the configured signers so that we can use
        // its mapper to record the keys found on disk.
        let probe_interval =
            std::time::Duration::from_secs(config.signer_probe_retry_seconds);
        let krill_signer = crate::commons::crypto::KrillSignerBuilder::new(
            &config.storage_uri,
            probe_interval,
            &config.signers,
        )
        .with_default_signer(config.default_signer())
        .with_one_off_signer(config.one_off_signer())
        .build()
        .unwrap();

        // For every file (key) in the legacy OpenSSL signer keys directory

        // Cached once found, so we only search the active signers for the
        // first unmapped key.
        let mut openssl_signer_handle: Option<SignerHandle> = None;

        for key in keys_key_store.keys(&Scope::global(), "")? {
            debug!("Found key: {key}");
            // Is it a key identifier? Keys that do not parse as a
            // KeyIdentifier are logged and skipped.
            if let Ok(key_id) = KeyIdentifier::from_str(key.name().as_str()) {
                // Is the key already recorded in the mapper? It shouldn't be,
                // but asking will cause the initial
                // registration of the OpenSSL signer to occur and for it to
                // be assigned a handle. We need the handle so
                // that we can register keys with the mapper.
                if krill_signer.get_key_info(&key_id).is_err() {
                    // No, record it

                    // Find out the handle of the OpenSSL signer used to
                    // create this key, if not yet known.
                    if openssl_signer_handle.is_none() {
                        // No, find it by asking each of the active signers if
                        // they have the key because one of
                        // them must have it and it should be the one and only
                        // OpenSSL signer that Krill was
                        // using previously. We can't just find and use the
                        // only OpenSSL signers as Krill may
                        // have been configured with more than one each with
                        // separate keys directories.
                        for (a_signer_handle, a_signer) in
                            krill_signer.get_active_signers().iter()
                        {
                            if a_signer.get_key_info(&key_id).is_ok() {
                                openssl_signer_handle =
                                    Some(a_signer_handle.clone());
                                break;
                            }
                        }
                    }

                    // Record the key in the signer mapper as being owned by
                    // the found signer handle.
                    if let Some(signer_handle) = &openssl_signer_handle {
                        let internal_key_id = key_id.to_string();
                        if let Some(mapper) = krill_signer.get_mapper() {
                            mapper.add_key(
                                signer_handle,
                                &key_id,
                                &internal_key_id,
                            )?;
                            num_recorded_keys += 1;
                        }
                    }
                }
            } else {
                debug!("Could not parse key as key identifier: {key}");
            }
        }

        info!(
            "Recorded {num_recorded_keys} key identifiers in the signer store"
        );
        Ok(())
    } else {
        debug!("Signers were set up before. No need to migrate keys.");
        Ok(())
    }
}
1186
1187/// Should be called after the KrillServer is started, but before the web
1188/// server is started and operators can make changes.
1189pub async fn post_start_upgrade(
1190    report: UpgradeReport,
1191    server: &KrillManager,
1192) -> KrillResult<()> {
1193    if report.versions().from() < &KrillVersion::candidate(0, 9, 3, 2) {
1194        info!("Reissue ROAs on upgrade to force short EE certificate subjects in the objects");
1195        server.force_renew_roas().await?;
1196    }
1197
1198    for (ca, configs) in report.into_aspa_configs().into_iter() {
1199        info!("Re-import ASPA configurations after migration for CA '{ca}'");
1200        let aspa_updates = AspaDefinitionUpdates {
1201            add_or_replace: configs,
1202            remove: Vec::new()
1203        };
1204        server.ca_aspas_definitions_update(
1205            ca,
1206            aspa_updates,
1207            server.system_actor(),
1208        )?;
1209    }
1210
1211    Ok(())
1212}
1213
1214/// Checks if we should upgrade:
1215///  - if the code is newer than the version used then we upgrade
1216///  - if the code is the same version then we do not upgrade
1217///  - if the code is older then we need to error out
1218fn upgrade_versions(
1219    config: &Config,
1220    properties_manager: &PropertiesManager,
1221) -> Result<Option<UpgradeVersions>, UpgradeError> {
1222    if properties_manager.is_initialized() {
1223        // The properties manager was introduced in Krill 0.14.0. However,
1224        // in 0.14.0, it is not being initialised unless a migration from
1225        // an older version happened. This changed in 0.15.0 where it gets
1226        // initialised even on a fresh install.
1227        //
1228        // If it's initialised then it MUST have a Krill Version.
1229        let current = properties_manager.current_krill_version()?;
1230        UpgradeVersions::for_current(current)
1231    }
1232    else {
1233        // No KrillVersion. So, this is an older Krill version.
1234        //
1235        // If this is an existing Krill installation before 0.14.0, then we
1236        // will find version files (keys) in one or more existing key value
1237        // stores used for the various entities in Krill.
1238        //
1239        // If can't find any versions then this is a new install being done
1240        // in the 0.14 series. We treat all of them as 0.14.0.
1241
1242        let mut current: Option<KrillVersion> = None;
1243
1244        // Scan the following data stores. The *latest* version seen will
1245        // determine the actual installed Krill version - this is
1246        // because these version files did not always get updated in
1247        // each store - but only in stores that needed an upgrade (at
1248        // least this is true for some past migrations). So, it's the
1249        // latest version (if any) that counts here.
1250        for ns in &[
1251            CASERVER_NS,
1252            CA_OBJECTS_NS,
1253            PUBSERVER_NS,
1254            PUBSERVER_CONTENT_NS,
1255        ] {
1256            let kv_store = KeyValueStore::create(&config.storage_uri, ns)?;
1257            let key = Key::new_global(
1258                const { Segment::make("version")
1259            });
1260
1261            if let Some(key_store_version) =
1262                kv_store.get::<KrillVersion>(&key)?
1263            {
1264                if let Some(last_seen) = &current {
1265                    if &key_store_version > last_seen {
1266                        current = Some(key_store_version)
1267                    }
1268                } else {
1269                    current = Some(key_store_version);
1270                }
1271            }
1272        }
1273
1274        match current {
1275            None => {
1276                UpgradeVersions::for_current(KrillVersion::release(0, 14, 0))
1277            }
1278            Some(current) => UpgradeVersions::for_current(current),
1279        }
1280    }
1281}
1282
1283//------------ Tests ---------------------------------------------------------
1284
1285#[cfg(test)]
1286mod tests {
1287    use std::{fs, path};
1288    use std::path::PathBuf;
1289    use log::LevelFilter;
1290    use tempfile::tempdir;
1291    use url::Url;
1292    use crate::commons::storage::Namespace;
1293    use crate::commons::test;
1294    use crate::server::ca::{CaStatus, CaStatusStore};
1295    use super::*;
1296
1297    fn copy_folder(src: impl AsRef<path::Path>, dst: impl AsRef<path::Path>) {
1298        fs::create_dir_all(&dst).unwrap();
1299        for item in fs::read_dir(src).unwrap() {
1300            let item = item.unwrap();
1301            let ft = item.file_type().unwrap();
1302            if ft.is_dir() {
1303                copy_folder(item.path(), dst.as_ref().join(item.file_name()));
1304            } 
1305            else if ft.is_file() {
1306                fs::copy(
1307                    item.path(), 
1308                    dst.as_ref().join(item.file_name())
1309                ).unwrap();
1310            }
1311        }
1312    }
1313
    /// Runs a complete migration cycle against the test data in `base_dir`.
    ///
    /// Copies the listed namespaces from `base_dir` into in-memory
    /// storage, then runs `prepare_upgrade_data_migrations` twice
    /// (`PrepareOnly` followed by `PrepareToFinalise` - verifying that a
    /// prepared migration can be resumed) and finally calls
    /// `finalise_data_migration`. Panics if any step fails or reports
    /// that no upgrade was needed.
    fn test_upgrade(base_dir: &str, namespaces: &[&str]) {
        let temp_dir = tempdir().unwrap();
        copy_folder(base_dir, &temp_dir);

        // Copy data for the given namespaces into memory for testing.
        let mem_storage_base_uri = test::mem_storage();

        // This is needed for tls_dir etc, but will be ignored here.
        let bogus_path = PathBuf::from("/dev/null");

        let mut config = Config::test(
            &mem_storage_base_uri,
            Some(&bogus_path),
            false, false, false, false,
        );
        config.log_level = LevelFilter::Trace;
        let _ = config.init_logging();

        let source_url = Url::parse(&format!(
                "local://{}", temp_dir.path().to_str().unwrap()
        )).unwrap();

        for ns in namespaces {
            let namespace = Namespace::parse(ns).unwrap();
            let source_store = KeyValueStore::create(
                &source_url, namespace
            ).unwrap();
            let target_store = KeyValueStore::create(
                &mem_storage_base_uri, namespace
            ).unwrap();

            target_store.import(&source_store, |_| true).unwrap();
        }

        let properties_manager = PropertiesManager::create(
            &config.storage_uri,
            config.use_history_cache,
        ).unwrap();

        prepare_upgrade_data_migrations(
            UpgradeMode::PrepareOnly,
            &config,
            &properties_manager,
        ).unwrap().unwrap();

        // and continue - immediately, but still tests that this can pick up
        // again.
        let report = prepare_upgrade_data_migrations(
            UpgradeMode::PrepareToFinalise,
            &config,
            &properties_manager,
        ).unwrap().unwrap();

        finalise_data_migration(
            report.versions(),
            &config,
            &properties_manager,
        ).unwrap();
    }
1373
    /// Full prepare/resume/finalise migration run on 0.9.6 test data.
    #[test]
    fn prepare_then_upgrade_0_9_6() {
        test_upgrade(
            "test-resources/migrations/v0_9_6/",
            &["ca_objects", "cas", "pubd", "pubd_objects"],
        );
    }
1381
    /// Full prepare/resume/finalise migration run on 0.9.5 publication
    /// server test data.
    #[test]
    fn prepare_then_upgrade_0_9_5_pubserver() {
        test_upgrade(
            "test-resources/migrations/v0_9_5_pubserver/",
            &["ca_objects", "cas", "pubd", "pubd_objects"],
        );
    }
1389
    /// Full prepare/resume/finalise migration run on 0.10.3 test data,
    /// including the signers and status namespaces.
    #[test]
    fn prepare_then_upgrade_0_10_3() {
        test_upgrade(
            "test-resources/migrations/v0_10_3/",
            &[
                "ca_objects",
                "cas",
                "pubd",
                "pubd_objects",
                "signers",
                "status",
            ],
        );
    }
1404
    /// Full prepare/resume/finalise migration run on 0.11.0 test data.
    #[test]
    fn prepare_then_upgrade_0_11_0() {
        test_upgrade(
            "test-resources/migrations/v0_11_0/",
            &[
                "ca_objects",
                "cas",
                "pubd",
                "pubd_objects",
                "signers",
                "status",
            ],
        );
    }
1419
    /// Full prepare/resume/finalise migration run on 0.12.1 publication
    /// server only test data.
    #[test]
    fn prepare_then_upgrade_0_12_1_pubserver() {
        test_upgrade(
            "test-resources/migrations/v0_12_1_pubserver/",
            &["pubd", "pubd_objects"],
        );
    }
1427
    /// Full prepare/resume/finalise migration run on 0.12.3 test data.
    #[test]
    fn prepare_then_upgrade_0_12_3() {
        test_upgrade(
            "test-resources/migrations/v0_12_3/",
            &[
                "ca_objects",
                "cas",
                "pubd",
                "pubd_objects",
                "signers",
                "status",
            ],
        );
    }
1442
    /// Full prepare/resume/finalise migration run on 0.13.1 test data.
    #[test]
    fn prepare_then_upgrade_0_13_1() {
        test_upgrade(
            "test-resources/migrations/v0_13_1/",
            &[
                "ca_objects",
                "cas",
                "pubd",
                "pubd_objects",
                "signers",
                "status",
            ],
        );
    }
1457
    /// Full prepare/resume/finalise migration run on 0.13.1 publication
    /// server test data, which also includes keys and TA namespaces.
    #[test]
    fn prepare_then_upgrade_0_13_1_pubserver() {
        test_upgrade(
            "test-resources/migrations/v0_13_1_pubserver/",
            &[
                "ca_objects",
                "cas",
                "keys",
                "pubd",
                "pubd_objects",
                "signers",
                "status",
                "ta_proxy",
                "ta_signer",
            ],
        );
    }
1475
    /// Checks that a 0.10.0 RC3 repository content JSON snapshot still
    /// deserializes into the pre-0.13.0 `OldRepositoryContent` type.
    #[test]
    fn parse_0_10_0_rc3_repository_content() {
        let json = include_str!(
            "../../test-resources/migrations/v0_10_0_pubserver/0.json"
        );
        let _repo: pubd::pre_0_13_0::OldRepositoryContent =
            serde_json::from_str(json).unwrap();
    }
1484
    /// Shared test body for the OpenSSL key mapping migration.
    ///
    /// Copies a keys directory containing one unmapped OpenSSL key into
    /// in-memory storage. When `do_upgrade` is true, the migration
    /// (`record_preexisting_openssl_keys_in_signer_mapper`) is run first
    /// and the key must then be known to the signer mapper; when false,
    /// the migration is skipped and the mapper lookup must fail.
    #[cfg(all(
        feature = "hsm",
        not(any(feature = "hsm-tests-kmip", feature = "hsm-tests-pkcs11"))
    ))]
    fn unmapped_keys_test_core(do_upgrade: bool) {
        let temp_dir = tempdir().unwrap();
        copy_folder("test-resources/migrations/unmapped_keys/", &temp_dir);

        // The key identifier of the single test key placed on disk.
        let expected_key_id = KeyIdentifier::from_str(
            "5CBCAB14B810C864F3EEA8FD102B79F4E53FCC70",
        ).unwrap();

        // Copy test data into test storage
        let mem_storage_base_uri = test::mem_storage();

        let source_url = Url::parse(&format!(
            "local://{}", temp_dir.path().to_str().unwrap()
        )).unwrap();
        let source_store = KeyValueStore::create(
            &source_url, KEYS_NS
        ).unwrap();

        let target_store = KeyValueStore::create(
            &mem_storage_base_uri, KEYS_NS
        ).unwrap();
        target_store.import(&source_store, |_| true).unwrap();

        // This is needed for tls_dir etc, but will be ignored here.
        let bogus_path = PathBuf::from("/dev/null");

        let mut config = Config::test(
            &mem_storage_base_uri,
            Some(&bogus_path),
            false, false, false, false,
        );
        let _ = config.init_logging();
        config.process().unwrap();

        if do_upgrade {
            record_preexisting_openssl_keys_in_signer_mapper(
                &config
            ).unwrap();
        }

        // Now test that a newly initialized `KrillSigner` with a default
        // OpenSSL signer is associated with the newly created mapper
        // store and is thus able to use the key that we placed on
        // disk.
        let probe_interval = std::time::Duration::from_secs(
            config.signer_probe_retry_seconds
        );
        let krill_signer = crate::commons::crypto::KrillSignerBuilder::new(
            &mem_storage_base_uri,
            probe_interval,
            &config.signers,
        ).with_default_signer(
            config.default_signer()
        ).with_one_off_signer(
            config.one_off_signer()
        ).build().unwrap();

        // Trigger the signer to be bound to the one the migration just
        // registered in the mapper
        krill_signer.random_serial().unwrap();

        // Verify that the mapper has a single registered signer
        let mapper = krill_signer.get_mapper().unwrap();
        let signer_handles = mapper.get_signer_handles().unwrap();
        assert_eq!(1, signer_handles.len());

        if do_upgrade {
            // Verify that the mapper has a record of the test key belonging
            // to the signer
            mapper.get_signer_for_key(&expected_key_id).unwrap();
        }
        else {
            // Verify that the mapper does NOT have a record of the test key
            // belonging to the signer
            assert!(mapper.get_signer_for_key(&expected_key_id).is_err());
        }
    }
1566
    /// Without the migration, the pre-existing OpenSSL key must be
    /// unknown to the signer mapper.
    #[cfg(all(
        feature = "hsm",
        not(any(feature = "hsm-tests-kmip", feature = "hsm-tests-pkcs11"))
    ))]
    #[test]
    fn test_key_not_found_error_if_unmapped_keys_are_not_mapped_on_upgrade() {
        unmapped_keys_test_core(false);
    }
1575
    /// With the migration, the pre-existing OpenSSL key must be recorded
    /// in the signer mapper.
    #[cfg(all(
        feature = "hsm",
        not(any(feature = "hsm-tests-kmip", feature = "hsm-tests-pkcs11"))
    ))]
    #[test]
    fn test_upgrading_with_unmapped_keys() {
        unmapped_keys_test_core(true);
    }
1584
    /// Verifies that creating a `CaStatusStore` over 0.9.5-era status
    /// data migrates it, and that the migrated status for the "testbed"
    /// CA equals the status read from the raw key-value data beforehand.
    #[test]
    fn read_save_status() {
        let source_dir_path_str =
            "test-resources/status_store/migration-0.9.5/";
        let temp_dir = tempdir().unwrap();
        copy_folder(source_dir_path_str, &temp_dir);
        let source_dir_url = Url::parse(
            &format!("local://{}", &temp_dir.path().to_str().unwrap()))
                .unwrap();

        let source_store =
            KeyValueStore::create(&source_dir_url, STATUS_NS).unwrap();

        let test_storage_uri = test::mem_storage();
        let status_kv_store =
            KeyValueStore::create(&test_storage_uri, STATUS_NS).unwrap();

        // copy the source KV store (files) into the test KV store (in memory)
        // - skipping any ".locks" scope.
        status_kv_store.import(
            &source_store,
            |scope| {
                match scope.first_segment() {
                    Some(segment) => segment.as_str() != ".locks",
                    None => true
                }
            }
        ).unwrap();

        // get the status for testbed before initialising a StatusStore
        // using the copied the data - that will be done next and start
        // a migration.
        let testbed_status_key = Key::new_scoped(
            Scope::from_segment(const { Segment::make("testbed") }),
            Segment::parse("status.json").unwrap(),
        );
        let status_testbed_before_migration: CaStatus =
            status_kv_store.get(&testbed_status_key).unwrap().unwrap();

        // Initialise the StatusStore using the new (in memory) storage,
        // and migrate the data.
        let store =
            CaStatusStore::create(&test_storage_uri, STATUS_NS).unwrap();
        let testbed = CaHandle::from_str("testbed").unwrap();

        // Get the migrated status for testbed and verify that it's equivalent
        // to the status before migration.
        let status_testbed_migrated = store.get_ca_status(&testbed);

        assert_eq!(status_testbed_before_migration, status_testbed_migrated);
    }
1635}