// fil_actor_miner_state/v8/deadline_assignment.rs

// Copyright 2019-2022 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
3
use std::cmp::Ordering;
use std::collections::BinaryHeap;

use anyhow::anyhow;

use fil_actors_shared::v8::runtime::Policy;

use super::{Deadline, SectorOnChainInfo};
12
/// Integer division of `dividend` by `divisor`, rounding any remainder up.
/// Panics on `divisor == 0`, like plain integer division.
fn div_rounding_up(dividend: u64, divisor: u64) -> u64 {
    let quotient = dividend / divisor;
    let remainder = dividend % divisor;
    if remainder > 0 {
        quotient + 1
    } else {
        quotient
    }
}
16
/// Working state for one deadline while sectors are being distributed by
/// `assign_deadlines`: seeded from the deadline's on-chain counters and
/// incremented in place as each sector is assigned.
struct DeadlineAssignmentInfo {
    // Index of the deadline within the proving period's deadline array.
    index: usize,
    // Live sectors in the deadline; drives the post-compaction partition
    // count (`compact_partitions_after_assignment`).
    live_sectors: u64,
    // All sectors in the deadline (total >= live; per the comments in `cmp`,
    // the difference is terminated sectors); drives the pre-compaction
    // partition count and the fullness/limit checks.
    total_sectors: u64,
}
22
23impl DeadlineAssignmentInfo {
24    fn partitions_after_assignment(&self, partition_size: u64) -> u64 {
25        div_rounding_up(
26            self.total_sectors + 1, // after assignment
27            partition_size,
28        )
29    }
30
31    fn compact_partitions_after_assignment(&self, partition_size: u64) -> u64 {
32        div_rounding_up(
33            self.live_sectors + 1, // after assignment
34            partition_size,
35        )
36    }
37
38    fn is_full_now(&self, partition_size: u64) -> bool {
39        self.total_sectors % partition_size == 0
40    }
41
42    fn max_partitions_reached(&self, partition_size: u64, max_partitions: u64) -> bool {
43        self.total_sectors >= partition_size * max_partitions
44    }
45}
46
47fn cmp(a: &DeadlineAssignmentInfo, b: &DeadlineAssignmentInfo, partition_size: u64) -> Ordering {
48    // When assigning partitions to deadlines, we're trying to optimize the
49    // following:
50    //
51    // First, avoid increasing the maximum number of partitions in any
52    // deadline, across all deadlines, after compaction. This would
53    // necessitate buying a new GPU.
54    //
55    // Second, avoid forcing the miner to repeatedly compact partitions. A
56    // miner would be "forced" to compact a partition when a the number of
57    // partitions in any given deadline goes above the current maximum
58    // number of partitions across all deadlines, and compacting that
59    // deadline would then reduce the number of partitions, reducing the
60    // maximum.
61    //
62    // At the moment, the only "forced" compaction happens when either:
63    //
64    // 1. Assignment of the sector into any deadline would force a
65    //    compaction.
66    // 2. The chosen deadline has at least one full partition's worth of
67    //    terminated sectors and at least one fewer partition (after
68    //    compaction) than any other deadline.
69    //
70    // Third, we attempt to assign "runs" of sectors to the same partition
71    // to reduce the size of the bitfields.
72    //
73    // Finally, we try to balance the number of sectors (thus partitions)
74    // assigned to any given deadline over time.
75
76    // Summary:
77    //
78    // 1. Assign to the deadline that will have the _least_ number of
79    //    post-compaction partitions (after sector assignment).
80    // 2. Assign to the deadline that will have the _least_ number of
81    //    pre-compaction partitions (after sector assignment).
82    // 3. Assign to a deadline with a non-full partition.
83    //    - If both have non-full partitions, assign to the most full one (stable assortment).
84    // 4. Assign to the deadline with the least number of live sectors.
85    // 5. Assign sectors to the deadline with the lowest index first.
86
87    // If one deadline would end up with fewer partitions (after
88    // compacting), assign to that one. This ensures we keep the maximum
89    // number of partitions in any given deadline to a minimum.
90    //
91    // Technically, this could increase the maximum number of partitions
92    // before compaction. However, that can only happen if the deadline in
93    // question could save an entire partition by compacting. At that point,
94    // the miner should compact the deadline.
95    a.compact_partitions_after_assignment(partition_size)
96        .cmp(&b.compact_partitions_after_assignment(partition_size))
97        .then_with(|| {
98            // If, after assignment, neither deadline would have fewer
99            // post-compaction partitions, assign to the deadline with the fewest
100            // pre-compaction partitions (after assignment). This will put off
101            // compaction as long as possible.
102            a.partitions_after_assignment(partition_size)
103                .cmp(&b.partitions_after_assignment(partition_size))
104        })
105        .then_with(|| {
106            // Ok, we'll end up with the same number of partitions any which way we
107            // go. Try to fill up a partition instead of opening a new one.
108            a.is_full_now(partition_size)
109                .cmp(&b.is_full_now(partition_size))
110        })
111        .then_with(|| {
112            // Either we have two open partitions, or neither deadline has an open
113            // partition.
114
115            // If we have two open partitions, fill the deadline with the most-full
116            // open partition. This helps us assign runs of sequential sectors into
117            // the same partition.
118            if !a.is_full_now(partition_size) && !b.is_full_now(partition_size) {
119                a.total_sectors.cmp(&b.total_sectors).reverse()
120            } else {
121                Ordering::Equal
122            }
123        })
124        .then_with(|| {
125            // Otherwise, assign to the deadline with the least live sectors. This
126            // will break the tie in one of the two immediately preceding
127            // conditions.
128            a.live_sectors.cmp(&b.live_sectors)
129        })
130        .then_with(|| {
131            // Finally, fall back on the deadline index.
132            a.index.cmp(&b.index)
133        })
134}
135
136// Assigns partitions to deadlines, first filling partial partitions, then
137// adding new partitions to deadlines with the fewest live sectors.
138pub fn assign_deadlines(
139    policy: &Policy,
140    max_partitions: u64,
141    partition_size: u64,
142    deadlines: &[Option<Deadline>],
143    sectors: Vec<SectorOnChainInfo>,
144) -> anyhow::Result<Vec<Vec<SectorOnChainInfo>>> {
145    struct Entry {
146        partition_size: u64,
147        info: DeadlineAssignmentInfo,
148    }
149
150    impl PartialEq for Entry {
151        fn eq(&self, other: &Self) -> bool {
152            self.cmp(other) == Ordering::Equal
153        }
154    }
155
156    impl Eq for Entry {}
157
158    impl PartialOrd for Entry {
159        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
160            Some(self.cmp(other))
161        }
162    }
163
164    impl Ord for Entry {
165        fn cmp(&self, other: &Self) -> Ordering {
166            // we're using a max heap instead of a min heap, so we need to reverse the ordering
167            cmp(&self.info, &other.info, self.partition_size).reverse()
168        }
169    }
170
171    let mut heap: BinaryHeap<Entry> = deadlines
172        .iter()
173        .enumerate()
174        .filter_map(|(index, deadline)| deadline.as_ref().map(|dl| (index, dl)))
175        .map(|(index, deadline)| Entry {
176            partition_size,
177            info: DeadlineAssignmentInfo {
178                index,
179                live_sectors: deadline.live_sectors,
180                total_sectors: deadline.total_sectors,
181            },
182        })
183        .collect();
184
185    assert!(!heap.is_empty());
186
187    let mut changes = vec![Vec::new(); policy.wpost_period_deadlines as usize];
188
189    for sector in sectors {
190        let info = &mut heap.peek_mut().unwrap().info;
191
192        if info.max_partitions_reached(partition_size, max_partitions) {
193            return Err(anyhow!(
194                "max partitions limit {} reached for all deadlines",
195                max_partitions
196            ));
197        }
198
199        changes[info.index].push(sector);
200        info.live_sectors += 1;
201        info.total_sectors += 1;
202    }
203
204    Ok(changes)
205}