1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: Copyright The Lance Authors

use std::ops::Range;
use std::sync::Arc;

use arrow_array::FixedSizeListArray;
use lance_core::{Error, Result};
use lance_file::{reader::FileReader, writer::FileWriter};
use lance_io::{traits::WriteExt, utils::read_message};
use lance_table::io::manifest::ManifestDescribing;
use log::debug;
use serde::{Deserialize, Serialize};
use snafu::{location, Location};

use crate::pb::Ivf as PbIvf;

/// Schema-metadata key under which the JSON-serialized `IvfMetadata` (the file
/// position of the IVF protobuf message) is stored; written by `IvfData::write`
/// and read back by `IvfData::load`.
pub const IVF_METADATA_KEY: &str = "lance:ivf";
/// Metadata key for IVF partition information.
/// NOTE(review): not referenced in this file — presumably consumed elsewhere; verify callers.
pub const IVF_PARTITION_KEY: &str = "lance:ivf:partition";

/// In-memory description of the IVF (inverted-file) partitions stored in a
/// Lance file: optional centroids plus the row count of each partition.
///
/// Row offsets are derived from `lengths` and are never persisted.
// Was `#[warn(dead_code)]`, which is a no-op (dead_code already warns by
// default); `allow` is what suppresses the lint for not-yet-used items.
#[allow(dead_code)]
#[derive(Debug, Clone, PartialEq)]
pub struct IvfData {
    /// Centroids of the IVF indices. Can be empty.
    centroids: Option<Arc<FixedSizeListArray>>,

    /// Length (number of rows) of each partition.
    lengths: Vec<u32>,

    /// Pre-computed cumulative row offset for each partition, do not persist.
    /// Invariant: always `lengths.len() + 1` entries, starting at 0 (see
    /// `empty`, `add_partition`, and the `TryFrom<PbIvf>` impl).
    partition_row_offsets: Vec<usize>,
}

/// The IVF metadata stored in the Lance Schema
/// The IVF metadata stored (as JSON) in the Lance Schema metadata under
/// `IVF_METADATA_KEY`.
#[derive(Serialize, Deserialize, Debug)]
struct IvfMetadata {
    // The file position where the protobuf binary of the IVF metadata is stored.
    pb_position: usize,
}

impl IvfData {
    /// Create an empty IVF description: no centroids, no partitions.
    pub fn empty() -> Self {
        Self {
            centroids: None,
            lengths: vec![],
            // Offsets always carry a leading 0 sentinel so `row_range` can
            // read `[partition]` and `[partition + 1]` uniformly.
            partition_row_offsets: vec![0],
        }
    }

    /// Create an IVF description with the given centroids and no partitions yet.
    pub fn with_centroids(centroids: Arc<FixedSizeListArray>) -> Self {
        Self {
            centroids: Some(centroids),
            lengths: vec![],
            partition_row_offsets: vec![0],
        }
    }

    /// Load the IVF metadata from a Lance file.
    ///
    /// Looks up the JSON [`IvfMetadata`] entry in the schema metadata to find
    /// the position of the protobuf message, then reads and decodes it.
    ///
    /// # Errors
    /// Returns [`Error::Index`] if the metadata key is missing or its JSON is
    /// invalid; propagates I/O / decode errors from `read_message`.
    pub async fn load(reader: &FileReader) -> Result<Self> {
        let schema = reader.schema();
        let meta_str = schema.metadata.get(IVF_METADATA_KEY).ok_or(Error::Index {
            message: format!("{} not found during search", IVF_METADATA_KEY),
            location: location!(),
        })?;
        let ivf_metadata: IvfMetadata =
            serde_json::from_str(meta_str).map_err(|e| Error::Index {
                message: format!("Failed to parse IVF metadata: {}", e),
                location: location!(),
            })?;

        // `pb_position` is already a `usize`; the previous `as usize` cast was redundant.
        let pb: PbIvf = read_message(reader.object_reader.as_ref(), ivf_metadata.pb_position).await?;
        Self::try_from(pb)
    }

    /// Write the IVF metadata to the lance file.
    ///
    /// Serializes `self` as a protobuf message appended to the file, then
    /// records its byte position in the schema metadata under
    /// `IVF_METADATA_KEY` so `load` can find it.
    pub async fn write(&self, writer: &mut FileWriter<ManifestDescribing>) -> Result<()> {
        let pb = PbIvf::try_from(self)?;
        let pos = writer.object_writer.write_protobuf(&pb).await?;
        let ivf_metadata = IvfMetadata { pb_position: pos };
        writer.add_metadata(IVF_METADATA_KEY, &serde_json::to_string(&ivf_metadata)?);
        Ok(())
    }

    /// Append a partition with `num_rows` rows, extending the cumulative offsets.
    pub fn add_partition(&mut self, num_rows: u32) {
        self.lengths.push(num_rows);
        let last_offset = self.partition_row_offsets.last().copied().unwrap_or(0);
        self.partition_row_offsets
            .push(last_offset + num_rows as usize);
    }

    /// Whether centroids were provided / loaded.
    pub fn has_centroids(&self) -> bool {
        self.centroids.is_some()
    }

    /// Number of partitions recorded.
    pub fn num_partitions(&self) -> usize {
        self.lengths.len()
    }

    /// Range of the rows for one partition.
    ///
    /// # Panics
    /// Panics if `partition >= self.num_partitions()`.
    pub fn row_range(&self, partition: usize) -> Range<usize> {
        let start = self.partition_row_offsets[partition];
        let end = self.partition_row_offsets[partition + 1];
        start..end
    }
}

impl TryFrom<PbIvf> for IvfData {
    type Error = Error;

    /// Build an [`IvfData`] from its protobuf representation, recomputing the
    /// cumulative row offsets from the partition lengths.
    fn try_from(proto: PbIvf) -> Result<Self> {
        let centroids = if let Some(tensor) = proto.centroids_tensor.as_ref() {
            debug!("Ivf: loading IVF centroids from index format v2");
            Some(Arc::new(FixedSizeListArray::try_from(tensor)?))
        } else {
            // NOTE: the deprecated v1 `centroids` field is intentionally ignored here.
            None
        };
        // We are not using offsets from the protobuf, which was the file offset in the
        // v1 index format. It will be deprecated soon.
        //
        // This new offset uses the row offset in the lance file.
        //
        // Compute the offsets first (borrowing `lengths`), then move `lengths`
        // into the struct — `proto` is owned, so the previous `.clone()` was redundant.
        let partition_row_offsets: Vec<usize> = std::iter::once(0usize)
            .chain(proto.lengths.iter().scan(0usize, |state, &len| {
                *state += len as usize;
                Some(*state)
            }))
            .collect();
        Ok(Self {
            centroids,
            lengths: proto.lengths,
            partition_row_offsets,
        })
    }
}

impl TryFrom<&IvfData> for PbIvf {
    type Error = Error;

    /// Serialize an [`IvfData`] into its protobuf form, emitting centroids in
    /// the v2 tensor representation only.
    fn try_from(meta: &IvfData) -> Result<Self> {
        // Convert the centroids (when present) to a tensor; conversion errors
        // propagate to the caller.
        let centroids_tensor = match meta.centroids.as_ref() {
            Some(centroids) => Some(centroids.as_ref().try_into()?),
            None => None,
        };

        Ok(Self {
            centroids: vec![], // Deprecated
            lengths: meta.lengths.clone(),
            offsets: vec![], // Deprecated
            centroids_tensor,
        })
    }
}

#[cfg(test)]
mod tests {
    use arrow_array::{Float32Array, RecordBatch};
    use arrow_schema::{DataType, Field, Schema as ArrowSchema};
    use lance_core::datatypes::Schema;
    use lance_io::object_store::ObjectStore;
    use lance_table::format::SelfDescribingFileReader;
    use object_store::path::Path;

    use super::*;

    #[test]
    fn test_ivf_find_rows() {
        // Two partitions of 20 and 50 rows yield contiguous row ranges.
        let mut ivf = IvfData::empty();
        for num_rows in [20, 50] {
            ivf.add_partition(num_rows);
        }

        assert_eq!(ivf.row_range(0), 0..20);
        assert_eq!(ivf.row_range(1), 20..70);
    }

    #[tokio::test]
    async fn test_write_and_load() {
        let mut expected = IvfData::empty();
        expected.add_partition(20);
        expected.add_partition(50);

        let object_store = ObjectStore::memory();
        let path = Path::from("/foo");
        let arrow_schema = ArrowSchema::new(vec![Field::new("a", DataType::Float32, true)]);
        let schema = Schema::try_from(&arrow_schema).unwrap();

        // Write one dummy batch plus the IVF metadata, then close the file.
        {
            let mut writer =
                FileWriter::try_new(&object_store, &path, schema.clone(), &Default::default())
                    .await
                    .unwrap();
            let batch = RecordBatch::try_new(
                Arc::new(arrow_schema),
                vec![Arc::new(Float32Array::from(vec![Some(1.0)]))],
            )
            .unwrap();
            writer.write(&[batch]).await.unwrap();
            expected.write(&mut writer).await.unwrap();
            writer.finish().await.unwrap();
        }

        // Reading the file back must expose the metadata key and round-trip
        // the IVF data unchanged.
        let reader = FileReader::try_new_self_described(&object_store, &path, None)
            .await
            .unwrap();
        assert!(reader.schema().metadata.contains_key(IVF_METADATA_KEY));

        let actual = IvfData::load(&reader).await.unwrap();
        assert_eq!(expected, actual);
        assert_eq!(actual.num_partitions(), 2);
    }
}
}