use std::fs::create_dir_all;
use std::path::Path;
use std::{collections::HashMap, sync::Arc};
use lance::dataset::refs::Ref;
use lance::dataset::{ReadParams, WriteMode, builder::DatasetBuilder};
use lance::io::{ObjectStore, ObjectStoreParams, WrappingObjectStore};
use lance_datafusion::utils::StreamingWriteSource;
use lance_encoding::version::LanceFileVersion;
use lance_io::object_store::{StorageOptionsAccessor, StorageOptionsProvider};
use lance_table::io::commit::commit_handler_from_url;
use object_store::local::LocalFileSystem;
use snafu::ResultExt;
use crate::connection::ConnectRequest;
use crate::database::ReadConsistency;
use crate::error::{CreateDirSnafu, Error, Result};
use crate::io::object_store::MirroringObjectStoreWrapper;
use crate::table::NativeTable;
use crate::utils::validate_table_name;
use lance_namespace::models::{
CreateNamespaceRequest, CreateNamespaceResponse, DescribeNamespaceRequest,
DescribeNamespaceResponse, DropNamespaceRequest, DropNamespaceResponse, ListNamespacesRequest,
ListNamespacesResponse, ListTablesRequest, ListTablesResponse,
};
use super::{
BaseTable, CloneTableRequest, CreateTableMode, CreateTableRequest, Database, DatabaseOptions,
OpenTableRequest, TableNamesRequest,
};
/// File extension used for Lance table directories within the database root.
pub const LANCE_FILE_EXTENSION: &str = "lance";
/// Option key: data storage (file format) version for newly created tables.
pub const OPT_NEW_TABLE_STORAGE_VERSION: &str = "new_table_data_storage_version";
/// Option key: whether newly created tables use V2 manifest paths.
pub const OPT_NEW_TABLE_V2_MANIFEST_PATHS: &str = "new_table_enable_v2_manifest_paths";
/// Option key: whether newly created tables enable stable row ids.
pub const OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS: &str = "new_table_enable_stable_row_ids";
/// Per-database defaults applied to newly created tables.
///
/// Each field is optional; `None` means "use the underlying Lance default".
#[derive(Clone, Debug, Default)]
pub struct NewTableConfig {
    /// Data storage (file format) version for new tables.
    pub data_storage_version: Option<LanceFileVersion>,
    /// Whether new tables should use V2 manifest paths.
    pub enable_v2_manifest_paths: Option<bool>,
    /// Whether new tables should enable stable row ids.
    pub enable_stable_row_ids: Option<bool>,
}
/// Options for connecting to a listing database.
#[derive(Debug, Default, Clone)]
pub struct ListingDatabaseOptions {
    /// Defaults applied to newly created tables.
    pub new_table_config: NewTableConfig,
    /// Storage options passed through to the object store (every key in the
    /// connect options map that is not one of the `OPT_NEW_TABLE_*` keys).
    pub storage_options: HashMap<String, String>,
}
impl ListingDatabaseOptions {
    /// Create a builder for configuring [`ListingDatabaseOptions`].
    pub fn builder() -> ListingDatabaseOptionsBuilder {
        ListingDatabaseOptionsBuilder::new()
    }

    /// Parse options from a flat string map.
    ///
    /// The `OPT_NEW_TABLE_*` keys are interpreted as new-table configuration;
    /// every other key/value pair is forwarded verbatim as a storage option.
    pub(crate) fn parse_from_map(map: &HashMap<String, String>) -> Result<Self> {
        // Shared parser for the two boolean new-table options so both produce
        // the same style of error message.
        let parse_bool_opt = |key: &str, label: &str| -> Result<Option<bool>> {
            map.get(key)
                .map(|raw| {
                    raw.parse::<bool>().map_err(|_| Error::InvalidInput {
                        message: format!("{} must be a boolean, received {}", label, raw),
                    })
                })
                .transpose()
        };
        let new_table_config = NewTableConfig {
            data_storage_version: map
                .get(OPT_NEW_TABLE_STORAGE_VERSION)
                .map(|raw| raw.parse())
                .transpose()?,
            enable_v2_manifest_paths: parse_bool_opt(
                OPT_NEW_TABLE_V2_MANIFEST_PATHS,
                "enable_v2_manifest_paths",
            )?,
            enable_stable_row_ids: parse_bool_opt(
                OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS,
                "enable_stable_row_ids",
            )?,
        };
        // Everything that is not a new-table option is a storage option.
        let reserved = [
            OPT_NEW_TABLE_STORAGE_VERSION,
            OPT_NEW_TABLE_V2_MANIFEST_PATHS,
            OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS,
        ];
        let storage_options = map
            .iter()
            .filter(|(key, _)| !reserved.contains(&key.as_str()))
            .map(|(key, value)| (key.clone(), value.clone()))
            .collect();
        Ok(Self {
            new_table_config,
            storage_options,
        })
    }
}
impl DatabaseOptions for ListingDatabaseOptions {
    /// Serialize the new-table configuration back into a flat string map.
    ///
    /// Only the `OPT_NEW_TABLE_*` keys are written; `storage_options` are not
    /// serialized here (matching the original behavior).
    fn serialize_into_map(&self, map: &mut HashMap<String, String>) {
        let config = &self.new_table_config;
        if let Some(version) = &config.data_storage_version {
            map.insert(
                OPT_NEW_TABLE_STORAGE_VERSION.to_string(),
                version.to_string(),
            );
        }
        if let Some(v2_paths) = config.enable_v2_manifest_paths {
            map.insert(
                OPT_NEW_TABLE_V2_MANIFEST_PATHS.to_string(),
                v2_paths.to_string(),
            );
        }
        if let Some(stable_ids) = config.enable_stable_row_ids {
            map.insert(
                OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
                stable_ids.to_string(),
            );
        }
    }
}
/// Builder for [`ListingDatabaseOptions`].
#[derive(Clone, Debug, Default)]
pub struct ListingDatabaseOptionsBuilder {
    // Options accumulated so far; returned by `build`.
    options: ListingDatabaseOptions,
}
impl ListingDatabaseOptionsBuilder {
pub fn new() -> Self {
Self {
options: ListingDatabaseOptions::default(),
}
}
}
impl ListingDatabaseOptionsBuilder {
    /// Set the data storage (file format) version for newly created tables.
    pub fn data_storage_version(mut self, data_storage_version: LanceFileVersion) -> Self {
        self.options.new_table_config.data_storage_version = Some(data_storage_version);
        self
    }

    /// Set whether newly created tables use V2 manifest paths.
    pub fn enable_v2_manifest_paths(mut self, enable_v2_manifest_paths: bool) -> Self {
        self.options.new_table_config.enable_v2_manifest_paths = Some(enable_v2_manifest_paths);
        self
    }

    /// Set whether newly created tables enable stable row ids.
    ///
    /// Fix: `NewTableConfig` has this field and `parse_from_map` accepts the
    /// corresponding option key, but the builder previously had no way to set
    /// it. Added for parity with the other two new-table options.
    pub fn enable_stable_row_ids(mut self, enable_stable_row_ids: bool) -> Self {
        self.options.new_table_config.enable_stable_row_ids = Some(enable_stable_row_ids);
        self
    }

    /// Add a single storage option key/value pair.
    pub fn storage_option(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
        self.options
            .storage_options
            .insert(key.into(), value.into());
        self
    }

    /// Add multiple storage option key/value pairs.
    pub fn storage_options(
        mut self,
        pairs: impl IntoIterator<Item = (impl Into<String>, impl Into<String>)>,
    ) -> Self {
        for (key, value) in pairs {
            self.options
                .storage_options
                .insert(key.into(), value.into());
        }
        self
    }

    /// Consume the builder and return the accumulated options.
    pub fn build(self) -> ListingDatabaseOptions {
        self.options
    }
}
/// A database implementation that discovers tables by listing a directory
/// (local or object store) for `*.lance` subdirectories.
#[derive(Debug)]
pub struct ListingDatabase {
    // Object store wrapping the database root.
    object_store: Arc<ObjectStore>,
    // Query string from the connection URI (minus the `engine` /
    // `mirroredStore` parameters); re-appended to per-table URIs.
    query_string: Option<String>,
    // Base URI of the database (may carry an engine-augmented scheme).
    pub(crate) uri: String,
    // Root path of the database within the object store.
    pub(crate) base_path: object_store::path::Path,
    // Optional wrapper applied when writing (used for the mirrored store).
    pub(crate) store_wrapper: Option<Arc<dyn WrappingObjectStore>>,
    // None = manual consistency; Some(0) = strong; Some(d) = eventual.
    read_consistency_interval: Option<std::time::Duration>,
    // Storage options inherited by table open/create calls.
    storage_options: HashMap<String, String>,
    // Optional dynamic provider of (refreshable) storage options.
    pub(crate) storage_options_provider: Option<Arc<dyn StorageOptionsProvider>>,
    // Defaults applied to newly created tables.
    new_table_config: NewTableConfig,
    // Shared Lance session (caches, object store registry).
    session: Arc<lance::session::Session>,
}
impl std::fmt::Display for ListingDatabase {
    /// Render as `ListingDatabase(uri=..., read_consistency_interval=...)`,
    /// with the interval shown in fractional seconds or `None`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let interval = self
            .read_consistency_interval
            .map_or_else(|| "None".to_string(), |d| format!("{}s", d.as_secs_f64()));
        write!(
            f,
            "ListingDatabase(uri={}, read_consistency_interval={})",
            self.uri, interval
        )
    }
}
// Directory extension that marks a table inside the database root.
const LANCE_EXTENSION: &str = "lance";
// URI query parameter selecting an alternate engine (`engine=...`).
const ENGINE: &str = "engine";
// URI query parameter enabling write mirroring (`mirroredStore=...`).
const MIRRORED_STORE: &str = "mirroredStore";
impl ListingDatabase {
    /// Connect to (or lazily create) a listing database at `request.uri`.
    ///
    /// Two special URI query parameters are consumed here:
    /// * `engine=<name>` — rewrites the scheme to `<scheme>+<name>` (unstable API)
    /// * `mirroredStore=<path>` — mirrors table writes into a local directory
    ///   (rejected on Windows)
    ///
    /// All other query parameters are preserved and re-applied to per-table URIs.
    pub async fn connect_with_options(request: &ConnectRequest) -> Result<Self> {
        let uri = &request.uri;
        let parse_res = url::Url::parse(uri);
        let options = ListingDatabaseOptions::parse_from_map(&request.options)?;
        match parse_res {
            // On Windows a drive letter (`C:\...`) parses as a one-letter
            // scheme; treat such URIs as plain filesystem paths.
            Ok(url) if url.scheme().len() == 1 && cfg!(windows) => {
                Self::open_path(
                    uri,
                    request.read_consistency_interval,
                    options.new_table_config,
                    request.session.clone(),
                )
                .await
            }
            Ok(mut url) => {
                // Split the query string into parameters consumed here
                // (engine / mirroredStore) and pass-through parameters.
                let mut engine = None;
                let mut mirrored_store = None;
                let mut filtered_querys = vec![];
                for (key, value) in url.query_pairs() {
                    if key == ENGINE {
                        engine = Some(value.to_string());
                    } else if key == MIRRORED_STORE {
                        if cfg!(windows) {
                            return Err(Error::NotSupported {
                                message: "mirrored store is not supported on windows".into(),
                            });
                        }
                        mirrored_store = Some(value.to_string());
                    } else {
                        filtered_querys.push((key.to_string(), value.to_string()));
                    }
                }
                // Rebuild the URL with only the pass-through parameters, then
                // detach the query string so it can be re-added per table.
                url.query_pairs_mut().clear();
                url.query_pairs_mut().extend_pairs(filtered_querys);
                let query_string = url.query().map(|s| s.to_string());
                url.set_query(None);
                let table_base_uri = if let Some(store) = engine {
                    static WARN_ONCE: std::sync::Once = std::sync::Once::new();
                    WARN_ONCE.call_once(|| {
                        log::warn!("Specifying engine is not a publicly supported feature in lancedb yet. THE API WILL CHANGE");
                    });
                    // Encode the engine into the scheme, e.g. `s3` -> `s3+store`.
                    let old_scheme = url.scheme().to_string();
                    let new_scheme = format!("{}+{}", old_scheme, store);
                    url.to_string().replacen(&old_scheme, &new_scheme, 1)
                } else {
                    url.to_string()
                };
                let plain_uri = url.to_string();
                let session = request
                    .session
                    .clone()
                    .unwrap_or_else(|| Arc::new(lance::session::Session::default()));
                // Forward any connect-level storage options to the store.
                let os_params = ObjectStoreParams {
                    storage_options_accessor: if options.storage_options.is_empty() {
                        None
                    } else {
                        Some(Arc::new(StorageOptionsAccessor::with_static_options(
                            options.storage_options.clone(),
                        )))
                    },
                    ..Default::default()
                };
                let (object_store, base_path) = ObjectStore::from_uri_and_params(
                    session.store_registry(),
                    &plain_uri,
                    &os_params,
                )
                .await?;
                // Local databases are created on first connect: make the root
                // directory if it does not exist yet.
                if object_store.is_local() {
                    Self::try_create_dir(&plain_uri).context(CreateDirSnafu {
                        path: plain_uri.clone(),
                    })?;
                }
                // Optional write mirroring into a local directory.
                let write_store_wrapper = match mirrored_store {
                    Some(path) => {
                        let mirrored_store = Arc::new(LocalFileSystem::new_with_prefix(path)?);
                        let wrapper = MirroringObjectStoreWrapper::new(mirrored_store);
                        Some(Arc::new(wrapper) as Arc<dyn WrappingObjectStore>)
                    }
                    None => None,
                };
                Ok(Self {
                    uri: table_base_uri,
                    query_string,
                    base_path,
                    object_store,
                    store_wrapper: write_store_wrapper,
                    read_consistency_interval: request.read_consistency_interval,
                    storage_options: options.storage_options,
                    storage_options_provider: None,
                    new_table_config: options.new_table_config,
                    session,
                })
            }
            // Not parseable as a URL: treat as a filesystem path.
            Err(_) => {
                Self::open_path(
                    uri,
                    request.read_consistency_interval,
                    options.new_table_config,
                    request.session.clone(),
                )
                .await
            }
        }
    }
async fn open_path(
path: &str,
read_consistency_interval: Option<std::time::Duration>,
new_table_config: NewTableConfig,
session: Option<Arc<lance::session::Session>>,
) -> Result<Self> {
let session = session.unwrap_or_else(|| Arc::new(lance::session::Session::default()));
let (object_store, base_path) = ObjectStore::from_uri_and_params(
session.store_registry(),
path,
&ObjectStoreParams::default(),
)
.await?;
if object_store.is_local() {
Self::try_create_dir(path).context(CreateDirSnafu { path })?;
}
Ok(Self {
uri: path.to_string(),
query_string: None,
base_path,
object_store,
store_wrapper: None,
read_consistency_interval,
storage_options: HashMap::new(),
storage_options_provider: None,
new_table_config,
session,
})
}
fn try_create_dir(path: &str) -> core::result::Result<(), std::io::Error> {
let fs_path = if let Some(stripped) = path.strip_prefix("file://") {
stripped
} else if let Some(stripped) = path.strip_prefix("file:") {
stripped
} else {
path
};
let path = Path::new(fs_path);
if !path.try_exists()? {
create_dir_all(path)?;
}
Ok(())
}
fn table_uri(&self, name: &str) -> Result<String> {
validate_table_name(name)?;
let mut uri = self.uri.clone();
let has_scheme = uri.contains("://");
let ends_with_separator = uri.ends_with('/') || uri.ends_with('\\');
if !ends_with_separator {
if has_scheme {
uri.push('/');
} else {
uri.push(std::path::MAIN_SEPARATOR);
}
}
uri.push_str(&format!("{}.{}", name, LANCE_FILE_EXTENSION));
if let Some(query) = self.query_string.as_ref() {
uri.push('?');
uri.push_str(query.as_str());
}
Ok(uri)
}
    /// Delete the given tables (their `<name>.lance` directories) from the
    /// database root.
    ///
    /// Returns `Error::TableNotFound` when a table directory does not exist.
    async fn drop_tables(&self, names: Vec<String>) -> Result<()> {
        let object_store_params = ObjectStoreParams {
            storage_options_accessor: if self.storage_options.is_empty() {
                None
            } else {
                Some(Arc::new(StorageOptionsAccessor::with_static_options(
                    self.storage_options.clone(),
                )))
            },
            ..Default::default()
        };
        // The commit handler is resolved from the full URI, including any
        // pass-through query parameters.
        let mut uri = self.uri.clone();
        if let Some(query_string) = &self.query_string {
            uri.push_str(&format!("?{}", query_string));
        }
        let commit_handler = commit_handler_from_url(&uri, &Some(object_store_params)).await?;
        for name in names {
            let dir_name = format!("{}.{}", name, LANCE_EXTENSION);
            let full_path = self.base_path.child(dir_name.clone());
            // Let the commit handler clean up its own state first, then
            // remove the table directory itself.
            commit_handler.delete(&full_path).await?;
            self.object_store
                .remove_dir_all(full_path.clone())
                .await
                .map_err(|err| match err {
                    // Map a missing directory to a table-level error.
                    lance::Error::NotFound { .. } => Error::TableNotFound {
                        name: name.clone(),
                        source: Box::new(err),
                    },
                    _ => Error::from(err),
                })?;
        }
        Ok(())
    }
fn inherit_storage_options(&self, target: &mut HashMap<String, String>) {
for (key, value) in self.storage_options.iter() {
if !target.contains_key(key) {
target.insert(key.clone(), value.clone());
}
}
}
    /// Extract per-request overrides of the new-table options from the write
    /// request's storage options.
    ///
    /// Returns `(data_storage_version, enable_v2_manifest_paths,
    /// enable_stable_row_ids)`; each element is `None` when the corresponding
    /// `OPT_NEW_TABLE_*` key is absent from the request.
    fn extract_storage_overrides(
        &self,
        request: &CreateTableRequest,
    ) -> Result<(Option<LanceFileVersion>, Option<bool>, Option<bool>)> {
        // Storage options may be absent at any level of the request.
        let storage_options = request
            .write_options
            .lance_write_params
            .as_ref()
            .and_then(|p| p.store_params.as_ref())
            .and_then(|sp| sp.storage_options());
        let storage_version_override = storage_options
            .and_then(|opts| opts.get(OPT_NEW_TABLE_STORAGE_VERSION))
            .map(|s| s.parse::<LanceFileVersion>())
            .transpose()?;
        let v2_manifest_override = storage_options
            .and_then(|opts| opts.get(OPT_NEW_TABLE_V2_MANIFEST_PATHS))
            .map(|s| s.parse::<bool>())
            .transpose()
            .map_err(|_| Error::InvalidInput {
                message: "enable_v2_manifest_paths must be a boolean".to_string(),
            })?;
        let stable_row_ids_override = storage_options
            .and_then(|opts| opts.get(OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS))
            .map(|s| s.parse::<bool>())
            .transpose()
            .map_err(|_| Error::InvalidInput {
                message: "enable_stable_row_ids must be a boolean".to_string(),
            })?;
        Ok((
            storage_version_override,
            v2_manifest_override,
            stable_row_ids_override,
        ))
    }
    /// Build the Lance write parameters for a table create, layering database
    /// defaults over the request's own write options.
    fn prepare_write_params(
        &self,
        request: &CreateTableRequest,
        storage_version_override: Option<LanceFileVersion>,
        v2_manifest_override: Option<bool>,
        stable_row_ids_override: Option<bool>,
    ) -> lance::dataset::WriteParams {
        let mut write_params = request
            .write_options
            .lance_write_params
            .clone()
            .unwrap_or_default();
        // Merge database-level storage options (and provider) into the
        // request's store params; keys already in the request win.
        if !self.storage_options.is_empty() || self.storage_options_provider.is_some() {
            let store_params = write_params
                .store_params
                .get_or_insert_with(Default::default);
            let mut storage_options = store_params.storage_options().cloned().unwrap_or_default();
            if !self.storage_options.is_empty() {
                self.inherit_storage_options(&mut storage_options);
            }
            let accessor = if let Some(ref provider) = self.storage_options_provider {
                StorageOptionsAccessor::with_initial_and_provider(storage_options, provider.clone())
            } else {
                StorageOptionsAccessor::with_static_options(storage_options)
            };
            store_params.storage_options_accessor = Some(Arc::new(accessor));
        }
        // NOTE(review): for data_storage_version and enable_v2_manifest_paths
        // the database config takes precedence over the per-request override,
        // but for enable_stable_row_ids the override wins — confirm this
        // asymmetry is intentional.
        write_params.data_storage_version = self
            .new_table_config
            .data_storage_version
            .or(storage_version_override);
        if let Some(enable_v2_manifest_paths) = self
            .new_table_config
            .enable_v2_manifest_paths
            .or(v2_manifest_override)
        {
            write_params.enable_v2_manifest_paths = enable_v2_manifest_paths;
        }
        if let Some(enable_stable_row_ids) =
            stable_row_ids_override.or(self.new_table_config.enable_stable_row_ids)
        {
            write_params.enable_stable_row_ids = enable_stable_row_ids;
        }
        // Overwrite mode is implemented at the Lance write layer.
        if matches!(&request.mode, CreateTableMode::Overwrite) {
            write_params.mode = WriteMode::Overwrite;
        }
        write_params.session = Some(self.session.clone());
        write_params
    }
async fn handle_table_exists(
&self,
table_name: &str,
namespace: Vec<String>,
mode: CreateTableMode,
data_schema: &arrow_schema::Schema,
) -> Result<Arc<dyn BaseTable>> {
match mode {
CreateTableMode::Create => Err(Error::TableAlreadyExists {
name: table_name.to_string(),
}),
CreateTableMode::ExistOk(callback) => {
let req = OpenTableRequest {
name: table_name.to_string(),
namespace: namespace.clone(),
index_cache_size: None,
lance_read_params: None,
location: None,
namespace_client: None,
managed_versioning: None,
};
let req = (callback)(req);
let table = self.open_table(req).await?;
let table_schema = table.schema().await?;
if table_schema.as_ref() != data_schema {
return Err(Error::Schema {
message: "Provided schema does not match existing table schema".to_string(),
});
}
Ok(table)
}
CreateTableMode::Overwrite => unreachable!(),
}
}
}
#[async_trait::async_trait]
impl Database for ListingDatabase {
async fn list_namespaces(
&self,
request: ListNamespacesRequest,
) -> Result<ListNamespacesResponse> {
if request.id.as_ref().map(|v| !v.is_empty()).unwrap_or(false) {
return Err(Error::NotSupported {
message: "Namespace operations are not supported for listing database".into(),
});
}
Ok(ListNamespacesResponse {
namespaces: Vec::new(),
page_token: None,
})
}
    /// The base URI of this database.
    fn uri(&self) -> &str {
        &self.uri
    }
async fn read_consistency(&self) -> Result<ReadConsistency> {
if let Some(read_consistency_inverval) = self.read_consistency_interval {
if read_consistency_inverval.is_zero() {
Ok(ReadConsistency::Strong)
} else {
Ok(ReadConsistency::Eventual(read_consistency_inverval))
}
} else {
Ok(ReadConsistency::Manual)
}
}
    /// Listing databases have no namespace hierarchy; always errors.
    async fn create_namespace(
        &self,
        _request: CreateNamespaceRequest,
    ) -> Result<CreateNamespaceResponse> {
        Err(Error::NotSupported {
            message: "Namespace operations are not supported for listing database".into(),
        })
    }
    /// Listing databases have no namespace hierarchy; always errors.
    async fn drop_namespace(
        &self,
        _request: DropNamespaceRequest,
    ) -> Result<DropNamespaceResponse> {
        Err(Error::NotSupported {
            message: "Namespace operations are not supported for listing database".into(),
        })
    }
    /// Listing databases have no namespace hierarchy; always errors.
    async fn describe_namespace(
        &self,
        _request: DescribeNamespaceRequest,
    ) -> Result<DescribeNamespaceResponse> {
        Err(Error::NotSupported {
            message: "Namespace operations are not supported for listing database".into(),
        })
    }
async fn table_names(&self, request: TableNamesRequest) -> Result<Vec<String>> {
if !request.namespace.is_empty() {
return Err(Error::NotSupported {
message: "Namespace parameter is not supported for listing database. Only root namespace is supported.".into(),
});
}
let mut f = self
.object_store
.read_dir(self.base_path.clone())
.await?
.iter()
.map(Path::new)
.filter(|path| {
let is_lance = path
.extension()
.and_then(|e| e.to_str())
.map(|e| e == LANCE_EXTENSION);
is_lance.unwrap_or(false)
})
.filter_map(|p| p.file_stem().and_then(|s| s.to_str().map(String::from)))
.collect::<Vec<String>>();
f.sort();
if let Some(start_after) = request.start_after {
let index = f
.iter()
.position(|name| name.as_str() > start_after.as_str())
.unwrap_or(f.len());
f.drain(0..index);
}
if let Some(limit) = request.limit {
f.truncate(limit as usize);
}
Ok(f)
}
async fn list_tables(&self, request: ListTablesRequest) -> Result<ListTablesResponse> {
if request.id.as_ref().map(|v| !v.is_empty()).unwrap_or(false) {
return Err(Error::NotSupported {
message: "Namespace parameter is not supported for listing database. Only root namespace is supported.".into(),
});
}
let mut f = self
.object_store
.read_dir(self.base_path.clone())
.await?
.iter()
.map(Path::new)
.filter(|path| {
let is_lance = path
.extension()
.and_then(|e| e.to_str())
.map(|e| e == LANCE_EXTENSION);
is_lance.unwrap_or(false)
})
.filter_map(|p| p.file_stem().and_then(|s| s.to_str().map(String::from)))
.collect::<Vec<String>>();
f.sort();
if let Some(ref page_token) = request.page_token {
let index = f
.iter()
.position(|name| name.as_str() > page_token.as_str())
.unwrap_or(f.len());
f.drain(0..index);
}
let next_page_token = if let Some(limit) = request.limit {
if f.len() > limit as usize {
let token = f[limit as usize].clone();
f.truncate(limit as usize);
Some(token)
} else {
None
}
} else {
None
};
Ok(ListTablesResponse {
tables: f,
page_token: next_page_token,
})
}
    /// Create a new table from the data in `request`.
    ///
    /// When the table already exists the behavior depends on `request.mode`:
    /// `Create` errors, `Overwrite` replaces (handled via the write params),
    /// and `ExistOk` opens the existing table after verifying its schema.
    async fn create_table(&self, request: CreateTableRequest) -> Result<Arc<dyn BaseTable>> {
        // The listing layout has no namespaces; a namespaced create must
        // supply an explicit location.
        if !request.namespace.is_empty() && request.location.is_none() {
            return Err(Error::InvalidInput {
                message: "Location must be provided when namespace is not empty".into(),
            });
        }
        let table_uri = request
            .location
            .clone()
            .unwrap_or_else(|| self.table_uri(&request.name).unwrap());
        let (storage_version_override, v2_manifest_override, stable_row_ids_override) =
            self.extract_storage_overrides(&request)?;
        let write_params = self.prepare_write_params(
            &request,
            storage_version_override,
            v2_manifest_override,
            stable_row_ids_override,
        );
        // Capture the schema before `request.data` is consumed below, in case
        // the ExistOk comparison needs it.
        let data_schema = request.data.arrow_schema();
        match NativeTable::create(
            &table_uri,
            &request.name,
            request.namespace.clone(),
            request.data,
            self.store_wrapper.clone(),
            Some(write_params),
            self.read_consistency_interval,
            request.namespace_client,
            false,
        )
        .await
        {
            Ok(table) => Ok(Arc::new(table)),
            // Conflict: defer to the mode-specific handling.
            Err(Error::TableAlreadyExists { .. }) => {
                self.handle_table_exists(
                    &request.name,
                    request.namespace.clone(),
                    request.mode,
                    &data_schema,
                )
                .await
            }
            Err(err) => Err(err),
        }
    }
    /// Shallow-clone a source dataset into a new table in this database.
    ///
    /// Only shallow clones into the root namespace are supported. The source
    /// may be pinned to a version or a tag (but not both); by default the
    /// currently loaded version of the source is cloned.
    async fn clone_table(&self, request: CloneTableRequest) -> Result<Arc<dyn BaseTable>> {
        if !request.target_namespace.is_empty() {
            return Err(Error::NotSupported {
                message: "Namespace parameter is not supported for listing database. Only root namespace is supported.".into(),
            });
        }
        if !request.is_shallow {
            return Err(Error::NotSupported {
                message: "Deep clone is not yet implemented".to_string(),
            });
        }
        validate_table_name(&request.target_table_name)?;
        // Database-level storage options apply to both the source read and
        // the clone write.
        let storage_params = ObjectStoreParams {
            storage_options_accessor: if self.storage_options.is_empty() {
                None
            } else {
                Some(Arc::new(StorageOptionsAccessor::with_static_options(
                    self.storage_options.clone(),
                )))
            },
            ..Default::default()
        };
        let read_params = ReadParams {
            store_options: Some(storage_params.clone()),
            session: Some(self.session.clone()),
            ..Default::default()
        };
        let mut source_dataset = DatasetBuilder::from_uri(&request.source_uri)
            .with_read_params(read_params.clone())
            .load()
            .await
            .map_err(|e| -> Error { e.into() })?;
        // Resolve which version of the source to clone.
        let version_ref = match (request.source_version, request.source_tag) {
            (Some(v), None) => Ok(Ref::Version(None, Some(v))),
            (None, Some(tag)) => Ok(Ref::Tag(tag)),
            // No pin: clone the currently loaded version.
            (None, None) => Ok(Ref::Version(None, Some(source_dataset.version().version))),
            _ => Err(Error::InvalidInput {
                message: "Cannot specify both source_version and source_tag".to_string(),
            }),
        }?;
        let target_uri = self.table_uri(&request.target_table_name)?;
        source_dataset
            .shallow_clone(&target_uri, version_ref, Some(storage_params))
            .await
            .map_err(|e| -> Error { e.into() })?;
        // Open the freshly cloned table through the normal path so callers
        // get a fully wired BaseTable.
        let cloned_table = NativeTable::open_with_params(
            &target_uri,
            &request.target_table_name,
            request.target_namespace,
            self.store_wrapper.clone(),
            None,
            self.read_consistency_interval,
            request.namespace_client,
            false,
            None,
        )
        .await?;
        Ok(Arc::new(cloned_table))
    }
    /// Open an existing table.
    ///
    /// Database-level storage options (and any storage options provider) are
    /// merged into the request's read parameters before opening; keys already
    /// set on the request win.
    async fn open_table(&self, mut request: OpenTableRequest) -> Result<Arc<dyn BaseTable>> {
        // The listing layout has no namespaces; a namespaced open must supply
        // an explicit location.
        if !request.namespace.is_empty() && request.location.is_none() {
            return Err(Error::InvalidInput {
                message: "Location must be provided when namespace is not empty".into(),
            });
        }
        let table_uri = request
            .location
            .clone()
            .unwrap_or_else(|| self.table_uri(&request.name).unwrap());
        if !self.storage_options.is_empty() || self.storage_options_provider.is_some() {
            let store_params = request
                .lance_read_params
                .get_or_insert_with(Default::default)
                .store_options
                .get_or_insert_with(Default::default);
            let mut storage_options = store_params.storage_options().cloned().unwrap_or_default();
            if !self.storage_options.is_empty() {
                self.inherit_storage_options(&mut storage_options);
            }
            // Prefer the database's provider, falling back to one already
            // attached to the request's accessor.
            let request_provider = store_params
                .storage_options_accessor
                .as_ref()
                .and_then(|a| a.provider().cloned());
            let provider = self.storage_options_provider.clone().or(request_provider);
            let accessor = if let Some(provider) = provider {
                StorageOptionsAccessor::with_initial_and_provider(storage_options, provider)
            } else {
                StorageOptionsAccessor::with_static_options(storage_options)
            };
            store_params.storage_options_accessor = Some(Arc::new(accessor));
        }
        let mut read_params = request.lance_read_params.unwrap_or_else(|| {
            let mut default_params = ReadParams::default();
            // index_cache_size is deprecated upstream but still honored when
            // no explicit read params were supplied.
            if let Some(index_cache_size) = request.index_cache_size {
                #[allow(deprecated)]
                default_params.index_cache_size(index_cache_size as usize);
            }
            default_params
        });
        read_params.session(self.session.clone());
        let native_table = Arc::new(
            NativeTable::open_with_params(
                &table_uri,
                &request.name,
                request.namespace,
                self.store_wrapper.clone(),
                Some(read_params),
                self.read_consistency_interval,
                request.namespace_client,
                false,
                request.managed_versioning,
            )
            .await?,
        );
        Ok(native_table)
    }
async fn rename_table(
&self,
_cur_name: &str,
_new_name: &str,
cur_namespace: &[String],
new_namespace: &[String],
) -> Result<()> {
if !cur_namespace.is_empty() {
return Err(Error::NotSupported {
message: "Namespace parameter is not supported for listing database.".into(),
});
}
if !new_namespace.is_empty() {
return Err(Error::NotSupported {
message: "Namespace parameter is not supported for listing database.".into(),
});
}
Err(Error::NotSupported {
message: "rename_table is not supported in LanceDB OSS".into(),
})
}
async fn drop_table(&self, name: &str, namespace: &[String]) -> Result<()> {
if !namespace.is_empty() {
return Err(Error::NotSupported {
message: "Namespace parameter is not supported for listing database.".into(),
});
}
self.drop_tables(vec![name.to_string()]).await
}
#[allow(deprecated)]
async fn drop_all_tables(&self, namespace: &[String]) -> Result<()> {
if !namespace.is_empty() {
return Err(Error::NotSupported {
message: "Namespace parameter is not supported for listing database.".into(),
});
}
let tables = self.table_names(TableNamesRequest::default()).await?;
self.drop_tables(tables).await
}
    /// Allow callers to downcast to the concrete `ListingDatabase` type.
    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
    /// Build a directory-based namespace client rooted at this database's
    /// URI, sharing this database's storage options and session.
    async fn namespace_client(&self) -> Result<Arc<dyn lance_namespace::LanceNamespace>> {
        let mut builder = lance_namespace_impls::DirectoryNamespaceBuilder::new(&self.uri);
        if !self.storage_options.is_empty() {
            builder = builder.storage_options(self.storage_options.clone());
        }
        builder = builder.session(self.session.clone());
        let namespace = builder.build().await.map_err(|e| Error::Runtime {
            message: format!("Failed to create namespace client: {}", e),
        })?;
        Ok(Arc::new(namespace) as Arc<dyn lance_namespace::LanceNamespace>)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::Table;
use crate::connection::ConnectRequest;
use crate::data::scannable::Scannable;
use crate::database::{CreateTableMode, CreateTableRequest};
use crate::table::WriteOptions;
use arrow_array::{Int32Array, RecordBatch, StringArray};
use arrow_schema::{DataType, Field, Schema};
use std::path::PathBuf;
use tempfile::tempdir;
    // Create a ListingDatabase backed by a fresh temporary directory. The
    // TempDir is returned so it outlives the database for the test's duration.
    async fn setup_database() -> (tempfile::TempDir, ListingDatabase) {
        let tempdir = tempdir().unwrap();
        let uri = tempdir.path().to_str().unwrap();
        let request = ConnectRequest {
            uri: uri.to_string(),
            #[cfg(feature = "remote")]
            client_config: Default::default(),
            options: Default::default(),
            read_consistency_interval: None,
            session: None,
        };
        let db = ListingDatabase::connect_with_options(&request)
            .await
            .unwrap();
        (tempdir, db)
    }
    // Shallow-cloning an empty table registers the clone in the database
    // listing and preserves the source schema.
    #[tokio::test]
    async fn test_clone_table_basic() {
        let (_tempdir, db) = setup_database().await;
        let schema = Arc::new(Schema::new(vec![
            Field::new("id", DataType::Int32, false),
            Field::new("name", DataType::Utf8, false),
        ]));
        let source_table = db
            .create_table(CreateTableRequest {
                name: "source_table".to_string(),
                namespace: vec![],
                data: Box::new(RecordBatch::new_empty(schema.clone())) as Box<dyn Scannable>,
                mode: CreateTableMode::Create,
                write_options: Default::default(),
                location: None,
                namespace_client: None,
            })
            .await
            .unwrap();
        let source_uri = db.table_uri("source_table").unwrap();
        let cloned_table = db
            .clone_table(CloneTableRequest {
                target_table_name: "cloned_table".to_string(),
                target_namespace: vec![],
                source_uri: source_uri.clone(),
                source_version: None,
                source_tag: None,
                is_shallow: true,
                namespace_client: None,
            })
            .await
            .unwrap();
        #[allow(deprecated)]
        let table_names = db.table_names(TableNamesRequest::default()).await.unwrap();
        assert!(table_names.contains(&"source_table".to_string()));
        assert!(table_names.contains(&"cloned_table".to_string()));
        assert_eq!(
            source_table.schema().await.unwrap(),
            cloned_table.schema().await.unwrap()
        );
    }
    // A shallow clone of a populated table sees the same rows as the source.
    #[tokio::test]
    async fn test_clone_table_with_data() {
        let (_tempdir, db) = setup_database().await;
        let schema = Arc::new(Schema::new(vec![
            Field::new("id", DataType::Int32, false),
            Field::new("name", DataType::Utf8, false),
        ]));
        let batch = RecordBatch::try_new(
            schema.clone(),
            vec![
                Arc::new(Int32Array::from(vec![1, 2, 3])),
                Arc::new(StringArray::from(vec!["a", "b", "c"])),
            ],
        )
        .unwrap();
        let source_table = db
            .create_table(CreateTableRequest {
                name: "source_with_data".to_string(),
                namespace: vec![],
                data: Box::new(batch) as Box<dyn Scannable>,
                mode: CreateTableMode::Create,
                write_options: Default::default(),
                location: None,
                namespace_client: None,
            })
            .await
            .unwrap();
        let source_uri = db.table_uri("source_with_data").unwrap();
        let cloned_table = db
            .clone_table(CloneTableRequest {
                target_table_name: "cloned_with_data".to_string(),
                target_namespace: vec![],
                source_uri,
                source_version: None,
                source_tag: None,
                is_shallow: true,
                namespace_client: None,
            })
            .await
            .unwrap();
        let source_count = source_table.count_rows(None).await.unwrap();
        let cloned_count = cloned_table.count_rows(None).await.unwrap();
        assert_eq!(source_count, cloned_count);
        assert_eq!(source_count, 3);
    }
    // Cloning succeeds when the database was connected with (pass-through)
    // storage options configured.
    #[tokio::test]
    async fn test_clone_table_with_storage_options() {
        let tempdir = tempdir().unwrap();
        let uri = tempdir.path().to_str().unwrap();
        let mut options = HashMap::new();
        options.insert("test_option".to_string(), "test_value".to_string());
        let request = ConnectRequest {
            uri: uri.to_string(),
            #[cfg(feature = "remote")]
            client_config: Default::default(),
            options: options.clone(),
            read_consistency_interval: None,
            session: None,
        };
        let db = ListingDatabase::connect_with_options(&request)
            .await
            .unwrap();
        let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
        db.create_table(CreateTableRequest {
            name: "source".to_string(),
            namespace: vec![],
            data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
            mode: CreateTableMode::Create,
            write_options: Default::default(),
            location: None,
            namespace_client: None,
        })
        .await
        .unwrap();
        let source_uri = db.table_uri("source").unwrap();
        let cloned = db
            .clone_table(CloneTableRequest {
                target_table_name: "cloned".to_string(),
                target_namespace: vec![],
                source_uri,
                source_version: None,
                source_tag: None,
                is_shallow: true,
                namespace_client: None,
            })
            .await;
        assert!(cloned.is_ok());
    }
    // Deep clones (is_shallow = false) are rejected with NotSupported.
    #[tokio::test]
    async fn test_clone_table_deep_not_supported() {
        let (_tempdir, db) = setup_database().await;
        let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
        db.create_table(CreateTableRequest {
            name: "source".to_string(),
            namespace: vec![],
            data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
            mode: CreateTableMode::Create,
            write_options: Default::default(),
            location: None,
            namespace_client: None,
        })
        .await
        .unwrap();
        let source_uri = db.table_uri("source").unwrap();
        let result = db
            .clone_table(CloneTableRequest {
                target_table_name: "cloned".to_string(),
                target_namespace: vec![],
                source_uri,
                source_version: None,
                source_tag: None,
                is_shallow: false,
                namespace_client: None,
            })
            .await;
        assert!(result.is_err());
        assert!(matches!(
            result.unwrap_err(),
            Error::NotSupported { message } if message.contains("Deep clone")
        ));
    }
    // Cloning into a non-root target namespace is rejected with NotSupported.
    #[tokio::test]
    async fn test_clone_table_with_namespace_not_supported() {
        let (_tempdir, db) = setup_database().await;
        let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
        db.create_table(CreateTableRequest {
            name: "source".to_string(),
            namespace: vec![],
            data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
            mode: CreateTableMode::Create,
            write_options: Default::default(),
            location: None,
            namespace_client: None,
        })
        .await
        .unwrap();
        let source_uri = db.table_uri("source").unwrap();
        let result = db
            .clone_table(CloneTableRequest {
                target_table_name: "cloned".to_string(),
                target_namespace: vec!["namespace".to_string()],
                source_uri,
                source_version: None,
                source_tag: None,
                is_shallow: true,
                namespace_client: None,
            })
            .await;
        assert!(result.is_err());
        assert!(matches!(
            result.unwrap_err(),
            Error::NotSupported { message } if message.contains("Namespace parameter is not supported")
        ));
    }
    // An invalid target table name (contains '/') fails name validation.
    #[tokio::test]
    async fn test_clone_table_invalid_target_name() {
        let (_tempdir, db) = setup_database().await;
        let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
        db.create_table(CreateTableRequest {
            name: "source".to_string(),
            namespace: vec![],
            data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
            mode: CreateTableMode::Create,
            write_options: Default::default(),
            location: None,
            namespace_client: None,
        })
        .await
        .unwrap();
        let source_uri = db.table_uri("source").unwrap();
        let result = db
            .clone_table(CloneTableRequest {
                target_table_name: "invalid/name".to_string(),
                target_namespace: vec![],
                source_uri,
                source_version: None,
                source_tag: None,
                is_shallow: true,
                namespace_client: None,
            })
            .await;
        assert!(result.is_err());
    }
    // Cloning from a nonexistent source URI fails when loading the source.
    #[tokio::test]
    async fn test_clone_table_source_not_found() {
        let (_tempdir, db) = setup_database().await;
        let result = db
            .clone_table(CloneTableRequest {
                target_table_name: "cloned".to_string(),
                target_namespace: vec![],
                source_uri: "/nonexistent/table.lance".to_string(),
                source_version: None,
                source_tag: None,
                is_shallow: true,
                namespace_client: None,
            })
            .await;
        assert!(result.is_err());
    }
    // Specifying both source_version and source_tag is rejected as
    // InvalidInput.
    #[tokio::test]
    async fn test_clone_table_with_version_and_tag_error() {
        let (_tempdir, db) = setup_database().await;
        let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
        db.create_table(CreateTableRequest {
            name: "source".to_string(),
            namespace: vec![],
            data: Box::new(RecordBatch::new_empty(schema)) as Box<dyn Scannable>,
            mode: CreateTableMode::Create,
            write_options: Default::default(),
            location: None,
            namespace_client: None,
        })
        .await
        .unwrap();
        let source_uri = db.table_uri("source").unwrap();
        let result = db
            .clone_table(CloneTableRequest {
                target_table_name: "cloned".to_string(),
                target_namespace: vec![],
                source_uri,
                source_version: Some(1),
                source_tag: Some("v1.0".to_string()),
                is_shallow: true,
                namespace_client: None,
            })
            .await;
        assert!(result.is_err());
        assert!(matches!(
            result.unwrap_err(),
            Error::InvalidInput { message } if message.contains("Cannot specify both source_version and source_tag")
        ));
    }
#[tokio::test]
async fn test_clone_table_with_specific_version() {
    let (_tempdir, db) = setup_database().await;
    let schema = Arc::new(Schema::new(vec![
        Field::new("id", DataType::Int32, false),
        Field::new("value", DataType::Utf8, false),
    ]));
    // First commit: two rows.
    let first_batch = RecordBatch::try_new(
        schema.clone(),
        vec![
            Arc::new(Int32Array::from(vec![1, 2])),
            Arc::new(StringArray::from(vec!["a", "b"])),
        ],
    )
    .unwrap();
    let source_table = db
        .create_table(CreateTableRequest {
            name: "versioned_source".to_string(),
            namespace: vec![],
            data: Box::new(first_batch) as Box<dyn Scannable>,
            mode: CreateTableMode::Create,
            write_options: Default::default(),
            location: None,
            namespace_client: None,
        })
        .await
        .unwrap();
    // Remember the version before the second append so we can clone from it.
    let initial_version = source_table.version().await.unwrap();
    let second_batch = RecordBatch::try_new(
        schema.clone(),
        vec![
            Arc::new(Int32Array::from(vec![3, 4])),
            Arc::new(StringArray::from(vec!["c", "d"])),
        ],
    )
    .unwrap();
    let db = Arc::new(db);
    let appender = Table::new(source_table.clone(), db.clone());
    appender.add(second_batch).execute().await.unwrap();
    assert_eq!(source_table.count_rows(None).await.unwrap(), 4);
    // Clone pinned at the pre-append version: only the first two rows come over.
    let source_uri = db.table_uri("versioned_source").unwrap();
    let cloned_table = db
        .clone_table(CloneTableRequest {
            target_table_name: "cloned_from_version".to_string(),
            target_namespace: vec![],
            source_uri,
            source_version: Some(initial_version),
            source_tag: None,
            is_shallow: true,
            namespace_client: None,
        })
        .await
        .unwrap();
    assert_eq!(cloned_table.count_rows(None).await.unwrap(), 2);
    // The source keeps its latest four-row state untouched.
    assert_eq!(source_table.count_rows(None).await.unwrap(), 4);
}
#[tokio::test]
async fn test_clone_table_with_tag() {
    let (_tempdir, db) = setup_database().await;
    let schema = Arc::new(Schema::new(vec![
        Field::new("id", DataType::Int32, false),
        Field::new("value", DataType::Utf8, false),
    ]));
    let first_batch = RecordBatch::try_new(
        schema.clone(),
        vec![
            Arc::new(Int32Array::from(vec![1, 2])),
            Arc::new(StringArray::from(vec!["a", "b"])),
        ],
    )
    .unwrap();
    let source_table = db
        .create_table(CreateTableRequest {
            name: "tagged_source".to_string(),
            namespace: vec![],
            data: Box::new(first_batch),
            mode: CreateTableMode::Create,
            write_options: Default::default(),
            location: None,
            namespace_client: None,
        })
        .await
        .unwrap();
    let db = Arc::new(db);
    // Tag the current (two-row) version as "v1.0".
    let tag_table = Table::new(source_table.clone(), db.clone());
    let mut tags = tag_table.tags().await.unwrap();
    tags.create("v1.0", source_table.version().await.unwrap())
        .await
        .unwrap();
    // Append two more rows after the tag was placed.
    let second_batch = RecordBatch::try_new(
        schema.clone(),
        vec![
            Arc::new(Int32Array::from(vec![3, 4])),
            Arc::new(StringArray::from(vec!["c", "d"])),
        ],
    )
    .unwrap();
    let appender = Table::new(source_table.clone(), db.clone());
    appender.add(second_batch).execute().await.unwrap();
    assert_eq!(source_table.count_rows(None).await.unwrap(), 4);
    // Cloning by tag picks up the tagged (pre-append) two-row state.
    let source_uri = db.table_uri("tagged_source").unwrap();
    let cloned_table = db
        .clone_table(CloneTableRequest {
            target_table_name: "cloned_from_tag".to_string(),
            target_namespace: vec![],
            source_uri,
            source_version: None,
            source_tag: Some("v1.0".to_string()),
            is_shallow: true,
            namespace_client: None,
        })
        .await
        .unwrap();
    assert_eq!(cloned_table.count_rows(None).await.unwrap(), 2);
}
#[tokio::test]
async fn test_cloned_tables_evolve_independently() {
    let (_tempdir, db) = setup_database().await;
    let schema = Arc::new(Schema::new(vec![
        Field::new("id", DataType::Int32, false),
        Field::new("value", DataType::Utf8, false),
    ]));
    let seed_batch = RecordBatch::try_new(
        schema.clone(),
        vec![
            Arc::new(Int32Array::from(vec![1, 2])),
            Arc::new(StringArray::from(vec!["a", "b"])),
        ],
    )
    .unwrap();
    let source_table = db
        .create_table(CreateTableRequest {
            name: "independent_source".to_string(),
            namespace: vec![],
            data: Box::new(seed_batch),
            mode: CreateTableMode::Create,
            write_options: Default::default(),
            location: None,
            namespace_client: None,
        })
        .await
        .unwrap();
    let source_uri = db.table_uri("independent_source").unwrap();
    let cloned_table = db
        .clone_table(CloneTableRequest {
            target_table_name: "independent_clone".to_string(),
            target_namespace: vec![],
            source_uri,
            source_version: None,
            source_tag: None,
            is_shallow: true,
            namespace_client: None,
        })
        .await
        .unwrap();
    // Both start out with the same two rows.
    assert_eq!(source_table.count_rows(None).await.unwrap(), 2);
    assert_eq!(cloned_table.count_rows(None).await.unwrap(), 2);
    // Append three rows to the clone only...
    let clone_batch = RecordBatch::try_new(
        schema.clone(),
        vec![
            Arc::new(Int32Array::from(vec![3, 4, 5])),
            Arc::new(StringArray::from(vec!["c", "d", "e"])),
        ],
    )
    .unwrap();
    let db = Arc::new(db);
    let clone_appender = Table::new(cloned_table.clone(), db.clone());
    clone_appender.add(clone_batch).execute().await.unwrap();
    // ...and two different rows to the source only.
    let source_batch = RecordBatch::try_new(
        schema.clone(),
        vec![
            Arc::new(Int32Array::from(vec![10, 11])),
            Arc::new(StringArray::from(vec!["x", "y"])),
        ],
    )
    .unwrap();
    let source_appender = Table::new(source_table.clone(), db);
    source_appender.add(source_batch).execute().await.unwrap();
    // Each table saw only its own append: source 2+2 rows, clone 2+3 rows.
    assert_eq!(source_table.count_rows(None).await.unwrap(), 4);
    assert_eq!(cloned_table.count_rows(None).await.unwrap(), 5);
}
#[tokio::test]
async fn test_clone_latest_version() {
    let (_tempdir, db) = setup_database().await;
    let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
    let seed_batch =
        RecordBatch::try_new(schema.clone(), vec![Arc::new(Int32Array::from(vec![1, 2]))])
            .unwrap();
    let source_table = db
        .create_table(CreateTableRequest {
            name: "latest_version_source".to_string(),
            namespace: vec![],
            data: Box::new(seed_batch),
            mode: CreateTableMode::Create,
            write_options: Default::default(),
            location: None,
            namespace_client: None,
        })
        .await
        .unwrap();
    let db = Arc::new(db);
    // Append three more two-row batches, advancing the table with each commit.
    for i in 0..3 {
        let batch = RecordBatch::try_new(
            schema.clone(),
            vec![Arc::new(Int32Array::from(vec![i * 10, i * 10 + 1]))],
        )
        .unwrap();
        let appender = Table::new(source_table.clone(), db.clone());
        appender.add(batch).execute().await.unwrap();
    }
    // 2 seed rows + 3 appends x 2 rows = 8.
    assert_eq!(source_table.count_rows(None).await.unwrap(), 8);
    // With neither a version nor a tag, the clone takes the latest state.
    let source_uri = db.table_uri("latest_version_source").unwrap();
    let cloned_table = db
        .clone_table(CloneTableRequest {
            target_table_name: "cloned_latest".to_string(),
            target_namespace: vec![],
            source_uri,
            source_version: None,
            source_tag: None,
            is_shallow: true,
            namespace_client: None,
        })
        .await
        .unwrap();
    assert_eq!(cloned_table.count_rows(None).await.unwrap(), 8);
}
#[tokio::test]
async fn test_create_table_with_stable_row_ids_connection_level() {
    let tempdir = tempdir().unwrap();
    let uri = tempdir.path().to_str().unwrap();
    // Enable stable row ids for every new table via a connection-level option.
    let options = HashMap::from([(
        OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
        "true".to_string(),
    )]);
    let request = ConnectRequest {
        uri: uri.to_string(),
        #[cfg(feature = "remote")]
        client_config: Default::default(),
        options,
        read_consistency_interval: None,
        session: None,
    };
    let db = ListingDatabase::connect_with_options(&request)
        .await
        .unwrap();
    assert_eq!(db.new_table_config.enable_stable_row_ids, Some(true));
    let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
    let batch = RecordBatch::try_new(
        schema.clone(),
        vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
    )
    .unwrap();
    let table = db
        .create_table(CreateTableRequest {
            name: "test_stable".to_string(),
            namespace: vec![],
            data: Box::new(batch),
            mode: CreateTableMode::Create,
            write_options: Default::default(),
            location: None,
            namespace_client: None,
        })
        .await
        .unwrap();
    // NOTE(review): only the row count is asserted here; whether stable row ids
    // were actually enabled on the dataset is not verified by this test.
    assert_eq!(table.count_rows(None).await.unwrap(), 3);
}
#[tokio::test]
async fn test_create_table_with_stable_row_ids_table_level() {
    let (_tempdir, db) = setup_database().await;
    // The connection itself carries no stable-row-id preference.
    assert_eq!(db.new_table_config.enable_stable_row_ids, None);
    let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
    let batch = RecordBatch::try_new(
        schema.clone(),
        vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
    )
    .unwrap();
    // Request stable row ids per-table through the write-params storage options.
    let storage_options = HashMap::from([(
        OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
        "true".to_string(),
    )]);
    let store_params = lance::io::ObjectStoreParams {
        storage_options_accessor: Some(Arc::new(StorageOptionsAccessor::with_static_options(
            storage_options,
        ))),
        ..Default::default()
    };
    let write_options = WriteOptions {
        lance_write_params: Some(lance::dataset::WriteParams {
            store_params: Some(store_params),
            ..Default::default()
        }),
    };
    let table = db
        .create_table(CreateTableRequest {
            name: "test_stable_table_level".to_string(),
            namespace: vec![],
            data: Box::new(batch),
            mode: CreateTableMode::Create,
            write_options,
            location: None,
            namespace_client: None,
        })
        .await
        .unwrap();
    // NOTE(review): only the row count is asserted; the stable-row-id setting
    // itself is not observable through this assertion.
    assert_eq!(table.count_rows(None).await.unwrap(), 3);
}
#[tokio::test]
async fn test_create_table_stable_row_ids_table_overrides_connection() {
    let tempdir = tempdir().unwrap();
    let uri = tempdir.path().to_str().unwrap();
    // Connection level: stable row ids ON.
    let options = HashMap::from([(
        OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
        "true".to_string(),
    )]);
    let request = ConnectRequest {
        uri: uri.to_string(),
        #[cfg(feature = "remote")]
        client_config: Default::default(),
        options,
        read_consistency_interval: None,
        session: None,
    };
    let db = ListingDatabase::connect_with_options(&request)
        .await
        .unwrap();
    assert_eq!(db.new_table_config.enable_stable_row_ids, Some(true));
    let schema = Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)]));
    let batch = RecordBatch::try_new(
        schema.clone(),
        vec![Arc::new(Int32Array::from(vec![1, 2, 3]))],
    )
    .unwrap();
    // Table level: explicitly turn stable row ids OFF, overriding the connection.
    let storage_options = HashMap::from([(
        OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
        "false".to_string(),
    )]);
    let store_params = lance::io::ObjectStoreParams {
        storage_options_accessor: Some(Arc::new(StorageOptionsAccessor::with_static_options(
            storage_options,
        ))),
        ..Default::default()
    };
    let write_options = WriteOptions {
        lance_write_params: Some(lance::dataset::WriteParams {
            store_params: Some(store_params),
            ..Default::default()
        }),
    };
    let table = db
        .create_table(CreateTableRequest {
            name: "test_override".to_string(),
            namespace: vec![],
            data: Box::new(batch),
            mode: CreateTableMode::Create,
            write_options,
            location: None,
            namespace_client: None,
        })
        .await
        .unwrap();
    // NOTE(review): only the row count is asserted; the override's effect on
    // row-id stability is not directly verified here.
    assert_eq!(table.count_rows(None).await.unwrap(), 3);
}
#[tokio::test]
async fn test_stable_row_ids_invalid_value() {
    let tempdir = tempdir().unwrap();
    let uri = tempdir.path().to_str().unwrap();
    // A non-boolean option value must be rejected at connect time.
    let options = HashMap::from([(
        OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
        "not_a_boolean".to_string(),
    )]);
    let request = ConnectRequest {
        uri: uri.to_string(),
        #[cfg(feature = "remote")]
        client_config: Default::default(),
        options,
        read_consistency_interval: None,
        session: None,
    };
    let result = ListingDatabase::connect_with_options(&request).await;
    assert!(result.is_err());
    assert!(matches!(
        result.unwrap_err(),
        Error::InvalidInput { message } if message.contains("enable_stable_row_ids must be a boolean")
    ));
}
#[test]
fn test_stable_row_ids_config_serialization() {
    let options = HashMap::from([(
        OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
        "true".to_string(),
    )]);
    // Round-trip: parse the option map, then serialize the config back out.
    let db_options = ListingDatabaseOptions::parse_from_map(&options).unwrap();
    assert_eq!(
        db_options.new_table_config.enable_stable_row_ids,
        Some(true)
    );
    let mut serialized = HashMap::new();
    db_options.serialize_into_map(&mut serialized);
    assert_eq!(
        serialized.get(OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS),
        Some(&"true".to_string())
    );
}
#[test]
fn test_stable_row_ids_config_parse_false() {
    let options = HashMap::from([(
        OPT_NEW_TABLE_ENABLE_STABLE_ROW_IDS.to_string(),
        "false".to_string(),
    )]);
    // An explicit "false" parses to Some(false), distinct from "not set".
    let db_options = ListingDatabaseOptions::parse_from_map(&options).unwrap();
    assert_eq!(
        db_options.new_table_config.enable_stable_row_ids,
        Some(false)
    );
}
#[test]
fn test_stable_row_ids_config_not_set() {
    // An empty option map leaves the stable-row-id setting undecided (None).
    let empty_options = HashMap::new();
    let db_options = ListingDatabaseOptions::parse_from_map(&empty_options).unwrap();
    assert_eq!(db_options.new_table_config.enable_stable_row_ids, None);
}
#[tokio::test]
async fn test_table_uri() {
    let (_tempdir, db) = setup_database().await;
    // The expected URI is the database root joined with "<name>.lance".
    let expected: PathBuf = [db.uri.clone(), "test.lance".to_string()].iter().collect();
    let uri = db.table_uri("test").ok().unwrap();
    assert_eq!(uri, expected.to_str().unwrap());
}
#[tokio::test]
async fn test_namespace_client() {
    let (_tempdir, db) = setup_database().await;
    let schema = Arc::new(Schema::new(vec![
        Field::new("id", DataType::Int32, false),
        Field::new("name", DataType::Utf8, false),
    ]));
    // Create two empty tables in the root namespace.
    for table_name in ["table1", "table2"] {
        db.create_table(CreateTableRequest {
            name: table_name.to_string(),
            namespace: vec![],
            data: Box::new(RecordBatch::new_empty(schema.clone())) as Box<dyn Scannable>,
            mode: CreateTableMode::Create,
            write_options: Default::default(),
            location: None,
            namespace_client: None,
        })
        .await
        .unwrap();
    }
    // The listing database should hand out a working namespace client.
    let namespace_client = db.namespace_client().await;
    assert!(namespace_client.is_ok());
    let namespace_client = namespace_client.unwrap();
    let list_result = namespace_client
        .list_tables(lance_namespace::models::ListTablesRequest {
            id: Some(vec![]),
            ..Default::default()
        })
        .await;
    assert!(
        list_result.is_ok(),
        "list_tables failed: {:?}",
        list_result.err()
    );
    // Both tables must show up in the root-namespace listing.
    let tables = list_result.unwrap().tables;
    assert_eq!(tables.len(), 2);
    for expected in ["table1", "table2"] {
        assert!(tables.contains(&expected.to_string()));
    }
}
}