pub struct Resource<T> {
    pub api_client: Arc<ApiClient>,
    /* private fields */
}
A resource instance contains methods for accessing a single CDF resource type.
§Fields
api_client: Arc<ApiClient> - A reference to the shared API client.
§Implementations
impl Resource<Asset>
pub async fn retrieve<R>(
    &self,
    asset_ids: impl Into<IdentityList<R>>,
    ignore_unknown_ids: bool,
    aggregated_properties: Option<Vec<AssetAggregatedProperty>>,
) -> Result<Vec<Asset>>
Retrieve a list of assets by their IDs.
Will fail if ignore_unknown_ids is false and any of the requested assets are not present in CDF.
§Arguments
asset_ids - List of IDs or external IDs to retrieve.
ignore_unknown_ids - If true, missing assets will be ignored instead of causing the request to fail.
aggregated_properties - List of aggregated properties to include in the response.
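§Example
A minimal sketch of retrieving assets by external ID. The assets accessor on the client and the conversion from a string into Identity are assumptions, not taken from this page:
let assets = client.assets.retrieve(
    vec![Identity::from("my-asset-xid")], // assumed conversion from an external ID string
    true,  // ignore_unknown_ids
    None,  // aggregated_properties
).await?;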
pub async fn delete<R>(
    &self,
    asset_ids: impl Into<IdentityList<R>>,
    ignore_unknown_ids: bool,
    recursive: bool,
) -> Result<()>
Delete a list of assets by their IDs.
Will fail if ignore_unknown_ids is false and any of the assets are not present in CDF.
§Arguments
asset_ids - List of IDs or external IDs to delete.
ignore_unknown_ids - If true, missing assets will be ignored instead of causing the request to fail.
recursive - If true, recursively delete any children of the deleted assets.
pub async fn aggregate(
    &self,
    aggregate: AssetAggregateRequest,
) -> Result<Vec<AssetAggregateResponse>>
Compute aggregates over assets, such as getting the count of all assets in a project, checking different names and descriptions of assets in your project, etc.
§Arguments
aggregate - Aggregate to compute.
The returned aggregates depend on which aggregates were requested.
impl Resource<Event>
pub async fn aggregate(
    &self,
    aggregate: EventAggregateRequest,
) -> Result<Vec<EventAggregateResponse>>
Compute aggregates over events, such as getting the count of all events in a project, checking different names and descriptions of events in your project, etc.
§Arguments
aggregate - Aggregate to compute.
The returned aggregates depend on which aggregates were requested.
impl Resource<FileMetadata>
pub async fn upload_stream<S>(
    &self,
    mime_type: &str,
    url: &str,
    stream: S,
    stream_chunked: bool,
) -> Result<()>
Upload a stream to a URL. The URL is received from Files::upload.
§Arguments
mime_type - MIME type of the file to upload, for example application/pdf.
url - URL to upload the stream to.
stream - Stream to upload.
stream_chunked - Set this to true to use chunked streaming. Note that this is not supported for the Azure file backend. If this is set to false, the entire file is read into memory before uploading, which may be very expensive. Use upload_stream_known_size if the size of the file is known.
§Example
use tokio_util::codec::{BytesCodec, FramedRead};
// file_res is the FileUploadResult returned by a previous call to Files::upload.
let file = tokio::fs::File::open("my-file").await?;
let stream = FramedRead::new(file, BytesCodec::new());
cognite_client.files.upload_stream(&file_res.mime_type.unwrap(), &file_res.extra.upload_url, stream, true).await?;
Note that setting stream_chunked to true is in general more efficient, but it is not supported for the Azure file backend.
pub async fn upload_stream_known_size<S>(
    &self,
    mime_type: &str,
    url: &str,
    stream: S,
    size: u64,
) -> Result<()>
Upload a stream to a URL. The URL is received from Files::upload.
This method requires that the length of the stream in bytes is known beforehand.
If the specified size is wrong, the request may fail or even hang.
§Arguments
mime_type - MIME type of the file to upload, for example application/pdf.
url - URL to upload the stream to.
stream - Stream to upload.
size - Known size of the stream in bytes. Note: do not use this method if the size is not actually known!
§Example
use tokio_util::codec::{BytesCodec, FramedRead};
let file = tokio::fs::File::open("my-file").await?;
let size = file.metadata().await?.len();
let stream = FramedRead::new(file, BytesCodec::new());
cognite_client.files.upload_stream_known_size(&file_res.mime_type.unwrap(), &file_res.extra.upload_url, stream, size).await?;
Note that this will still stream the data from disk, so it should be as efficient as upload_stream with stream_chunked, but it does not require the target to accept content-encoding: chunked.
pub async fn upload_file(
    &self,
    mime_type: &str,
    url: &str,
    file: File,
) -> Result<()>
Upload a file as a stream to CDF. url should be the upload URL returned from upload.
§Arguments
mime_type - MIME type of the file to upload, for example application/pdf.
url - URL to upload the file to.
file - File to upload.
pub async fn upload_blob(
    &self,
    mime_type: &str,
    url: &str,
    blob: impl Into<Bytes>,
) -> Result<()>
Upload a binary blob to a URL.
§Arguments
mime_type - MIME type of the file to upload, for example application/pdf.
url - URL to upload the blob to.
blob - File to upload, as bytes.
pub async fn upload(
    &self,
    overwrite: bool,
    item: &AddFile,
) -> Result<FileUploadResult<UploadUrl>>
Create a file, optionally overwriting an existing file.
The result will contain an upload URL that can be used to upload a file.
§Arguments
overwrite - Set this to true to overwrite existing files with the same external_id. If this is false and a file with the given external_id already exists, the request will fail.
item - The file to upload.
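§Example
A minimal sketch of creating a file and then uploading its contents with upload_blob. The AddFile field names and its Default implementation are assumptions, not taken from this page:
let add = AddFile {
    name: "report.pdf".to_string(),                 // assumed field
    mime_type: Some("application/pdf".to_string()), // assumed field
    ..Default::default()
};
let file_res = cognite_client.files.upload(false, &add).await?;
let contents = tokio::fs::read("report.pdf").await?;
cognite_client.files.upload_blob("application/pdf", &file_res.extra.upload_url, contents).await?;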
pub async fn get_upload_link(
    &self,
    id: &IdentityOrInstance,
) -> Result<FileUploadResult<UploadUrl>>
Get an upload link for a file with the given identity.
§Arguments
id - Identity of file metadata or data models file.
pub async fn get_multipart_upload_link(
    &self,
    id: &IdentityOrInstance,
    parts: u32,
) -> Result<FileUploadResult<MultiUploadUrls>>
Get multipart upload link for an existing file metadata or data models file.
§Arguments
id - Identity of the file metadata or data models file.
parts - Number of parts to be uploaded.
pub async fn multipart_upload<'a>(
    &'a self,
    overwrite: bool,
    parts: u32,
    item: &AddFile,
) -> Result<(MultipartUploader<'a>, FileMetadata)>
Create a file, specifying that it should be uploaded in multiple parts.
This returns a MultipartUploader, which wraps the upload process.
§Arguments
overwrite - Set this to true to overwrite existing files with the same external_id. If this is false and a file with the given external_id already exists, the request will fail.
parts - The number of parts to upload; should be a number between 1 and 250.
item - The file to upload.
pub async fn multipart_upload_existing<'a>(
    &'a self,
    id: &IdentityOrInstance,
    parts: u32,
) -> Result<(MultipartUploader<'a>, FileMetadata)>
Upload a file in multiple parts for an existing file metadata or data models file.
This returns a MultipartUploader, which wraps the upload process.
§Arguments
parts - The number of parts to upload; should be a number between 1 and 250.
id - Identity of the file metadata or data models file.
pub async fn init_multipart_upload(
    &self,
    overwrite: bool,
    parts: u32,
    item: &AddFile,
) -> Result<FileUploadResult<MultiUploadUrls>>
Create a file, specifying that it should be uploaded in multiple parts.
§Arguments
overwrite - Set this to true to overwrite existing files with the same external_id. If this is false and a file with the given external_id already exists, the request will fail.
parts - The number of parts to upload; should be a number between 1 and 250.
item - The file to upload.
pub async fn complete_multipart_upload(
    &self,
    id: IdentityOrInstance,
    upload_id: String,
) -> Result<()>
Complete a multipart upload. This endpoint must be called after all parts of a multipart file upload have been uploaded.
§Arguments
id - ID of the file that was uploaded.
upload_id - The upload_id returned by init_multipart_upload.
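§Example
A rough sketch of the low-level multipart flow. The upload_urls and upload_id fields on MultiUploadUrls, and the file_id value, are assumptions and not taken from this page; the higher-level multipart_upload wrapper is usually more convenient.
let res = cognite_client.files.init_multipart_upload(false, 2, &add_file).await?;
for (url, part) in res.extra.upload_urls.iter().zip(parts_data) {
    // upload_urls is an assumed field name on MultiUploadUrls.
    cognite_client.files.upload_blob("application/octet-stream", url, part).await?;
}
// file_id: IdentityOrInstance for the created file (construction omitted);
// upload_id is an assumed field name on MultiUploadUrls.
cognite_client.files.complete_multipart_upload(file_id, res.extra.upload_id).await?;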
pub async fn download_link(
    &self,
    ids: &[IdentityOrInstance],
) -> Result<Vec<FileDownloadUrl>>
pub async fn download(
    &self,
    url: &str,
) -> Result<impl TryStream<Ok = Bytes, Error = Error>>
pub async fn download_file(
    &self,
    id: IdentityOrInstance,
) -> Result<impl TryStream<Ok = Bytes, Error = Error>>
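§Example
A minimal sketch of downloading a file to disk by consuming the returned byte stream. file_id is an IdentityOrInstance whose construction is omitted, and the surrounding function is assumed to convert std::io::Error into the returned error type:
use futures::TryStreamExt;
use tokio::io::AsyncWriteExt;

let mut out = tokio::fs::File::create("downloaded.bin").await?;
let stream = cognite_client.files.download_file(file_id).await?;
futures::pin_mut!(stream);
while let Some(chunk) = stream.try_next().await? {
    out.write_all(&chunk).await?;
}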
impl Resource<Sequence>
pub async fn insert_rows(&self, rows: &[InsertSequenceRows]) -> Result<()>
pub async fn retrieve_rows(
    &self,
    query: RetrieveSequenceRows,
) -> Result<RetrieveSequenceRowsResponse>
pub async fn retrieve_last_row(
    &self,
    query: RetrieveLastSequenceRow,
) -> Result<RetrieveSequenceRowsResponse>
Retrieve the last row from a sequence. The last row is the one with the highest row number, not necessarily the one that was ingested most recently.
§Arguments
query - Sequence row retrieval query.
pub async fn delete_rows(&self, query: &[DeleteSequenceRows]) -> Result<()>
impl Resource<TimeSeries>
pub async fn insert_datapoints(
    &self,
    add_datapoints: Vec<AddDatapoints>,
) -> Result<()>
Insert datapoints for a set of timeseries. Any existing datapoints with the same timestamp will be overwritten.
Note: datapoints are inserted using protobuf; this method converts from a slightly more ergonomic type to the protobuf types used directly by insert_datapoints_proto.
For very performance intensive workloads, consider using insert_datapoints_proto
directly.
§Arguments
add_datapoints - List of datapoint batches to insert.
pub async fn insert_datapoints_proto(
    &self,
    add_datapoints: &DataPointInsertionRequest,
) -> Result<()>
Insert datapoints for a set of timeseries. Any existing datapoints with the same timestamp will be overwritten.
§Arguments
add_datapoints - Datapoint batches to insert.
pub async fn insert_datapoints_proto_create_missing<T: Iterator<Item = AddDmOrTimeSeries>>(
    &self,
    add_datapoints: &DataPointInsertionRequest,
    generator: &impl Fn(&[IdentityOrInstance]) -> T,
) -> Result<()>
Insert datapoints for a set of time series, then create any missing time series.
In order for this to work correctly, generator must return an iterator over time series
with the same length as the passed slice.
§Arguments
add_datapoints - Datapoint batches to insert.
generator - Function called to produce the time series that do not exist.
§Example
client.time_series.insert_datapoints_proto_create_missing(
    &dps,
    &|idts| idts.iter().map(|idt| AddTimeSeries {
        external_id: idt.as_external_id().unwrap(),
        ..Default::default()
    })
).await?;

pub async fn insert_datapoints_create_missing<T: Iterator<Item = AddDmOrTimeSeries>>(
    &self,
    add_datapoints: Vec<AddDatapoints>,
    generator: &impl Fn(&[IdentityOrInstance]) -> T,
) -> Result<()>
Insert datapoints for a set of time series, then create any missing time series.
In order for this to work correctly, generator must return an iterator over time series
with the same length as the passed slice.
§Arguments
add_datapoints - Datapoint batches to insert.
generator - Function called to produce the time series that do not exist.
§Example
client.time_series.insert_datapoints_create_missing(
    dps,
    &|idts| idts.iter().map(|idt| AddTimeSeries {
        external_id: idt.as_external_id().unwrap(),
        ..Default::default()
    })
).await?;

pub async fn insert_datapoints_proto_ignore_missing(
    &self,
    add_datapoints: &DataPointInsertionRequest,
) -> Result<()>
Insert datapoints for a set of timeseries. If the request fails due to any missing time series, remove them from the request and retry.
§Arguments
add_datapoints - Datapoint batches to insert.
pub async fn insert_datapoints_ignore_missing(
    &self,
    add_datapoints: Vec<AddDatapoints>,
) -> Result<()>
Insert datapoints for a set of timeseries. If the request fails due to any missing time series, remove them from the request and retry.
§Arguments
add_datapoints - Datapoint batches to insert.
pub async fn retrieve_datapoints(
    &self,
    datapoints_filter: &DatapointsFilter,
) -> Result<Vec<DatapointsResponse>>
Retrieve datapoints for a collection of time series.
Note: datapoints are retrieved using protobuf; this method converts the type returned by retrieve_datapoints_proto into a slightly more ergonomic type.
For very performance intensive workloads, consider using retrieve_datapoints_proto
directly.
§Arguments
datapoints_filter - Filter describing which datapoints to retrieve.
pub async fn retrieve_datapoints_proto(
    &self,
    datapoints_filter: &DatapointsFilter,
) -> Result<DataPointListResponse>
Retrieve datapoints for a collection of time series.
§Arguments
datapoints_filter - Filter describing which datapoints to retrieve.
pub async fn retrieve_latest_datapoints(
    &self,
    items: &[LatestDatapointsQuery],
    ignore_unknown_ids: bool,
) -> Result<Vec<LatestDatapointsResponse>>
Retrieve the latest datapoint before a given time for a list of time series.
§Arguments
items - Queries for the latest datapoint.
ignore_unknown_ids - Set this to true to ignore time series that do not exist.
pub async fn delete_datapoints(
    &self,
    query: &[DeleteDatapointsQuery],
) -> Result<()>
Delete ranges of datapoints for a list of time series.
§Arguments
query - Ranges of datapoints to delete.
pub async fn query_synthetic_timeseries(
    &self,
    query: &[SyntheticTimeSeriesQuery],
) -> Result<Vec<SyntheticQueryResponse>>
Query synthetic time series. Synthetic time series let you combine various input time series, constants, and operators to create completely new time series.
See the synthetic time series documentation for more details.
§Arguments
query - Synthetic datapoints queries.
pub fn stream_datapoints(
    &self,
    filter: DatapointsFilter,
    options: DatapointsStreamOptions,
) -> impl Stream<Item = Result<DataPointRef>> + '_
Stream datapoints for a list of time series. The datapoints are returned in ascending order, but no ordering is guaranteed between time series.
We batch for you, so the items array in DatapointsFilter can contain more than 100 entries,
but batch_size should not be set larger than 100.
parallelism controls how many requests we have in-flight at any given time.
Avoid setting this too high, as it may lead to rate limiting, which will reduce the actual
throughput.
§Arguments
filter - Filter describing common filter properties and a list of time series to retrieve data from.
options - Options for controlling the stream.
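§Example
A minimal sketch of consuming the stream. Default implementations on DatapointsFilter and DatapointsStreamOptions are assumptions; in practice you would populate the filter items and stream options:
use futures::StreamExt;

let filter = DatapointsFilter::default();          // set items, start, end, etc. in practice
let options = DatapointsStreamOptions::default();  // set parallelism, batch_size, etc. in practice
let stream = client.time_series.stream_datapoints(filter, options);
futures::pin_mut!(stream);
while let Some(dp) = stream.next().await {
    let dp = dp?;
    // Each item is a DataPointRef; handle the individual datapoint here.
}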
pub fn stream_datapoint_batches(
    &self,
    filter: DatapointsFilter,
    options: DatapointsStreamOptions,
) -> impl Stream<Item = Result<DataPointListResponse>> + '_
Stream datapoints for a list of time series. This returns raw batches of datapoints as they arrive from CDF. Use stream_datapoints if you want to work with individual datapoints.
We batch for you, so the items array in DatapointsFilter can contain more than 100 entries,
but batch_size should not be set larger than 100.
parallelism controls how many requests we have in-flight at any given time.
Avoid setting this too high, as it may lead to rate limiting, which will reduce the actual
throughput.
§Arguments
filter - Filter describing common filter properties and a list of time series to retrieve data from.
options - Options for controlling the stream.
impl Resource<RawRow>
pub async fn list_databases(
    &self,
    limit: Option<i32>,
    cursor: Option<String>,
) -> Result<ItemsVec<Database, Cursor>>
List Raw databases in the project.
§Arguments
limit - Maximum number of databases to retrieve.
cursor - Optional cursor for pagination.
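§Example
A minimal sketch of listing raw databases. The raw accessor on the client and the items/name fields are assumptions, not taken from this page:
let dbs = client.raw.list_databases(Some(100), None).await?;
for db in dbs.items {
    println!("{}", db.name);
}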
pub async fn delete_databases(
    &self,
    to_delete: &DeleteDatabasesRequest,
) -> Result<()>
Delete a list of raw databases.
§Arguments
to_delete - Request describing which databases to delete and how.
pub async fn list_tables(
    &self,
    db_name: &str,
    limit: Option<i32>,
    cursor: Option<String>,
) -> Result<ItemsVec<Table, Cursor>>
List tables in a raw database.
§Arguments
db_name - Database to list tables in.
limit - Maximum number of tables to retrieve.
cursor - Optional cursor for pagination.
pub async fn create_tables(
    &self,
    db_name: &str,
    ensure_parent: bool,
    tables: &[Table],
) -> Result<Vec<Table>>
Create tables in a raw database.
§Arguments
db_name - Database to create tables in.
ensure_parent - If this is set to true, create the database if it doesn't already exist.
tables - Tables to create.
pub async fn delete_tables(
    &self,
    db_name: &str,
    to_delete: &[Table],
) -> Result<()>
Delete tables in a raw database.
§Arguments
db_name - Database to delete tables from.
to_delete - Tables to delete.
pub async fn retrieve_cursors_for_parallel_reads(
    &self,
    db_name: &str,
    table_name: &str,
    params: Option<RetrieveCursorsQuery>,
) -> Result<Vec<String>>
Retrieve cursors for parallel reads. This can be used to efficiently download large volumes of data from a raw table in parallel.
§Arguments
db_name - Database to retrieve from.
table_name - Table to retrieve from.
params - Optional filter parameters.
pub async fn retrieve_rows(
    &self,
    db_name: &str,
    table_name: &str,
    params: Option<RetrieveRowsQuery>,
) -> Result<ItemsVec<RawRow, Cursor>>
Retrieve rows from a table, with some basic filtering options.
§Arguments
db_name - Database to retrieve rows from.
table_name - Table to retrieve rows from.
params - Optional filter parameters.
pub fn retrieve_all_rows_stream<'a>(
    &'a self,
    db_name: &'a str,
    table_name: &'a str,
    params: Option<RetrieveRowsQuery>,
) -> impl TryStream<Ok = RawRow, Error = Error, Item = Result<RawRow>> + Send + 'a
Retrieve all rows from a table, following cursors. This returns a stream; you can abort the stream whenever you want, and only the rows retrieved up to that point will be returned.
Each item in the stream is a result; after the first error is returned, the stream will end.
limit in the filter only affects how many rows are returned per request.
§Arguments
db_name - Database to retrieve rows from.
table_name - Table to retrieve rows from.
params - Optional filter parameters. This can set a cursor to start streaming from there.
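§Example
A minimal sketch of consuming the row stream. The raw accessor on the client and the key field on RawRow are assumptions, not taken from this page:
use futures::TryStreamExt;

let stream = client.raw.retrieve_all_rows_stream("my-db", "my-table", None);
futures::pin_mut!(stream);
while let Some(row) = stream.try_next().await? {
    println!("{}", row.key);
}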
pub async fn retrieve_all_rows(
    &self,
    db_name: &str,
    table_name: &str,
    params: Option<RetrieveRowsQuery>,
) -> Result<Vec<RawRow>>
Retrieve all rows from a table, following cursors.
limit in the filter only affects how many rows are returned per request.
§Arguments
db_name - Database to retrieve rows from.
table_name - Table to retrieve rows from.
params - Optional filter parameters. This can set a cursor to start reading from there.
pub async fn retrieve_all_rows_partitioned(
    &self,
    db_name: &str,
    table_name: &str,
    params: RetrieveAllPartitionedQuery,
) -> Result<Vec<RawRow>>
Retrieve all rows from a table, following cursors and reading from multiple streams in parallel.
The order of the returned values is not guaranteed to be in any way consistent.
§Arguments
db_name - Database to retrieve rows from.
table_name - Table to retrieve rows from.
params - Optional filter parameters.
pub fn retrieve_all_rows_partitioned_stream<'a>(
    &'a self,
    db_name: &'a str,
    table_name: &'a str,
    params: RetrieveAllPartitionedQuery,
) -> impl TryStream<Ok = RawRow, Error = Error, Item = Result<RawRow>> + Send + 'a
Retrieve all rows from a table, following cursors and reading from multiple streams in parallel.
The order of the returned values is not guaranteed to be in any way consistent.
§Arguments
db_name - Database to retrieve rows from.
table_name - Table to retrieve rows from.
params - Optional filter parameters.
pub async fn insert_rows(
    &self,
    db_name: &str,
    table_name: &str,
    ensure_parent: bool,
    rows: &[RawRowCreate],
) -> Result<()>
Insert rows into a table.
If ensure_parent is true, create the database and/or table if they do not exist.
§Arguments
db_name - Database to insert rows into.
table_name - Table to insert rows into.
ensure_parent - Create the database and/or table if they do not exist.
rows - Raw rows to create.
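§Example
A minimal sketch of inserting a row. The raw accessor on the client and the key/columns fields on RawRowCreate are assumptions, not taken from this page; serde_json is used to build the column object:
use serde_json::json;

let rows = vec![RawRowCreate {
    key: "row-1".to_string(),                // assumed field
    columns: json!({ "temperature": 21.5 }), // assumed field
}];
client.raw.insert_rows("my-db", "my-table", true, &rows).await?;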
impl Resource<ContainerDefinition>
pub async fn delete_constraints(
    &self,
    items: &[ContainerComponentId],
) -> Result<Vec<ContainerComponentId>>
pub async fn delete_indexes(
    &self,
    items: &[ContainerComponentId],
) -> Result<Vec<ContainerComponentId>>
impl Resource<SlimNodeOrEdge>
pub async fn filter_with_type_info<TProperties: DeserializeOwned + Send + Sync + 'static>(
    &self,
    req: FilterInstancesRequest,
) -> Result<InstancesFilterResponse<TProperties>>
Filter instances, optionally returning type information.
§Arguments
req - Request with optional filter.
pub async fn query<TProperties: DeserializeOwned + Send + Sync + 'static>(
    &self,
    query: QueryInstancesRequest,
) -> Result<QueryInstancesResponse<TProperties>>
pub async fn sync<TProperties: DeserializeOwned + Send + Sync + 'static>(
    &self,
    query: QueryInstancesRequest,
) -> Result<QueryInstancesResponse<TProperties>>
Perform a complex query against data models. This always returns cursors, so you can keep querying to get any changes since the last query.
§Arguments
query - Query to execute.
pub async fn aggregate(
    &self,
    req: AggregateInstancesRequest,
) -> Result<AggregateInstancesResponse>
pub async fn search<TProperties: DeserializeOwned + Send + Sync + 'static>(
    &self,
    req: SearchInstancesRequest,
) -> Result<NodeAndEdgeRetrieveResponse<TProperties>>
pub async fn fetch<TEntity, TProperties>(
    &self,
    items: &[NodeOrEdgeSpecification],
    view: Option<&ViewReference>,
) -> Result<Vec<TEntity>>
where
    TProperties: Serialize + DeserializeOwned + Send + Sync,
    TEntity: FromReadable<TProperties> + WithView + Send,
Fetch a collection of data models instances, converted into the requested entity type.
§Arguments
items - A list of specifications of nodes/edges to retrieve.
pub async fn apply<TEntity, TProperties>(
    &self,
    col: &[TEntity],
    auto_create_direct_relations: Option<bool>,
    auto_create_start_nodes: Option<bool>,
    auto_create_end_nodes: Option<bool>,
    skip_on_version_conflict: Option<bool>,
    replace: bool,
) -> Result<Vec<SlimNodeOrEdge>>
where
    TProperties: Serialize + DeserializeOwned + Send + Sync,
    TEntity: Clone + Into<NodeOrEdgeCreate<TProperties>> + Send,
Upsert data models instances of this type.
§Arguments
col - A list of items of this type to be created.
auto_create_direct_relations - Whether to auto-create direct relations that do not exist.
auto_create_start_nodes - Whether to auto-create start nodes that do not exist.
auto_create_end_nodes - Whether to auto-create end nodes that do not exist.
skip_on_version_conflict - Whether to skip when a version conflict is encountered.
replace - Whether to replace all matching and existing values with the supplied values.
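§Example
A minimal sketch of upserting instances. MyNode is a hypothetical type implementing Clone + Into<NodeOrEdgeCreate<MyProperties>>, build_nodes is a hypothetical helper, and the instances accessor on the client is an assumption:
let nodes: Vec<MyNode> = build_nodes(); // hypothetical helper producing the instances to upsert
let created: Vec<SlimNodeOrEdge> = client.instances.apply(
    &nodes,
    Some(true), // auto_create_direct_relations
    None,       // auto_create_start_nodes
    None,       // auto_create_end_nodes
    Some(true), // skip_on_version_conflict
    false,      // replace
).await?;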
impl Resource<Record<HashMap<String, RawValue>>>
pub async fn ingest<T: Serialize>(
    &self,
    stream_id: &str,
    records: &[RecordWrite<T>],
) -> Result<()>
Ingest records into a stream.
Note: The maximum total request size is 10 MB.
§Arguments
stream_id - ID of the stream to ingest records into.
records - Records to ingest.
pub async fn upsert<T: Serialize>(
    &self,
    stream_id: &str,
    records: &[RecordWrite<T>],
) -> Result<()>
Upsert records into a stream.
Note: The maximum total request size is 10 MB.
§Arguments
stream_id - ID of the stream to upsert records into.
records - Records to upsert.
pub async fn retrieve<T: DeserializeOwned>(
    &self,
    stream_id: &str,
    request: &RecordsRetrieveRequest,
) -> Result<ItemsVec<Record<T>>>
Retrieve records from a stream.
§Arguments
stream_id - ID of the stream to retrieve records from.
request - Request with optional filter and sort.
pub async fn sync<T: DeserializeOwned>(
    &self,
    stream_id: &str,
    request: &RecordsSyncRequest,
) -> Result<ItemsVec<Record<T>, CursorAndHasNext>>
Subscribe to changes for records from the stream, matching a supplied filter.
§Arguments
stream_id - ID of the stream to subscribe to.
request - Request with optional filter.
impl Resource<Stream>
impl Resource<DataSet>
pub async fn count(&self, filter: DataSetFilter) -> Result<DataSetsCount>
Calculate the total number of data sets in the project matching the given filter.
§Arguments
filter - Optional filter.
impl Resource<Relationship>
pub async fn retrieve(
    &self,
    relationship_ids: &[CogniteExternalId],
    ignore_unknown_ids: bool,
    fetch_resources: bool,
) -> Result<Vec<Relationship>>
Retrieve a list of relationships by their IDs.
§Arguments
relationship_ids- IDs of relationships to retrieve.ignore_unknown_ids- Set this totrueto ignore any IDs not found in CDF. If this isfalse, any missing IDs will cause the request to fail.fetch_resources- Whether to fetch the associated resources along with the relationship itself.